input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<reponame>jverce/tensorflow
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions that help to inspect Python source w.r.t. TF graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import re
import zipfile
import absl
import numpy as np
from tensorflow.python.debug.lib import profiling
# Root of the TensorFlow installation, obtained by walking four directory
# levels up from this file (tensorflow/python/debug/lib/<this file>).
_TENSORFLOW_BASEDIR = os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.dirname(
        os.path.normpath(os.path.abspath(__file__))))))
# Root of the installed absl package; files under it are treated as
# library (non-user) code by guess_is_tensorflow_py_library().
_ABSL_BASEDIR = os.path.dirname(absl.__file__)
# File-name suffixes used to classify Python source files.
# NOTE: these must be tuples. The previous value `(".py")` was just a
# parenthesized string, so `extension in UNCOMPILED_SOURCE_SUFFIXES` performed
# substring matching and wrongly accepted e.g. ".p", "p" and "".
UNCOMPILED_SOURCE_SUFFIXES = (".py",)
COMPILED_SOURCE_SUFFIXES = (".pyc", ".pyo")
def _norm_abs_path(file_path):
return os.path.normpath(os.path.abspath(file_path))
def is_extension_uncompiled_python_source(file_path):
  """Whether `file_path`'s extension indicates an uncompiled Python source."""
  extension = os.path.splitext(file_path)[1]
  return extension.lower() in UNCOMPILED_SOURCE_SUFFIXES
def is_extension_compiled_python_source(file_path):
  """Whether `file_path`'s extension indicates a compiled Python file."""
  extension = os.path.splitext(file_path)[1]
  return extension.lower() in COMPILED_SOURCE_SUFFIXES
def _convert_watch_key_to_tensor_name(watch_key):
return watch_key[:watch_key.rfind(":")]
def guess_is_tensorflow_py_library(py_file_path):
  """Guess whether a Python source file is part of the tensorflow library.

  Special cases:
    1) Returns False for unit-test files in the library (*_test.py),
    2) Returns False for files under python/debug/examples.

  Args:
    py_file_path: full path of the Python source file in question.

  Returns:
    (`bool`) Whether the file is a part of the tensorflow library.

  Raises:
    ValueError: if the extension name of py_file_path does not indicate a
      Python source file (compiled or uncompiled).
  """
  if not (is_extension_uncompiled_python_source(py_file_path) or
          is_extension_compiled_python_source(py_file_path)):
    raise ValueError(
        "Input file path (%s) is not a Python source file." % py_file_path)

  normalized = _norm_abs_path(py_file_path)
  # Unit tests of the library itself are not counted as library code.
  if normalized.endswith("_test.py"):
    return False
  # Neither are the debugger example scripts.
  examples_fragment = os.path.normpath("tensorflow/python/debug/examples")
  if examples_fragment in os.path.normpath(normalized):
    return False
  return (normalized.startswith(_TENSORFLOW_BASEDIR) or
          normalized.startswith(_ABSL_BASEDIR))
def load_source(source_file_path):
  """Load the content of a Python source code file.

  This function covers the following cases:
    1. source_file_path points to an existing Python (.py) file on the
       file system.
    2. source_file_path is a path within a .par file (i.e., a zip-compressed,
       self-contained Python executable).

  Args:
    source_file_path: Path to the Python source file to read.

  Returns:
    A length-2 tuple:
      - Lines of the source file, as a `list` of `str`s.
      - The width of the string needed to show the line number in the file.
        This is calculated based on the number of lines in the source file.

  Raises:
    IOError: if loading is unsuccessful.
  """
  if not os.path.isfile(source_file_path):
    # The path may refer to a file packed inside a .par (zip) archive;
    # try that possibility before giving up.
    source_lines = _try_load_par_source(source_file_path)
    if source_lines is None:
      raise IOError(
          "Source path neither exists nor can be loaded as a .par file: %s" %
          source_file_path)
  else:
    with open(source_file_path, "rb") as f:
      source_lines = f.read().decode("utf-8").split("\n")

  line_num_width = int(np.ceil(np.log10(len(source_lines)))) + 3
  return source_lines, line_num_width
def _try_load_par_source(source_file_path):
"""Try loading the source code inside a .par file.
A .par file is a zip-compressed, self-contained Python executable.
It contains the content of individual Python source files that can
be read only through extracting from the zip file.
Args:
source_file_path: The full path to the file inside the .par file. This
path should include the path to the .par file itself, followed by the
intra-par path, e.g.,
"/tmp/my_executable.par/org-tensorflow/tensorflow/python/foo/bar.py".
Returns:
If successful, lines of the source file as a `list` of `str`s.
Else, `None`.
"""
path_items = [item for item in source_file_path.split(os.path.sep) if item]
prefix_path = os.path.sep
for i, path_item in enumerate(path_items):
prefix_path = os.path.join(prefix_path, path_item)
if (prefix_path.endswith(".par") and os.path.isfile(prefix_path)
and i < len(path_items) - 1):
suffix_path = os.path.sep.join(path_items[i + 1:])
with zipfile.ZipFile(prefix_path) as z:
if suffix_path not in z.namelist():
return None
with z.open(suffix_path) as zf:
source_text = zf.read().decode("utf-8")
return source_text.split("\n")
def annotate_source(dump,
                    source_file_path,
                    do_dumped_tensors=False,
                    file_stack_top=False,
                    min_line=None,
                    max_line=None):
  """Annotate a Python source file with a list of ops created at each line.

  (The annotation doesn't change the source file itself.)

  Args:
    dump: (`DebugDumpDir`) A `DebugDumpDir` object of which the Python graph
      has been loaded.
    source_file_path: (`str`) Path to the source file being annotated.
    do_dumped_tensors: (`str`) Whether dumped Tensors, instead of ops are to be
      used to annotate the source file.
    file_stack_top: (`bool`) Whether only the top stack trace in the
      specified source file is to be annotated.
    min_line: (`None` or `int`) The 1-based line to start annotate the source
      file from (inclusive).
    max_line: (`None` or `int`) The 1-based line number to end the annotation
      at (exclusive).

  Returns:
    A `dict` mapping 1-based line number to a list of op name(s) created at
    that line, or tensor names if `do_dumped_tensors` is True.

  Raises:
    ValueError: If the dump object does not have a Python graph set.
  """
  graph = dump.python_graph
  if not graph:
    raise ValueError("Cannot perform source annotation due to a lack of set "
                     "Python graph in the dump object")

  target_path = _norm_abs_path(source_file_path)
  annotations = {}
  for op in graph.get_operations():
    # Walk the creation stack from its innermost frame outward.
    for file_path, line_number, _, _ in reversed(dump.node_traceback(op.name)):
      outside_range = (
          (min_line is not None and line_number < min_line) or
          (max_line is not None and line_number >= max_line))
      if outside_range or _norm_abs_path(file_path) != target_path:
        continue

      if do_dumped_tensors:
        # Convert watch keys to unique Tensor names.
        names = list({_convert_watch_key_to_tensor_name(key)
                      for key in dump.debug_watch_keys(op.name)})
      else:
        names = [op.name]
      annotations.setdefault(line_number, []).extend(names)

      if file_stack_top:
        break
  return annotations
def list_source_files_against_dump(dump,
                                   path_regex_whitelist=None,
                                   node_name_regex_whitelist=None):
  """Generate a list of source files with information regarding ops and tensors.

  Args:
    dump: (`DebugDumpDir`) A `DebugDumpDir` object of which the Python graph
      has been loaded.
    path_regex_whitelist: A regular-expression filter for source file path.
    node_name_regex_whitelist: A regular-expression filter for node names.

  Returns:
    A list of tuples regarding the Python source files involved in constructing
    the ops and tensors contained in `dump`. Each tuple is:
      (source_file_path, is_tf_library, num_nodes, num_tensors, num_dumps,
       first_line)

      is_tf_library: (`bool`) A guess of whether the file belongs to the
        TensorFlow Python library.
      num_nodes: How many nodes were created by lines of this source file.
        These include nodes with dumps and those without.
      num_tensors: How many Tensors were created by lines of this source file.
        These include Tensors with dumps and those without.
      num_dumps: How many debug Tensor dumps were from nodes (and Tensors)
        that were created by this source file.
      first_line: The first line number (1-based) that created any nodes or
        Tensors in this source file.

    The list is sorted by ascending order of source_file_path.

  Raises:
    ValueError: If the dump object does not have a Python graph set.
  """
  py_graph = dump.python_graph
  if not py_graph:
    raise ValueError("Cannot generate source list due to a lack of set "
                     "Python graph in the dump object")

  path_to_node_names = collections.defaultdict(set)
  path_to_tensor_names = collections.defaultdict(set)
  path_to_first_line = {}
  tensor_name_to_num_dumps = {}

  path_regex = (re.compile(path_regex_whitelist)
                if path_regex_whitelist else None)
  node_name_regex = (re.compile(node_name_regex_whitelist)
                     if node_name_regex_whitelist else None)

  # Cache of paths already known to fail the filters (or not exist on disk),
  # so each path is checked at most once.
  to_skip_file_paths = set()
  for op in py_graph.get_operations():
    if node_name_regex and not node_name_regex.match(op.name):
      continue

    for file_path, line_number, _, _ in dump.node_traceback(op.name):
      file_path = _norm_abs_path(file_path)
      # Skip filtered-out paths and paths that do not exist on disk.
      if (file_path in to_skip_file_paths or
          path_regex and not path_regex.match(file_path) or
          not os.path.isfile(file_path)):
        to_skip_file_paths.add(file_path)
        continue

      path_to_node_names[file_path].add(op.name)
      # Track the smallest (earliest) creating line number per file.
      if file_path in path_to_first_line:
        if path_to_first_line[file_path] > line_number:
          path_to_first_line[file_path] = line_number
      else:
        path_to_first_line[file_path] = line_number

      for output_tensor in op.outputs:
        tensor_name = output_tensor.name
        path_to_tensor_names[file_path].add(tensor_name)

      # Count dumps per tensor once; a watch key has the form
      # "<node_name>:<output_slot>:<debug_op>".
      watch_keys = dump.debug_watch_keys(op.name)
      for watch_key in watch_keys:
        node_name, output_slot, debug_op = watch_key.split(":")
        tensor_name = "%s:%s" % (node_name, output_slot)
        if tensor_name not in tensor_name_to_num_dumps:
          tensor_name_to_num_dumps[tensor_name] = len(
              dump.get_tensors(node_name, int(output_slot), debug_op))

  # Aggregate per-tensor dump counts into per-file dump counts.
  path_to_num_dumps = {}
  for path in path_to_tensor_names:
    path_to_num_dumps[path] = sum(
        tensor_name_to_num_dumps.get(tensor_name, 0)
        for tensor_name in path_to_tensor_names[path])

  output = []
  for file_path in path_to_node_names:
    output.append((
        file_path,
        guess_is_tensorflow_py_library(file_path),
        len(path_to_node_names.get(file_path, {})),
        len(path_to_tensor_names.get(file_path, {})),
        path_to_num_dumps.get(file_path, 0),
        path_to_first_line[file_path]))
  return sorted(output, key=lambda x: x[0])
def annotate_source_against_profile(profile_data,
source_file_path,
node_name_filter=None,
op_type_filter=None,
min_line=None,
max_line=None):
"""Annotate a Python source file with profiling information at each line.
(The annotation doesn't change the source file itself.)
Args:
profile_data: (`list` of `ProfileDatum`) A list of `ProfileDatum`.
source_file_path: (`str`) Path to the source file being annotated.
node_name_filter: Regular expression to filter by node name.
op_type_filter: Regular expression to filter by op type.
min_line: (`None` or `int`) The 1-based line to start annotate the source
file from (inclusive).
max_line: (`None` or `int`) The 1-based line number to end the annotation
at (exclusive).
| |
work with this Op.
"""
if workmem is not None:
if algo is not None:
raise ValueError("You can't use both algo and workmem")
warnings.warn("workmem is deprecated, use algo instead", stacklevel=2)
algo = workmem
fgraph = getattr(img, 'fgraph', None) or getattr(kerns, 'fgraph', None)
ctx_name = infer_context_name(img, kerns)
if (border_mode == 'valid' and subsample == (1, 1) and
direction_hint == 'bprop weights'):
# Special case: We are asked to use GpuDnnConvGradW. We need to set
# up a suitable 'fake' convolution to compute the gradient for.
img = gpu_contiguous(img.dimshuffle(1, 0, 2, 3))
if conv_mode == 'conv':
# We need to flip manually. These 'kerns' are not the kernels
# that would be flipped by conv_mode='conv' in GpuDnnConvGradW.
kerns = kerns[:, :, ::-1, ::-1]
kerns = gpu_contiguous(kerns.dimshuffle(1, 0, 2, 3))
shape2 = shape_i(img, 2, fgraph) - shape_i(kerns, 2, fgraph) + 1
shape3 = shape_i(img, 3, fgraph) - shape_i(kerns, 3, fgraph) + 1
out = GpuAllocEmpty(img.dtype, ctx_name)(
shape_i(kerns, 1, fgraph),
shape_i(img, 1, fgraph), shape2, shape3)
desc = GpuDnnConvDesc(border_mode='valid', subsample=(1, 1),
conv_mode='cross')(out.shape)
conv = GpuDnnConvGradW()(img, kerns, out, desc)
return as_gpuarray_variable(conv.dimshuffle(1, 0, 2, 3), ctx_name)
elif (border_mode == 'full' and subsample == (1, 1) and
direction_hint != 'forward!'):
# Special case: We can be faster by using GpuDnnConvGradI to compute
# the full convolution as the backward pass of a valid convolution.
# We just need to set up a suitable 'fake' valid convolution.
img = gpu_contiguous(img) # cudnn v2 rc3 need contiguous data
kerns = gpu_contiguous(kerns.dimshuffle(1, 0, 2, 3))
conv_mode = 'cross' if conv_mode == 'conv' else 'conv'
shape2 = shape_i(img, 2, fgraph) + shape_i(kerns, 2, fgraph) - 1
shape3 = shape_i(img, 3, fgraph) + shape_i(kerns, 3, fgraph) - 1
out = GpuAllocEmpty(img.dtype, ctx_name)(shape_i(img, 0, fgraph),
shape_i(kerns, 1, fgraph),
shape2, shape3)
desc = GpuDnnConvDesc(border_mode='valid', subsample=(1, 1),
conv_mode=conv_mode)(kerns.shape)
return GpuDnnConvGradI()(kerns, img, out, desc)
# Standard case: We use GpuDnnConv with suitable padding.
# contig_version will return a gpu_contiguous copy
# if the img contains negative strides
img = gpu_contiguous(img)
kerns = gpu_contiguous(kerns)
desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,
conv_mode=conv_mode)(kerns.shape)
desc_op = desc.owner.op
out_shp = GpuDnnConv.get_out_shape(img.shape, kerns.shape,
desc_op.border_mode,
desc_op.subsample)
out = GpuAllocEmpty(img.dtype, ctx_name)(*out_shp)
return GpuDnnConv(algo=algo)(img, kerns, out, desc)
class GpuDnnPoolDesc(Op):
    """
    This Op builds a pooling descriptor for use in the other
    pooling operations.

    `ws`, `stride` and `pad` must have the same length.

    Parameters
    ----------
    ws : tuple
        Window size.
    stride : tuple
        (dx, dy) or (dx, dy, dz).
    mode : {'max', 'average_inc_pad', 'average_exc_pad'}
        The old deprecated name 'average' corresponds to 'average_inc_pad'.
    pad : tuple
        (padX, padY) or (padX, padY, padZ)
    """

    __props__ = ('ws', 'stride', 'mode', 'pad')

    def c_headers(self):
        # Headers required to compile the generated C code.
        return ['cudnn.h', 'cudnn_helper.h']

    def c_header_dirs(self):
        # This directory (for cudnn_helper.h) plus the configured cuDNN
        # include path.
        return [os.path.dirname(__file__), config.dnn.include_path]

    def c_libraries(self):
        return ['cudnn']

    def c_lib_dirs(self):
        return [config.dnn.library_path]

    def do_constant_folding(self, node):
        # The output is an opaque C descriptor object; never constant-fold.
        return False

    def __init__(self, ws=(1, 1), stride=(1, 1), mode='max', pad=(0, 0)):
        if mode == 'average':
            # Accept the old deprecated mode name.
            mode = 'average_inc_pad'
        assert mode in ('max', 'average_inc_pad', 'average_exc_pad')
        self.mode = mode

        assert len(ws) == len(stride) and len(stride) == len(pad)
        assert len(ws) in (2, 3)
        self.ws = ws
        self.stride = stride
        self.pad = pad

        if self.get_ndim() == 3 and version() < 3000:
            raise RuntimeError("CuDNN 3d pooling requires v3")

    def get_ndim(self):
        # Number of pooled (spatial) dimensions: 2 or 3.
        return len(self.ws)

    def __setstate__(self, d):
        # Backward compatibility for objects pickled before `pad` existed.
        self.__dict__.update(d)
        if not hasattr(self, 'pad'):
            self.pad = (0, 0)

    def make_node(self):
        # No inputs; the single output wraps a cudnnPoolingDescriptor_t that
        # is freed automatically when the variable is garbage-collected.
        return Apply(self, [],
                     [CDataType("cudnnPoolingDescriptor_t",
                                freefunc="cudnnDestroyPoolingDescriptor")()])

    def c_code(self, node, name, inputs, outputs, sub):
        """Generate C code that allocates and configures the descriptor."""
        desc, = outputs

        if self.mode == 'max':
            mode_flag = 'CUDNN_POOLING_MAX'
        elif self.mode == "average_inc_pad":
            mode_flag = 'CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING'
        elif self.mode == "average_exc_pad":
            mode_flag = 'CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING'
        else:
            raise NotImplementedError("Unsupported pooling model.")

        return """
{
  cudnnStatus_t err;

  if ((err = cudnnCreatePoolingDescriptor(&%(desc)s)) != CUDNN_STATUS_SUCCESS) {
    PyErr_Format(PyExc_MemoryError, "could not allocate pooling "
                 "descriptor: %%s", cudnnGetErrorString(err));
    %(fail)s
  }

  static const int win[%(nd)d] = {%(win)s};
  static const int pad[%(nd)d] = {%(pad)s};
  static const int str[%(nd)d] = {%(str)s};
  err = cudnnSetPoolingNdDescriptor(
    %(desc)s, %(mode_flag)s, %(nd)d,
    win, pad, str);
  if (err != CUDNN_STATUS_SUCCESS) {
    PyErr_Format(PyExc_RuntimeError, "could not set op descriptor: %%s",
                 cudnnGetErrorString(err));
    %(fail)s
  }
}
""" % dict(name=name, desc=desc, mode_flag=mode_flag, fail=sub['fail'],
           nd=self.get_ndim(), win=', '.join(map(str, self.ws)),
           pad=', '.join(map(str, self.pad)),
           str=', '.join(map(str, self.stride)))

    def c_code_cache_version(self):
        # Bump the first element when the generated C code changes; include
        # the cuDNN version so caches are per-cuDNN-release.
        return (3, version())
class GpuDnnPool(DnnBase):
    """
    cuDNN-backed pooling.

    Parameters
    ----------
    img
        The image 4d tensor.
    desc
        The pooling descriptor.
    """

    __props__ = ()

    def __init__(self):
        DnnBase.__init__(self, ["dnn_pool.c"], "APPLY_SPECIFIC(dnn_pool)")

    def make_node(self, img, desc):
        img = as_gpuarray_variable(img, infer_context_name(img))

        # When the descriptor's producing Op is visible, check the image rank
        # against it (spatial dims + batch + channel).
        if desc.owner is not None:
            expected_ndim = desc.owner.op.get_ndim() + 2
            if img.type.ndim != expected_ndim:
                raise TypeError('img must be %dD tensor' % (expected_ndim,))

        bad_desc = (not isinstance(desc.type, CDataType) or
                    desc.type.ctype != 'cudnnPoolingDescriptor_t')
        if bad_desc:
            raise TypeError('desc must be cudnnPoolingDescriptor_t')

        return Apply(self, [img, desc], [img.type()])

    def infer_shape(self, node, shape):
        desc_op = node.inputs[1].owner.op
        window = desc_op.ws
        strides = desc_op.stride
        padding = desc_op.pad
        img_shape = shape[0]

        # Batch and channel dims pass through; each spatial dim shrinks by
        # the standard pooling output formula.
        result = [img_shape[0], img_shape[1]]
        for dim in range(len(window)):
            result.append(
                (img_shape[dim + 2] + 2 * padding[dim] - window[dim])
                // strides[dim] + 1)
        return [result]

    def grad(self, inp, grads):
        img, desc = inp
        grad_out, = grads
        grad_out = gpu_contiguous(grad_out)

        pooled = self(img, desc)
        img_grad = GpuDnnPoolGrad()(img, pooled, grad_out, desc)
        return img_grad, theano.gradient.DisconnectedType()()

    def connection_pattern(self, node):
        # Gradient flows only through the image input, not the descriptor.
        return [[1], [0]]
class GpuDnnPoolGrad(DnnBase):
    """
    Gradient of cuDNN pooling.

    Parameters
    ----------
    inp
        The input of the pooling.
    out
        The output of the pooling in the forward.
    out_grad
        Same size as out, but is the corresponding gradient information.
    desc
        The pooling descriptor.
    """

    __props__ = ()

    def __init__(self):
        DnnBase.__init__(self, ["dnn_pool_grad.c"],
                         "APPLY_SPECIFIC(dnn_pool_grad)")

    def make_node(self, inp, out, out_grad, desc):
        ctx_name = infer_context_name(inp, out, out_grad)
        inp = as_gpuarray_variable(inp, ctx_name)
        out_grad = as_gpuarray_variable(out_grad, ctx_name)
        out = as_gpuarray_variable(out, ctx_name)

        # When the descriptor's producing Op is visible, all three tensor
        # inputs must have the matching rank.
        if desc.owner is not None:
            expected_ndim = desc.owner.op.get_ndim() + 2
            if inp.type.ndim != expected_ndim:
                raise TypeError('inp must be %dD tensor' % (expected_ndim,))
            if out_grad.type.ndim != expected_ndim:
                raise TypeError('out_grad must be %dD tensor' %
                                (expected_ndim,))
            if out.type.ndim != expected_ndim:
                raise TypeError('out must be %dD tensor' % (expected_ndim,))

        bad_desc = (not isinstance(desc.type, CDataType) or
                    desc.type.ctype != 'cudnnPoolingDescriptor_t')
        if bad_desc:
            raise TypeError('desc must be cudnnPoolingDescriptor_t')

        return Apply(self, [inp, out, out_grad, desc], [inp.type()])

    def infer_shape(self, node, shape):
        # The gradient w.r.t. the input has the input's shape.
        return [shape[0]]
def dnn_pool(img, ws, stride=(1, 1), mode='max', pad=(0, 0)):
    """
    GPU pooling using cuDNN from NVIDIA.

    The memory layout to use is 'bc01', that is 'batch', 'channel',
    'first dim', 'second dim' in that order.

    `ws`, `stride` and `pad` must have the same length.

    Parameters
    ----------
    img
        Images to do the pooling over.
    ws : tuple
        Subsampling window size.
    stride : tuple
        Subsampling stride (default: (1, 1)).
    mode : {'max', 'average_inc_pad', 'average_exc_pad'}
    pad : tuple
        (padX, padY) or (padX, padY, padZ)
        default: (0, 0)

    .. warning:: The cuDNN library only works with GPU that have a compute
        capability of 3.0 or higer.  This means that older GPU will not
        work with this Op.

    Notes
    -----
    This Op implements the ignore_border=True of max_pool_2d.
    """
    contiguous_img = gpu_contiguous(img)
    pool_desc = GpuDnnPoolDesc(ws=ws, stride=stride, mode=mode, pad=pad)()
    return GpuDnnPool()(contiguous_img, pool_desc)
class GpuDnnSoftmaxBase(DnnBase):
    """
    Op for the cuDNN Softmax.

    Parameters
    ----------
    algo
        'fast' or 'accurate' indicating whether computations should be
        optimized for speed or accuracy respectively.
    mode
        'instance' or 'channel' indicating whether the softmax should be
        computed per image across 'c01' or per spatial location '01' per
        image across 'c'.
    """

    __props__ = ('mode', 'algo')

    def __init__(self, algo, mode):
        # `self.file` and `self.c_func` are provided by subclasses.
        DnnBase.__init__(self, [self.file], self.c_func)

        assert algo in ('fast', 'accurate', 'log')
        if algo == 'log' and version() < 3000:
            raise RuntimeError("Need CuDNN v3 for log-softmax")
        self.algo = algo

        assert mode in ('instance', 'channel')
        self.mode = mode

    def infer_shape(self, node, shape):
        # Forward softmax preserves the input shape; the gradient op's
        # output matches its second input.
        if self.direction == 'forward':
            return [shape[0]]
        return [shape[1]]

    def get_op_params(self):
        mode_flag = ("CUDNN_SOFTMAX_MODE_INSTANCE" if self.mode == 'instance'
                     else "CUDNN_SOFTMAX_MODE_CHANNEL")
        algo_flags = {
            'fast': "CUDNN_SOFTMAX_FAST",
            'log': "CUDNN_SOFTMAX_LOG",
        }
        algo_flag = algo_flags.get(self.algo, "CUDNN_SOFTMAX_ACCURATE")
        return [("SOFTMAX_MODE", mode_flag), ("SOFTMAX_ALGO", algo_flag)]
class GpuDnnSoftmax(GpuDnnSoftmaxBase):
"""
Op for the cuDNN Softmax.
algo
'fast' or 'accurate' indicating whether computations should be
optimized for speed or accuracy respectively.
mode
'instance' or 'channel' indicating whether the softmax should be
computed per image across 'c01' or per spatial location '01' per
image across 'c'.
"""
direction = "forward"
file = "dnn_softmax.c"
c_func = | |
c_1.astype(int)
x11,y11,x12,y12=get_average_line(c_0,img)
x21,y21,x22,y22=get_average_line(c_1,img)
final_avg_lanes = np.zeros_like(img)
cv2.line(final_avg_lanes,(int(x11),int(y11)),(int(x12),int(y12)),(255,255,255),1)
cv2.line(final_avg_lanes,(int(x21),int(y21)),(int(x22),int(y22)),(255,255,255),1)
plt.imsave('final_avg_lanes.jpg', final_avg_lanes, cmap='gray')
plt.show()
def unsharp_mask(image, kernel_size=(5, 5), sigma=1.0, amount=1.0, threshold=0):
    """Return a sharpened version of the image, using an unsharp mask.

    Args:
        image: Input image (typically a uint8 array from cv2).
        kernel_size: Gaussian kernel size used for the blur.
        sigma: Gaussian sigma for the blur.
        amount: Sharpening strength (0 leaves the image visually unchanged).
        threshold: Minimum |image - blurred| difference for a pixel to be
            sharpened; lower-contrast pixels keep their original value.

    Returns:
        The sharpened image as uint8, same shape as `image`.
    """
    blurred = cv2.GaussianBlur(image, kernel_size, sigma)
    sharpened = float(amount + 1) * image - float(amount) * blurred
    # Clamp to the valid byte range before converting back to uint8.
    sharpened = np.clip(sharpened, 0, 255).round().astype(np.uint8)
    if threshold > 0:
        # BUG FIX: subtracting uint8 arrays wraps around (e.g. 3 - 5 == 254),
        # which corrupted the low-contrast mask. Compute the difference in a
        # wide dtype before taking the absolute value.
        diff = np.absolute(image.astype(np.float64) - blurred.astype(np.float64))
        low_contrast_mask = diff < threshold
        np.copyto(sharpened, image, where=low_contrast_mask)
    return sharpened
################################################### old end
# Maps COCO class names to their 1-based COCO label indices (1..80).
# Multi-word names ('dining table', 'cell phone', ...) are listed after the
# single-word names; see coco_class_dic_withspaces for the two-word lookup
# scheme used when parsing free text.
coco_class_dic_with_index = {
    'person': 1,
    'bicycle': 2,
    'car': 3,
    'motorbike': 4,
    'aeroplane': 5,
    'bus': 6,
    'train': 7,
    'truck': 8,
    'boat': 9,
    'bench': 14,
    'bird': 15,
    'cat': 16,
    'dog': 17,
    'horse': 18,
    'sheep': 19,
    'cow': 20,
    'elephant': 21,
    'zebra': 23,
    'giraffe': 24,
    'backpack': 25,
    'umbrella': 26,
    'handbag': 27,
    'tie': 28,
    'suitcase': 29,
    'frisbee': 30,
    'skis': 31,
    'snowboard': 32,
    'kite': 34,
    'skateboard': 37,
    'surfboard': 38,
    'bottle': 40,
    'cup': 42,
    'fork': 43,
    'knife': 44,
    'spoon': 45,
    'bowl': 46,
    'banana': 47,
    'apple': 48,
    'sandwich': 49,
    'orange': 50,
    'broccoli': 51,
    'carrot': 52,
    'pizza': 54,
    'doughnut': 55,
    'cake': 56,
    'chair': 57,
    'sofa': 58,
    'potted plant': 59,
    'bed': 60,
    'dining table': 61,
    'toilet': 62,
    'laptop': 64,
    'mouse': 65,
    'remote': 66,
    'keyboard': 67,
    'microwave': 69,
    'oven': 70,
    'toaster': 71,
    'sink': 72,
    'refrigerator': 73,
    'book': 74,
    'clock': 75,
    'vase': 76,
    'scissors': 77,
    'toothbrush': 80,
    'teddy bear': 78,
    'hair drier': 79,
    'tv monitor': 63,
    'cell phone': 68,
    'hot dog': 53,
    'wine glass': 41,
    'tennis racket': 39,
    'baseball bat': 35,
    'baseball glove': 36,
    'sports ball': 33,
    'traffic light': 10,
    'fire hydrant': 11,
    'stop sign': 12,
    'parking meter': 13
}
# Helper table for recognizing two-word COCO class names in tokenized text.
# The FIRST word of a multi-word class maps to the sentinel -1 ("look at the
# next token"); the SECOND word maps to the real COCO label index.
# E.g. 'teddy' -> -1 signals that 'teddy bear' (78) may follow.
coco_class_dic_withspaces = {
    'teddy': -1, 'bear': 78,
    'hair': -1, 'drier': 79,
    'tv': -1, 'monitor': 63,
    'cell': -1, 'phone': 68,
    'hot': -1, 'dog': 53,
    'wine': -1, 'glass': 41,
    'tennis': -1, 'racket': 39,
    'baseball': -1, 'bat': 35, 'glove': 36,
    'sports': -1, 'ball': 33,
    'traffic': -1, 'light': 10,
    'fire': -1, 'hydrant': 11,
    'stop': -1, 'sign': 12,
    'parking': -1, 'meter': 13,
    'dining': -1, 'table': 61,
    'potted': -1, 'plant': 59
}
def get_center(x1, y1, x2, y2):
    """Return the integer midpoint of the box (x1, y1) - (x2, y2)."""
    center_x = int((x1 + x2) / 2.0)
    center_y = int((y1 + y2) / 2.0)
    return center_x, center_y
def get_index(xc, yc, trackedobjects):
    """Return the ID of the tracked object whose centroid is (xc, yc).

    Args:
        xc, yc: Centroid coordinates to look up (exact match).
        trackedobjects: Mapping of objectID -> (x, y) centroid.

    Returns:
        The first matching objectID in insertion order, or -1 if no
        tracked centroid matches.
    """
    # Dropped the original's unused `index` counter.
    for object_id, centroid in trackedobjects.items():
        if centroid[0] == xc and centroid[1] == yc:
            return object_id
    return -1
class CentroidTracker():
    """Simple centroid-based multi-object tracker.

    Assigns incrementing integer IDs to detections and tracks them across
    frames by greedily matching each existing object's centroid to the
    nearest new detection centroid (Euclidean distance). Objects that stay
    unmatched for more than `maxDisappeared` consecutive frames are dropped.
    """

    def __init__(self, maxDisappeared=50):
        # initialize the next unique object ID along with two ordered
        # dictionaries used to keep track of mapping a given object
        # ID to its centroid and number of consecutive frames it has
        # been marked as "disappeared", respectively
        self.nextObjectID = 0
        self.objects = OrderedDict()
        self.disappeared = OrderedDict()

        # store the number of maximum consecutive frames a given
        # object is allowed to be marked as "disappeared" until we
        # need to deregister the object from tracking
        self.maxDisappeared = maxDisappeared

    def deregister(self, objectID):
        """Stop tracking `objectID`."""
        # to deregister an object ID we delete the object ID from
        # both of our respective dictionaries
        del self.objects[objectID]
        del self.disappeared[objectID]

    def register(self, centroid):
        """Begin tracking a new object at `centroid` under a fresh ID."""
        # when registering an object we use the next available object
        # ID to store the centroid
        self.objects[self.nextObjectID] = centroid
        self.disappeared[self.nextObjectID] = 0
        self.nextObjectID += 1

    def update(self, rects):
        """Update tracks from this frame's detections.

        Args:
            rects: iterable of (startX, startY, endX, endY) bounding boxes.

        Returns:
            OrderedDict mapping objectID -> centroid after matching.
        """
        # check to see if the list of input bounding box rectangles
        # is empty
        if len(rects) == 0:
            # loop over any existing tracked objects and mark them
            # as disappeared
            for objectID in list(self.disappeared.keys()):
                self.disappeared[objectID] += 1

                # if we have reached a maximum number of consecutive
                # frames where a given object has been marked as
                # missing, deregister it
                if self.disappeared[objectID] > self.maxDisappeared:
                    self.deregister(objectID)

            # return early as there are no centroids or tracking info
            # to update
            return self.objects

        # initialize an array of input centroids for the current frame
        inputCentroids = np.zeros((len(rects), 2), dtype="int")

        # loop over the bounding box rectangles
        for (i, (startX, startY, endX, endY)) in enumerate(rects):
            # use the bounding box coordinates to derive the centroid
            cX = int((startX + endX) / 2.0)
            cY = int((startY + endY) / 2.0)
            inputCentroids[i] = (cX, cY)

        # if we are currently not tracking any objects take the input
        # centroids and register each of them
        if len(self.objects) == 0:
            for i in range(0, len(inputCentroids)):
                self.register(inputCentroids[i])

        # otherwise, we are currently tracking objects so we need to
        # try to match the input centroids to existing object
        # centroids
        else:
            # grab the set of object IDs and corresponding centroids
            objectIDs = list(self.objects.keys())
            objectCentroids = list(self.objects.values())

            # compute the distance between each pair of object
            # centroids and input centroids, respectively -- our
            # goal will be to match an input centroid to an existing
            # object centroid
            D = dist.cdist(np.array(objectCentroids), inputCentroids)

            # in order to perform this matching we must (1) find the
            # smallest value in each row and then (2) sort the row
            # indexes based on their minimum values so that the row
            # with the smallest value is at the *front* of the index
            # list
            rows = D.min(axis=1).argsort()

            # next, we perform a similar process on the columns by
            # finding the smallest value in each column and then
            # sorting using the previously computed row index list
            cols = D.argmin(axis=1)[rows]

            # in order to determine if we need to update, register,
            # or deregister an object we need to keep track of which
            # of the rows and column indexes we have already examined
            usedRows = set()
            usedCols = set()

            # loop over the combination of the (row, column) index
            # tuples
            for (row, col) in zip(rows, cols):
                # if we have already examined either the row or
                # column value before, ignore it
                if row in usedRows or col in usedCols:
                    continue

                # otherwise, grab the object ID for the current row,
                # set its new centroid, and reset the disappeared
                # counter
                objectID = objectIDs[row]
                self.objects[objectID] = inputCentroids[col]
                self.disappeared[objectID] = 0

                # indicate that we have examined each of the row and
                # column indexes, respectively
                usedRows.add(row)
                usedCols.add(col)

            # compute both the row and column index we have NOT yet
            # examined
            unusedRows = set(range(0, D.shape[0])).difference(usedRows)
            unusedCols = set(range(0, D.shape[1])).difference(usedCols)

            # in the event that the number of object centroids is
            # equal or greater than the number of input centroids
            # we need to check and see if some of these objects have
            # potentially disappeared
            if D.shape[0] >= D.shape[1]:
                # loop over the unused row indexes
                for row in unusedRows:
                    # grab the object ID for the corresponding row
                    # index and increment the disappeared counter
                    objectID = objectIDs[row]
                    self.disappeared[objectID] += 1

                    # check to see if the number of consecutive
                    # frames the object has been marked "disappeared"
                    # for warrants deregistering the object
                    if self.disappeared[objectID] > self.maxDisappeared:
                        self.deregister(objectID)

            # otherwise, if the number of input centroids is greater
            # than the number of existing object centroids we need to
            # register each new input centroid as a trackable object
            else:
                for col in unusedCols:
                    self.register(inputCentroids[col])

        # return the set of trackable objects
        return self.objects
# functons :
def get_all_coco_class_names(input): # input is a list#this is the funtion needed to extract in main project
# splitting by individual objects :
# this will store all objects individualy :
total_raw_input = input.split() # total_raw_input is a list
print('FROM get_all_coco_class_names :-> \n the given input was : ' + str(total_raw_input))
# new object holder :
objects = {'obj1': 0}
# objects = ['obj1']
objects.pop('obj1') # poping a temp variable
# now we need to check if these objects are in the coco class :
for index, object in enumerate(total_raw_input):
# check to see if the word is in the diconary with spaces :
if coco_class_dic_withspaces.get(object, 0) < 0:
# it is one of the words with spaces : checking the index right after the current one
# need to cater for out of bounds in this segment :
if coco_class_dic_withspaces.get(total_raw_input[index + 1], 0) > 0:
# the next word is in the dic so we need to :
# pop both words and concatinate as one with spaces :
holder = str(object) + " " + str(total_raw_input[index + 1])
# print(holder)
# add it to the list of words :
objects[holder] = coco_class_dic_withspaces.get(total_raw_input[index + 1], 0)
# objects.append(holder)
# checking the word in the regular dic :
elif coco_class_dic_with_index.get(object, 0) > 0:
# the object was found in class
# if the objects class name is with spacing then pop the firs one and add it in the next index :
# print(object)
objects[object] = coco_class_dic_with_index.get(object, 0)
# objects.append(object) # saving the word found in the list in the finial | |
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers["Azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("Azure-AsyncOperation")
)
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
if cls:
return cls(pipeline_response, None, response_headers)
    @distributed_trace_async
    async def begin_post_async_relative_retry400(self, product: JSONType = None, **kwargs: Any) -> AsyncLROPoller[None]:
        """Long running post request, service returns a 202 to the initial request. Poll the endpoint
        indicated in the Azure-AsyncOperation header for operation status.
        :param product: Product to put.
        :type product: JSONType
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns None
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        Example:
            .. code-block:: python
                # JSON input template you can fill out and use as your body input.
                product = {
                    "id": "str",  # Optional. Resource Id.
                    "location": "str",  # Optional. Resource Location.
                    "name": "str",  # Optional. Resource Name.
                    "properties": {
                        "provisioningState": "str",  # Optional.
                        "provisioningStateValues": "str"  # Optional. Possible values
                          include: "Succeeded", "Failed", "canceled", "Accepted", "Creating",
                          "Created", "Updating", "Updated", "Deleting", "Deleted", "OK".
                    },
                    "tags": {
                        "str": "str"  # Optional. A set of tags. Dictionary of
                          :code:`<string>`.
                    },
                    "type": "str"  # Optional. Resource Type.
                }
        """
        # Pop all client-side control keywords before forwarding **kwargs to the pipeline.
        content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]
        polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop("cls", None)  # type: ClsType[None]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
        if cont_token is None:
            # Fresh start (no saved poller state): issue the initial POST.
            # The `cls=lambda x, y, z: x` passthrough keeps the raw PipelineResponse
            # so the poller can read the LRO headers from it.
            raw_result = await self._post_async_relative_retry400_initial(
                product=product, content_type=content_type, cls=lambda x, y, z: x, **kwargs
            )
        # Drop error_map so it is not forwarded to the polling machinery below,
        # which does not accept it.
        kwargs.pop("error_map", None)
        def get_long_running_output(pipeline_response):
            # The operation has no response body; only surface a custom `cls` result.
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True:
            polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False:
            polling_method = AsyncNoPolling()
        else:
            # Caller supplied their own initialized polling object.
            polling_method = polling
        if cont_token:
            # Rehydrate a poller from a previously saved continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
async def _put_error201_no_provisioning_state_payload_initial(
self, product: JSONType = None, **kwargs: Any
) -> JSONType:
cls = kwargs.pop("cls", None) # type: ClsType[JSONType]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
if product is not None:
_json = product
else:
_json = None
request = build_lrosads_put_error201_no_provisioning_state_payload_request_initial(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
if response.content:
deserialized = response.json()
else:
deserialized = None
if response.status_code == 201:
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
    @distributed_trace_async
    async def begin_put_error201_no_provisioning_state_payload(
        self, product: JSONType = None, **kwargs: Any
    ) -> AsyncLROPoller[JSONType]:
        """Long running put request, service returns a 201 to the initial request with no payload.
        :param product: Product to put.
        :type product: JSONType
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns JSON object
        :rtype: ~azure.core.polling.AsyncLROPoller[JSONType]
        :raises: ~azure.core.exceptions.HttpResponseError
        Example:
            .. code-block:: python
                # JSON input template you can fill out and use as your body input.
                product = {
                    "id": "str",  # Optional. Resource Id.
                    "location": "str",  # Optional. Resource Location.
                    "name": "str",  # Optional. Resource Name.
                    "properties": {
                        "provisioningState": "str",  # Optional.
                        "provisioningStateValues": "str"  # Optional. Possible values
                          include: "Succeeded", "Failed", "canceled", "Accepted", "Creating",
                          "Created", "Updating", "Updated", "Deleting", "Deleted", "OK".
                    },
                    "tags": {
                        "str": "str"  # Optional. A set of tags. Dictionary of
                          :code:`<string>`.
                    },
                    "type": "str"  # Optional. Resource Type.
                }
                # response body for status code(s): 200, 201
                response.json() == {
                    "id": "str",  # Optional. Resource Id.
                    "location": "str",  # Optional. Resource Location.
                    "name": "str",  # Optional. Resource Name.
                    "properties": {
                        "provisioningState": "str",  # Optional.
                        "provisioningStateValues": "str"  # Optional. Possible values
                          include: "Succeeded", "Failed", "canceled", "Accepted", "Creating",
                          "Created", "Updating", "Updated", "Deleting", "Deleted", "OK".
                    },
                    "tags": {
                        "str": "str"  # Optional. A set of tags. Dictionary of
                          :code:`<string>`.
                    },
                    "type": "str"  # Optional. Resource Type.
                }
        """
        # Pop all client-side control keywords before forwarding **kwargs to the pipeline.
        content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]
        polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop("cls", None)  # type: ClsType[JSONType]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
        if cont_token is None:
            # Fresh start: issue the initial PUT; the `cls` passthrough keeps the raw
            # PipelineResponse so the poller can inspect it.
            raw_result = await self._put_error201_no_provisioning_state_payload_initial(
                product=product, content_type=content_type, cls=lambda x, y, z: x, **kwargs
            )
        # Drop error_map so it is not forwarded to the polling machinery below.
        kwargs.pop("error_map", None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response body once polling completes.
            response = pipeline_response.http_response
            if response.content:
                deserialized = response.json()
            else:
                deserialized = None
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True:
            polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False:
            polling_method = AsyncNoPolling()
        else:
            # Caller supplied their own initialized polling object.
            polling_method = polling
        if cont_token:
            # Rehydrate a poller from a previously saved continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
async def _put_async_relative_retry_no_status_initial(self, product: JSONType = None, **kwargs: Any) -> JSONType:
cls = kwargs.pop("cls", None) # type: ClsType[JSONType]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
if product is not None:
_json = product
else:
_json = None
request = build_lrosads_put_async_relative_retry_no_status_request_initial(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers["Azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("Azure-AsyncOperation")
)
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
@distributed_trace_async
async def begin_put_async_relative_retry_no_status(
self, product: JSONType = None, **kwargs: Any
) -> AsyncLROPoller[JSONType]:
"""Long running put request, service returns a 200 to the initial request, with an entity that
contains ProvisioningState=’Creating’. Poll the endpoint indicated in the Azure-AsyncOperation
header for operation status.
:param product: Product to put.
:type product: JSONType
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns JSON object
:rtype: ~azure.core.polling.AsyncLROPoller[JSONType]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
product = {
"id": "str", # Optional. Resource Id.
"location": "str", # Optional. Resource Location.
"name": "str", # Optional. Resource Name.
"properties": {
"provisioningState": "str", # Optional.
"provisioningStateValues": "str" # Optional. Possible values
include: "Succeeded", "Failed", "canceled", "Accepted", "Creating",
"Created", "Updating", "Updated", "Deleting", "Deleted", "OK".
},
"tags": {
"str": "str" # Optional. A set of tags. Dictionary of
:code:`<string>`.
},
"type": "str" # Optional. Resource Type.
}
# response body for status code(s): 200
response.json() == {
"id": "str", # Optional. Resource Id.
"location": "str", # Optional. Resource Location.
"name": "str", # Optional. Resource Name.
"properties": {
"provisioningState": "str", # Optional.
"provisioningStateValues": "str" # Optional. Possible values
include: "Succeeded", "Failed", "canceled", "Accepted", "Creating",
"Created", "Updating", "Updated", "Deleting", "Deleted", "OK".
},
"tags": {
"str": "str" # Optional. A set of tags. Dictionary of
:code:`<string>`.
},
"type": "str" # Optional. Resource Type.
}
"""
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
polling = kwargs.pop("polling", True) | |
Requires our :ref:`mypy plugin <mypy-plugins>`.
Read more about async and sync functions:
https://journal.stuffwithstuff.com/2015/02/01/what-color-is-your-function/
"""
@wraps(function)
async def decorator(*args, **kwargs):
return function(*args, **kwargs)
return decorator
# FutureResult
# ============
@final
class FutureResult(BaseContainer, Generic[_ValueType, _ErrorType]):
"""
Container to easily compose ``async`` functions.
Represents a better abstraction over a simple coroutine.
Is framework, event-loop, and IO-library agnostics.
Works with ``asyncio``, ``curio``, ``trio``, or any other tool.
Internally we use ``anyio`` to test
that it works as expected for any io stack.
Note that ``FutureResult[a, b]`` represents a computation
that can fail and returns ``IOResult[a, b]`` type.
Use ``Future[a]`` for operations that cannot fail.
This is a ``Future`` that returns ``Result`` type.
By providing this utility type we make developers' lifes easier.
``FutureResult`` has a lot of composition helpers
to turn complex nested operations into a one function calls.
Tradeoffs
~~~~~~~~~
Due to possible performance issues we move all coroutines definitions
to a separate module.
See also:
https://gcanti.github.io/fp-ts/modules/TaskEither.ts.html
https://zio.dev/docs/overview/overview_basic_concurrency
"""
outer: ClassVar[Type[Future]] = Future
_inner_value: Awaitable[Result[_ValueType, _ErrorType]]
    def __init__(
        self,
        inner_value: Awaitable[Result[_ValueType, _ErrorType]],
    ) -> None:
        """
        Public constructor for this type. Also required for typing.
        :param inner_value: awaitable resolving to a ``Result`` value;
            it is stored as-is and is not scheduled or awaited here.
        .. code:: python
          >>> import anyio
          >>> from returns.future import FutureResult
          >>> from returns.io import IOSuccess
          >>> from returns.result import Success, Result
          >>> async def coro(arg: int) -> Result[int, str]:
          ...     return Success(arg + 1)
          >>> container = FutureResult(coro(1))
          >>> assert anyio.run(container.awaitable) == IOSuccess(2)
        """
        # Storage of the wrapped awaitable is delegated to ``BaseContainer``.
        super().__init__(inner_value)
    def __await__(self) -> Generator[
        Any, Any, IOResult[_ValueType, _ErrorType],
    ]:
        """
        By defining this magic method we make ``FutureResult`` awaitable.
        This means you can use ``await`` keyword to evaluate this container:
        .. code:: python
          >>> import anyio
          >>> from returns.future import FutureResult
          >>> from returns.io import IOSuccess, IOResult
          >>> async def main() -> IOResult[int, str]:
          ...     return await FutureResult.from_value(1)
          >>> assert anyio.run(main) == IOSuccess(1)
        When awaited we returned the value wrapped
        in :class:`returns.io.IOResult` container
        to indicate that the computation was impure and can fail.
        See also:
            https://docs.python.org/3/library/asyncio-task.html#awaitables
            https://www.python.org/dev/peps/pep-0492/#new-abstract-base-classes
        """
        # Delegate to ``awaitable`` so ``await container`` yields an ``IOResult``.
        return self.awaitable().__await__()  # noqa: WPS609
async def awaitable(self) -> IOResult[_ValueType, _ErrorType]:
"""
Transforms ``FutureResult[a, b]`` to ``Awaitable[IOResult[a, b]]``.
Use this method when you need a real coroutine.
Like for ``asyncio.run`` calls.
Note, that returned value will be wrapped
in :class:`returns.io.IOResult` container.
.. code:: python
>>> import anyio
>>> from returns.future import FutureResult
>>> from returns.io import IOSuccess
>>> assert anyio.run(
... FutureResult.from_value(1).awaitable,
... ) == IOSuccess(1)
"""
return IOResult.from_result(await self._inner_value)
def map( # noqa: WPS125
self,
function: Callable[[_ValueType], _NewValueType],
) -> 'FutureResult[_NewValueType, _ErrorType]':
"""
Applies function to the inner value.
Applies 'function' to the contents of the IO instance
and returns a new ``FutureResult`` object containing the result.
'function' should accept a single "normal" (non-container) argument
and return a non-container result.
.. code:: python
>>> import anyio
>>> from returns.future import FutureResult
>>> from returns.io import IOSuccess, IOFailure
>>> def mappable(x: int) -> int:
... return x + 1
>>> assert anyio.run(
... FutureResult.from_value(1).map(mappable).awaitable,
... ) == IOSuccess(2)
>>> assert anyio.run(
... FutureResult.from_failure(1).map(mappable).awaitable,
... ) == IOFailure(1)
"""
return FutureResult(_future_result.async_map(
function, self._inner_value,
))
    def apply(
        self,
        container:
            'FutureResult[Callable[[_ValueType], _NewValueType], _ErrorType]',
    ) -> 'FutureResult[_NewValueType, _ErrorType]':
        """
        Calls a wrapped function in a container on this container.
        :param container: a ``FutureResult`` whose success value is the
            function to apply; a failure in either container short-circuits.
        .. code:: python
          >>> import anyio
          >>> from returns.future import FutureResult
          >>> from returns.io import IOSuccess, IOFailure
          >>> def appliable(x: int) -> int:
          ...     return x + 1
          >>> assert anyio.run(
          ...     FutureResult.from_value(1).apply(
          ...         FutureResult.from_value(appliable),
          ...     ).awaitable,
          ... ) == IOSuccess(2)
          >>> assert anyio.run(
          ...     FutureResult.from_failure(1).apply(
          ...         FutureResult.from_value(appliable),
          ...     ).awaitable,
          ... ) == IOFailure(1)
          >>> assert isinstance(anyio.run(
          ...     FutureResult.from_value(1).apply(
          ...         FutureResult.from_failure(appliable),
          ...     ).awaitable,
          ... ), IOResult.failure_type)
        """
        return FutureResult(_future_result.async_apply(
            container, self._inner_value,
        ))
def bind(
self,
function: Callable[
[_ValueType],
'FutureResult[_NewValueType, _ErrorType]',
],
) -> 'FutureResult[_NewValueType, _ErrorType]':
"""
Applies 'function' to the result of a previous calculation.
'function' should accept a single "normal" (non-container) argument
and return ``Future`` type object.
.. code:: python
>>> import anyio
>>> from returns.future import FutureResult
>>> from returns.io import IOSuccess, IOFailure
>>> def bindable(x: int) -> FutureResult[int, str]:
... return FutureResult.from_value(x + 1)
>>> assert anyio.run(
... FutureResult.from_value(1).bind(bindable).awaitable,
... ) == IOSuccess(2)
>>> assert anyio.run(
... FutureResult.from_failure(1).bind(bindable).awaitable,
... ) == IOFailure(1)
"""
return FutureResult(_future_result.async_bind(
function, self._inner_value,
))
    def bind_async(
        self,
        function: Callable[
            [_ValueType],
            Awaitable['FutureResult[_NewValueType, _ErrorType]'],
        ],
    ) -> 'FutureResult[_NewValueType, _ErrorType]':
        """
        Composes a container and ``async`` function returning container.
        This function should return a container value.
        See :meth:`~FutureResult.bind_awaitable`
        to bind ``async`` function that returns a plain value.
        .. code:: python
          >>> import anyio
          >>> from returns.future import FutureResult
          >>> from returns.io import IOSuccess, IOFailure
          >>> async def coroutine(x: int) -> FutureResult[str, int]:
          ...     return FutureResult.from_value(str(x + 1))
          >>> assert anyio.run(
          ...     FutureResult.from_value(1).bind_async(coroutine).awaitable,
          ... ) == IOSuccess('2')
          >>> assert anyio.run(
          ...     FutureResult.from_failure(1).bind_async(coroutine).awaitable,
          ... ) == IOFailure(1)
        """
        return FutureResult(_future_result.async_bind_async(
            function, self._inner_value,
        ))
def bind_awaitable(
self,
function: Callable[[_ValueType], Awaitable[_NewValueType]],
) -> 'FutureResult[_NewValueType, _ErrorType]':
"""
Allows to compose a container and a regular ``async`` function.
This function should return plain, non-container value.
See :meth:`~FutureResult.bind_async`
to bind ``async`` function that returns a container.
.. code:: python
>>> import anyio
>>> from returns.future import FutureResult
>>> from returns.io import IOSuccess, IOFailure
>>> async def coro(x: int) -> int:
... return x + 1
>>> assert anyio.run(
... FutureResult.from_value(1).bind_awaitable(coro).awaitable,
... ) == IOSuccess(2)
>>> assert anyio.run(
... FutureResult.from_failure(1).bind_awaitable(coro).awaitable,
... ) == IOFailure(1)
"""
return FutureResult(_future_result.async_bind_awaitable(
function, self._inner_value,
))
    def bind_result(
        self,
        function: Callable[[_ValueType], Result[_NewValueType, _ErrorType]],
    ) -> 'FutureResult[_NewValueType, _ErrorType]':
        """
        Binds a function returning ``Result[a, b]`` container.
        The returned ``Result`` is lifted back into ``FutureResult``.
        .. code:: python
          >>> import anyio
          >>> from returns.io import IOSuccess, IOFailure
          >>> from returns.result import Result, Success
          >>> from returns.future import FutureResult
          >>> def bind(inner_value: int) -> Result[int, str]:
          ...     return Success(inner_value + 1)
          >>> assert anyio.run(
          ...     FutureResult.from_value(1).bind_result(bind).awaitable,
          ... ) == IOSuccess(2)
          >>> assert anyio.run(
          ...     FutureResult.from_failure('a').bind_result(bind).awaitable,
          ... ) == IOFailure('a')
        """
        return FutureResult(_future_result.async_bind_result(
            function, self._inner_value,
        ))
    def bind_ioresult(
        self,
        function: Callable[[_ValueType], IOResult[_NewValueType, _ErrorType]],
    ) -> 'FutureResult[_NewValueType, _ErrorType]':
        """
        Binds a function returning ``IOResult[a, b]`` container.
        The returned ``IOResult`` is lifted back into ``FutureResult``.
        .. code:: python
          >>> import anyio
          >>> from returns.io import IOResult, IOSuccess, IOFailure
          >>> from returns.future import FutureResult
          >>> def bind(inner_value: int) -> IOResult[int, str]:
          ...     return IOSuccess(inner_value + 1)
          >>> assert anyio.run(
          ...     FutureResult.from_value(1).bind_ioresult(bind).awaitable,
          ... ) == IOSuccess(2)
          >>> assert anyio.run(
          ...     FutureResult.from_failure('a').bind_ioresult(bind).awaitable,
          ... ) == IOFailure('a')
        """
        return FutureResult(_future_result.async_bind_ioresult(
            function, self._inner_value,
        ))
def bind_io(
self,
function: Callable[[_ValueType], IO[_NewValueType]],
) -> 'FutureResult[_NewValueType, _ErrorType]':
"""
Binds a function returning ``IO[a]`` container.
.. code:: python
>>> import anyio
>>> from returns.io import IO, IOSuccess, IOFailure
>>> from returns.future import FutureResult
>>> def bind(inner_value: int) -> IO[float]:
... return IO(inner_value + 0.5)
>>> assert anyio.run(
... FutureResult.from_value(1).bind_io(bind).awaitable,
... ) == IOSuccess(1.5)
>>> assert anyio.run(
... FutureResult.from_failure(1).bind_io(bind).awaitable,
... ) == IOFailure(1)
"""
return FutureResult(_future_result.async_bind_io(
function, self._inner_value,
))
    def bind_future(
        self,
        function: Callable[[_ValueType], Future[_NewValueType]],
    ) -> 'FutureResult[_NewValueType, _ErrorType]':
        """
        Binds a function returning ``Future[a]`` container.
        The returned ``Future`` is lifted back into ``FutureResult``.
        .. code:: python
          >>> import anyio
          >>> from returns.io import IOSuccess, IOFailure
          >>> from returns.future import Future, FutureResult
          >>> def bind(inner_value: int) -> Future[float]:
          ...     return Future.from_value(inner_value + 0.5)
          >>> assert anyio.run(
          ...     FutureResult.from_value(1).bind_future(bind).awaitable,
          ... ) == IOSuccess(1.5)
          >>> assert anyio.run(
          ...     FutureResult.from_failure(1).bind_future(bind).awaitable,
          ... ) == IOFailure(1)
        """
        return FutureResult(_future_result.async_bind_future(
            function, self._inner_value,
        ))
    def bind_async_future(
        self,
        function: Callable[[_ValueType], Awaitable['Future[_NewValueType]']],
    ) -> 'FutureResult[_NewValueType, _ErrorType]':
        """
        Composes a container and ``async`` function returning ``Future``.
        Similar to :meth:`~FutureResult.bind_future`
        but works with async functions.
        .. code:: python
          >>> import anyio
          >>> from returns.future import Future, FutureResult
          >>> from returns.io import IOSuccess, IOFailure
          >>> async def coroutine(x: int) -> Future[str]:
          ...     return Future.from_value(str(x + 1))
          >>> assert anyio.run(
          ...     FutureResult.from_value(1).bind_async_future(coroutine).awaitable,
          ... ) == IOSuccess('2')
          >>> assert anyio.run(
          ...     FutureResult.from_failure(1).bind_async_future(coroutine).awaitable,
          ... ) == IOFailure(1)
        """
        return FutureResult(_future_result.async_bind_async_future(
            function, self._inner_value,
        ))
def unify(
self,
function: Callable[
[_ValueType], 'FutureResult[_NewValueType, _NewErrorType]',
],
) -> 'FutureResult[_NewValueType, Union[_ErrorType, _NewErrorType]]':
"""
Composes successful container with a function that returns a container.
Similar to :meth:`~FutureResult.bind` but has different type.
It returns ``FutureResult[ValueType, Union[ErrorType, NewErrorType]]``
instead of ``FutureResult[ValueType, ErrorType]``.
So, it can be more useful in some situations.
Probably | |
from bisect import bisect_right, bisect_left
from collections import OrderedDict
from enum import IntEnum, Enum
import math
from math import log10, floor
# Smallest magnitude accepted for query values — presumably guards the log10
# machinery against zero/negative input at the validation sites; TODO confirm.
_MINIMUM_R_VALUE = 1e-200
class RenardSeriesKey(Enum):
    """An enumeration of possible Renard series identifiers.
    Each member's value is a ``(cardinality, precision)`` pair, exposed
    through the :attr:`cardinality` and :attr:`precision` properties.
    """

    R5 = (5, 0.01)
    R10 = (10, 0.01)
    R20 = (20, 0.01)
    R40 = (40, 0.01)
    R80 = (80, 0.01)
    RR10 = (10, 0.05)
    RR20 = (20, 0.05)
    RR40 = (40, 0.05)
    RRR5 = (5, 0.5)
    RRR10 = (10, 0.1)
    RRR20 = (20, 0.1)

    @property
    def cardinality(self):
        """The number of base values in one decade of this series."""
        return self.value[0]

    @property
    def precision(self):
        """The rounding step applied to this series' base values."""
        return self.value[1]
# Module-level aliases so callers can write e.g. ``R20`` instead of
# ``RenardSeriesKey.R20``.
R5 = RenardSeriesKey.R5
R10 = RenardSeriesKey.R10
R20 = RenardSeriesKey.R20
R40 = RenardSeriesKey.R40
R80 = RenardSeriesKey.R80
RR10 = RenardSeriesKey.RR10
RR20 = RenardSeriesKey.RR20
RR40 = RenardSeriesKey.RR40
RRR5 = RenardSeriesKey.RRR5
RRR10 = RenardSeriesKey.RRR10
RRR20 = RenardSeriesKey.RRR20
# One decade ([1, 10)) of base values for each Renard series, keyed by series.
# Values are ascending within each series — lookups below rely on that order.
# Presumably transcribed from the preferred-number tables; verify against the
# standard before editing any entry.
_R = OrderedDict((
    (R5, (1.00, 1.60, 2.50, 4.00, 6.30)),
    (R10, (1.00, 1.25, 1.60, 2.00, 2.50, 3.15, 4.00, 5.00, 6.30, 8.00)),
    (R20, (1.00, 1.12, 1.25, 1.40, 1.60, 1.80, 2.00, 2.24, 2.50, 2.80,
           3.15, 3.55, 4.00, 4.50, 5.00, 5.60, 6.30, 7.10, 8.00, 9.00)),
    (R40, (1.00, 1.06, 1.12, 1.18, 1.25, 1.32, 1.40, 1.50, 1.60, 1.70,
           1.80, 1.90, 2.00, 2.12, 2.24, 2.36, 2.50, 2.65, 2.80, 3.00,
           3.15, 3.35, 3.55, 3.75, 4.00, 4.25, 4.50, 4.75, 5.00, 5.30,
           5.60, 6.00, 6.30, 6.70, 7.10, 7.50, 8.00, 8.50, 9.00, 9.50)),
    (R80, (1.00, 1.03, 1.06, 1.09, 1.12, 1.15, 1.18, 1.22, 1.25, 1.28,
           1.32, 1.36, 1.40, 1.45, 1.50, 1.55, 1.60, 1.65, 1.70, 1.75,
           1.80, 1.85, 1.90, 1.95, 2.00, 2.06, 2.12, 2.18, 2.24, 2.30,
           2.36, 2.43, 2.50, 2.58, 2.65, 2.72, 2.80, 2.90, 3.00, 3.07,
           3.15, 3.25, 3.35, 3.45, 3.55, 3.65, 3.75, 3.87, 4.00, 4.12,
           4.25, 4.37, 4.50, 4.62, 4.75, 4.87, 5.00, 5.15, 5.30, 5.45,
           5.60, 5.80, 6.00, 6.15, 6.30, 6.50, 6.70, 6.90, 7.10, 7.30,
           7.50, 7.75, 8.00, 8.25, 8.50, 8.75, 9.00, 9.25, 9.50, 9.75)),
    (RR10, (1.00, 1.25, 1.60, 2.00, 2.50, 3.20, 4.00, 5.00, 6.30, 8.00)),
    (RR20, (1.00, 1.10, 1.25, 1.40, 1.60, 1.80, 2.00, 2.20, 2.50, 2.80,
            3.20, 3.60, 4.00, 4.50, 5.00, 5.60, 6.30, 7.10, 8.00, 9.00)),
    (RR40, (1.00, 1.05, 1.10, 1.20, 1.25, 1.30, 1.40, 1.50, 1.60, 1.70,
            1.80, 1.90, 2.00, 2.10, 2.20, 2.40, 2.50, 2.60, 2.80, 3.00,
            3.20, 3.40, 3.60, 3.80, 4.00, 4.20, 4.50, 4.80, 5.00, 5.30,
            5.60, 6.00, 6.30, 6.70, 7.10, 7.50, 8.00, 8.50, 9.00, 9.50)),
    (RRR5, (1.0, 1.5, 2.5, 4.0, 6.0)),
    (RRR10, (1.0, 1.2, 1.5, 2.0, 2.5, 3.0, 4.0, 5.0, 6.0, 8.0)),
    (RRR20, (1.0, 1.1, 1.2, 1.4, 1.6, 1.8, 2.0, 2.2, 2.5, 2.8,
             3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 7.0, 8.0, 9.0)),
))
def series(series_key):
    """The base values for the given Renard series.
    Args:
        series_key: A Renard series key such as R20.
    Returns:
        A tuple of base values for the series. For example, for
        R5 the tuple (1.00, 1.60, 2.50, 4.00, 6.30) will be returned.
    Raises:
        ValueError: If no such series exists.
    """
    try:
        return _R[series_key]
    except KeyError:
        # Suppress the internal KeyError context (``from None``): the
        # ValueError message already carries all relevant detail.
        raise ValueError("Renard series {} not found. Available Renard series keys are {}"
                         .format(series_key,
                                 ', '.join(str(key.name) for key in series_keys()))) from None
def precision(series_key):
    """The precision for the given Renard series.
    Args:
        series_key: A Renard series key such as R20.
    Returns:
        The float multiple to which the base values in the series
        have been rounded.
    Raises:
        ValueError: If no such series exists.
    """
    if series_key in _R:
        return series_key.precision
    raise ValueError("Renard series {} not found. Available Renard series keys are {}"
                     .format(series_key,
                             ', '.join(str(key.name) for key in series_keys())))
def series_keys():
    """The available series keys.
    Note:
        The series keys returned will be members of the RenardSeriesKey enumeration.
        These are useful for programmatic use. For constant values consider
        using the module aliases R5, R10, R20, etc.
    Returns:
        A set-like object containing the series-keys (a live view over the
        module's series table).
    """
    return _R.keys()
def series_key_from_name(name):
    """Get a RenardSeriesKey from its name.
    Args:
        name: The series name as a string, for example 'R20'
    Returns:
        A RenardSeriesKey object which can be used as a series_key.
    Raises:
        ValueError: If no such series exists.
    """
    try:
        return RenardSeriesKey[name]
    except KeyError:
        # Suppress the internal KeyError context (``from None``): the
        # ValueError message already carries all relevant detail.
        raise ValueError("Renard series with name {!r} not found. Available Renard series keys are {}"
                         .format(name,
                                 ', '.join(str(key.name) for key in series_keys()))) from None
# Fractional part of log10 for every base value, per series — presumably used
# for decade-independent lookups elsewhere in this module; TODO confirm use site.
LOG10_MANTISSA_E = {num: list(map(lambda x: log10(x) % 1, series)) for num, series in _R.items()}
# Largest ratio between consecutive base values per series; bounds the geometric
# step and sizes the candidate window in find_nearest_few().
GEOMETRIC_SCALE_E = {num: max(b/a for a, b in zip(series, series[1:])) for num, series in _R.items()}
def find_greater_than_or_equal(series_key, value):
    """Find the smallest value greater-than or equal-to the given value.
    Args:
        series_key: A Renard series key such as R20.
        value: The query value.
    Returns:
        The smallest value from the specified series which is greater-than
        or equal-to the query value.
    Raises:
        ValueError: If series_key is not known.
        ValueError: If value is not finite.
        ValueError: If value is out of range.
    """
    # With num=3 the candidates bracket the query value, so the first
    # candidate at or above it is the answer.
    return next(
        (candidate
         for candidate in find_nearest_few(series_key, value, num=3)
         if candidate >= value),
        None,
    )
def find_greater_than(series_key, value):
    """Find the smallest value strictly greater-than the given value.
    Args:
        series_key: A Renard series key such as R20.
        value: The query value.
    Returns:
        The smallest value from the specified series which is greater-than
        the query value.
    Raises:
        ValueError: If series_key is not known.
        ValueError: If value is not finite.
        ValueError: If value is out of range.
    """
    candidates = find_nearest_few(series_key, value, num=3)
    for candidate in candidates:
        if candidate > value:
            return candidate
def find_less_than_or_equal(series_key, value):
    """Find the largest value less-than or equal-to the given value.
    Args:
        series_key: A Renard series key such as R20.
        value: The query value.
    Returns:
        The largest value from the specified series which is less-than
        or equal-to the query value.
    Raises:
        ValueError: If series_key is not known.
        ValueError: If value is not finite.
        ValueError: If value is out of range.
    """
    # Scan from the largest candidate down; the first one at or below the
    # query value is the answer.
    candidates = find_nearest_few(series_key, value, num=3)
    return next(
        (candidate for candidate in reversed(candidates) if candidate <= value),
        None,
    )
def find_less_than(series_key, value):
    """Find the largest value strictly less-than the given value.
    Args:
        series_key: A Renard series key such as R20.
        value: The query value.
    Returns:
        The largest value from the specified series which is less-than
        the query value.
    Raises:
        ValueError: If series_key is not known.
        ValueError: If value is not finite.
        ValueError: If value is out of range.
    """
    candidates = find_nearest_few(series_key, value, num=3)
    for candidate in reversed(candidates):
        if candidate < value:
            return candidate
def find_nearest(series_key, value):
    """Find the nearest value.
    Args:
        series_key: The RenardSeriesKey to use.
        value: The value for which the nearest value is to be found.
    Returns:
        The value in the specified Renard series closest to value.
    Raises:
        ValueError: If series_key is not known.
        ValueError: If value is not finite.
        ValueError: If value is out of range.
    """
    # Delegates to find_nearest_few with num=1 and unwraps the single result.
    return find_nearest_few(series_key, value, num=1)[0]
def find_nearest_few(series_key, value, num=3):
    """Find the nearest values.
    Args:
        series_key: The RenardSeriesKey to use.
        value: The value for which the nearest values are to be found.
        num: The number of nearby values to find: 1, 2 or 3.
    Returns:
        A tuple containing num values. With num == 3 it is guaranteed
        that at least one item is less than value, and one item greater
        than value.
    Raises:
        ValueError: If series_key is not known.
        ValueError: If num is not 1, 2 or 3.
        ValueError: If value is not finite.
        ValueError: If value is out of range.
    """
    if num not in {1, 2, 3}:
        raise ValueError("num {} is not 1, 2 or 3".format(num))
    # Search one-and-a-half geometric steps either side of the query value —
    # wide enough to capture at least three series values around it.
    half_window = pow(GEOMETRIC_SCALE_E[series_key], 1.5)
    candidates = tuple(rrange(series_key, value / half_window, value * half_window))
    return _nearest_n(candidates, value, num)
def rrange(series_key, start, stop):
"""Generate Renard values in a range inclusive of the start and stop values.
Args:
series_key: The RenardSeriesKey to use.
start: The beginning of the range. The yielded values may include this value.
stop: The end of the range. The yielded values may include this value.
Yields:
Values from the specified range which lie between the start and stop
values inclusively, and in order from lowest to highest.
Raises:
ValueError: If series_key is not known.
ValueError: If start is not less-than or equal-to stop.
ValueError: If start or stop are not both finite.
ValueError: If start or stop are out of range.
"""
if not math.isfinite(start):
raise ValueError("Start value {} is not finite".format(start))
if not math.isfinite(stop):
raise ValueError("Stop value | |
import os
from copy import copy, deepcopy
import dateutil.parser
import pystac
from pystac import (STACError, STACObjectType)
from pystac.link import Link, LinkType
from pystac.stac_object import STACObject
from pystac.utils import (is_absolute_href, make_absolute_href, make_relative_href, datetime_to_str,
str_to_datetime)
from pystac.collection import Collection, Provider
class Item(STACObject):
    """An Item is the core granular entity in a STAC, containing the core metadata
    that enables any client to search or crawl online catalogs of spatial 'assets' -
    satellite imagery, derived data, DEM's, etc.
    Args:
        id (str): Provider identifier. Must be unique within the STAC.
        geometry (dict): Defines the full footprint of the asset represented by this item,
            formatted according to `RFC 7946, section 3.1 (GeoJSON)
            <https://tools.ietf.org/html/rfc7946>`_.
        bbox (List[float] or None): Bounding Box of the asset represented by this item using
            either 2D or 3D geometries. The length of the array must be 2*n where n is the
            number of dimensions. Could also be None in the case of a null geometry.
        datetime (datetime or None): Datetime associated with this item. If None,
            a start_datetime and end_datetime must be supplied in the properties.
        properties (dict): A dictionary of additional metadata for the item.
        stac_extensions (List[str]): Optional list of extensions the Item implements.
        href (str or None): Optional HREF for this item, which be set as the item's
            self link's HREF.
        collection (Collection or str): The Collection or Collection ID that this item
            belongs to.
        extra_fields (dict or None): Extra fields that are part of the top-level JSON properties
            of the Item.
    Attributes:
        id (str): Provider identifier. Unique within the STAC.
        geometry (dict): Defines the full footprint of the asset represented by this item,
            formatted according to `RFC 7946, section 3.1 (GeoJSON)
            <https://tools.ietf.org/html/rfc7946>`_.
        bbox (List[float] or None): Bounding Box of the asset represented by this item using
            either 2D or 3D geometries. The length of the array is 2*n where n is the
            number of dimensions. Could also be None in the case of a null geometry.
        datetime (datetime or None): Datetime associated with this item. If None,
            the start_datetime and end_datetime in the common_metadata
            will supply the datetime range of the Item.
        properties (dict): A dictionary of additional metadata for the item.
        stac_extensions (List[str] or None): Optional list of extensions the Item implements.
        collection (Collection or None): Collection that this item is a part of.
        links (List[Link]): A list of :class:`~pystac.Link` objects representing
            all links associated with this STACObject.
        assets (Dict[str, Asset]): Dictionary of asset objects that can be downloaded,
            each with a unique key.
        collection_id (str or None): The Collection ID that this item belongs to, if any.
        extra_fields (dict or None): Extra fields that are part of the top-level JSON properties
            of the Item.
    """
    STAC_OBJECT_TYPE = STACObjectType.ITEM

    def __init__(self,
                 id,
                 geometry,
                 bbox,
                 datetime,
                 properties,
                 stac_extensions=None,
                 href=None,
                 collection=None,
                 extra_fields=None):
        super().__init__(stac_extensions)
        self.id = id
        self.geometry = geometry
        self.bbox = bbox
        self.datetime = datetime
        self.properties = properties
        if extra_fields is None:
            self.extra_fields = {}
        else:
            self.extra_fields = extra_fields
        self.assets = {}
        # The STAC spec requires a datetime range in the properties when the
        # item-level datetime is null.
        if datetime is None:
            if 'start_datetime' not in properties or \
               'end_datetime' not in properties:
                raise STACError('Invalid Item: If datetime is None, '
                                'a start_datetime and end_datetime '
                                'must be supplied in '
                                'the properties.')
        if href is not None:
            self.set_self_href(href)
        # `collection` may be either a resolved Collection (adds a collection
        # link) or a bare string ID.
        if collection is not None:
            if isinstance(collection, Collection):
                self.set_collection(collection)
            else:
                self.collection_id = collection
        else:
            self.collection_id = None

    def __repr__(self):
        return '<Item id={}>'.format(self.id)

    def get_datetime(self, asset=None):
        """Gets an Item or an Asset datetime.
        If an Asset is supplied and the Item property exists on the Asset,
        returns the Asset's value. Otherwise returns the Item's value.
        Returns:
            datetime or None
        """
        if asset is None or 'datetime' not in asset.properties:
            return self.datetime
        else:
            return str_to_datetime(asset.properties.get('datetime'))

    def set_datetime(self, datetime, asset=None):
        """Set an Item or an Asset datetime.
        If an Asset is supplied, sets the property on the Asset.
        Otherwise sets the Item's value.
        """
        if asset is None:
            self.datetime = datetime
        else:
            # Asset-level datetimes are stored serialized, not as datetime objects.
            asset.properties['datetime'] = datetime_to_str(datetime)

    def get_assets(self):
        """Get this item's assets.
        Returns:
            Dict[str, Asset]: A copy of the dictionary of this item's assets.
        """
        return dict(self.assets.items())

    def add_asset(self, key, asset):
        """Adds an Asset to this item.
        Args:
            key (str): The unique key of this asset.
            asset (Asset): The Asset to add.
        """
        asset.set_owner(self)
        self.assets[key] = asset
        return self

    def make_asset_hrefs_relative(self):
        """Modify each asset's HREF to be relative to this item's self HREF.
        Returns:
            Item: self
        Raises:
            STACError: If the item has no self HREF to make the assets relative to.
        """
        self_href = self.get_self_href()
        if self_href is None:
            raise STACError('Cannot make asset HREFs relative if no self_href is set.')
        for asset in self.assets.values():
            asset.href = make_relative_href(asset.href, self_href)
        return self

    def make_asset_hrefs_absolute(self):
        """Modify each asset's HREF to be absolute.
        Any asset HREFs that are relative will be modified to absolute based on this
        item's self HREF.
        Returns:
            Item: self
        """
        # The self HREF is resolved lazily so the lookup (and its possible
        # error) only happens when a relative asset HREF is actually found.
        self_href = None
        for asset in self.assets.values():
            href = asset.href
            if not is_absolute_href(href):
                if self_href is None:
                    self_href = self.get_self_href()
                    if self_href is None:
                        raise STACError('Cannot make relative asset HREFs absolute '
                                        'if no self_href is set.')
                asset.href = make_absolute_href(asset.href, self_href)
        return self

    def set_collection(self, collection, link_type=None):
        """Set the collection of this item.
        This method will replace any existing Collection link and attribute for
        this item.
        Args:
            collection (Collection): The collection to set as this item's collection.
            link_type (str): the link type to use for the collection link.
                One of :class:`~pystac.LinkType`.
        Returns:
            Item: self
        """
        # Preserve the link type of any pre-existing collection link unless
        # one is given explicitly.
        if not link_type:
            prev = self.get_single_link('collection')
            if prev is not None:
                link_type = prev.link_type
            else:
                link_type = LinkType.ABSOLUTE
        self.remove_links('collection')
        self.add_link(Link.collection(collection, link_type=link_type))
        self.collection_id = collection.id
        return self

    def to_dict(self, include_self_link=True):
        """Serialize this Item to a GeoJSON Feature dict.

        Args:
            include_self_link (bool): Whether to include the 'self' link.
        Returns:
            dict: A deep copy of the serialized representation.
        """
        links = self.links
        if not include_self_link:
            links = filter(lambda x: x.rel != 'self', links)
        assets = dict(map(lambda x: (x[0], x[1].to_dict()), self.assets.items()))
        # NOTE(review): this mutates self.properties as a side effect of
        # serialization — confirm that is intended.
        if self.datetime is not None:
            self.properties['datetime'] = datetime_to_str(self.datetime)
        else:
            self.properties['datetime'] = None
        d = {
            'type': 'Feature',
            'stac_version': pystac.get_stac_version(),
            'id': self.id,
            'properties': self.properties,
            'geometry': self.geometry,
            'links': [link.to_dict() for link in links],
            'assets': assets
        }
        if self.bbox is not None:
            d['bbox'] = self.bbox
        if self.stac_extensions is not None:
            d['stac_extensions'] = self.stac_extensions
        if self.collection_id:
            d['collection'] = self.collection_id
        # Top-level extra fields are merged back into the serialized dict.
        for key in self.extra_fields:
            d[key] = self.extra_fields[key]
        return deepcopy(d)

    def clone(self):
        """Return a deep clone of this Item, including its links and assets."""
        clone = Item(id=self.id,
                     geometry=deepcopy(self.geometry),
                     bbox=copy(self.bbox),
                     datetime=copy(self.datetime),
                     properties=deepcopy(self.properties),
                     stac_extensions=deepcopy(self.stac_extensions),
                     collection=self.collection_id)
        for link in self.links:
            clone.add_link(link.clone())
        clone.assets = dict([(k, a.clone()) for (k, a) in self.assets.items()])
        return clone

    def _object_links(self):
        # Links that point at other STAC objects (base plus extension-provided).
        return ['collection'] + (pystac.STAC_EXTENSIONS.get_extended_object_links(self))

    def normalize_hrefs(self, root_href):
        """Set this item's self HREF under root_href, keeping relative asset HREFs valid."""
        if not is_absolute_href(root_href):
            root_href = make_absolute_href(root_href, os.getcwd(), start_is_dir=True)
        old_self_href = self.get_self_href()
        new_self_href = os.path.join(root_href, '{}.json'.format(self.id))
        self.set_self_href(new_self_href)
        # Make sure relative asset links remain valid.
        # This will only work if there is a self href set.
        for asset in self.assets.values():
            asset_href = asset.href
            if not is_absolute_href(asset_href):
                if old_self_href is not None:
                    abs_href = make_absolute_href(asset_href, old_self_href)
                    new_relative_href = make_relative_href(abs_href, new_self_href)
                    asset.href = new_relative_href

    def fully_resolve(self):
        """Resolve every unresolved link to another STAC object on this item."""
        link_rels = set(self._object_links())
        for link in self.links:
            if link.rel in link_rels:
                if not link.is_resolved():
                    link.resolve_stac_object(root=self.get_root())

    @classmethod
    def from_dict(cls, d, href=None, root=None):
        """Construct an Item from its serialized (GeoJSON Feature) dict.

        Args:
            d (dict): The serialized item.
            href (str or None): Self HREF to set if the dict contains no self link.
            root: Unused here; accepted for API symmetry with other from_dict methods.
        Returns:
            Item
        """
        d = deepcopy(d)
        id = d.pop('id')
        geometry = d.pop('geometry')
        properties = d.pop('properties')
        bbox = d.pop('bbox', None)
        # NOTE(review): read with get(), not pop(), so 'stac_extensions' also
        # remains in d and ends up in extra_fields — confirm this is intended.
        stac_extensions = d.get('stac_extensions')
        collection_id = d.pop('collection', None)
        datetime = properties.get('datetime')
        if datetime is not None:
            datetime = dateutil.parser.parse(datetime)
        links = d.pop('links')
        assets = d.pop('assets')
        d.pop('type')
        d.pop('stac_version')
        # Whatever remains in d is preserved as top-level extra fields.
        item = Item(id=id,
                    geometry=geometry,
                    bbox=bbox,
                    datetime=datetime,
                    properties=properties,
                    stac_extensions=stac_extensions,
                    collection=collection_id,
                    extra_fields=d)
        has_self_link = False
        for link in links:
            has_self_link |= link['rel'] == 'self'
            item.add_link(Link.from_dict(link))
        if not has_self_link and href is not None:
            item.add_link(Link.self_href(href))
        for k, v in assets.items():
            asset = Asset.from_dict(v)
            asset.set_owner(item)
            item.assets[k] = asset
        return item

    @property
    def common_metadata(self):
        """Access the item's common metadata fields as a CommonMetadata object
        Returns:
            CommonMetadata: contains all common metadata fields in the items properties
        """
        return CommonMetadata(self.properties)
class Asset:
"""An object that contains a link to data associated with the Item that can be
downloaded or streamed.
Args:
href (str): Link to the asset object. Relative and absolute links are both allowed.
title (str): Optional displayed title for clients and users.
description (str): A description of the Asset providing additional details, such as
how it was processed or created. CommonMark 0.29 syntax MAY be used for rich
text representation.
media_type (str): Optional description of the media type. Registered Media | |
padding otherwise.
Static padding is necessary for ONNX exporting of models. """
if image_size is None:
return Conv2dDynamicSamePadding
else:
return partial(Conv2dStaticSamePadding, image_size=image_size)
class Conv2dDynamicSamePadding(nn.Conv2d):
    """2D convolution with TensorFlow-style 'SAME' padding, recomputed for every input size."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True):
        super().__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)
        # Normalize stride to a two-element list (height, width).
        if len(self.stride) != 2:
            self.stride = [self.stride[0]] * 2

    def forward(self, x):
        in_h, in_w = x.size()[-2:]
        filt_h, filt_w = self.weight.size()[-2:]
        stride_h, stride_w = self.stride
        # Target output size matches TF 'SAME': ceil(input / stride).
        out_h = math.ceil(in_h / stride_h)
        out_w = math.ceil(in_w / stride_w)
        extra_h = max((out_h - 1) * stride_h + (filt_h - 1) * self.dilation[0] + 1 - in_h, 0)
        extra_w = max((out_w - 1) * stride_w + (filt_w - 1) * self.dilation[1] + 1 - in_w, 0)
        if extra_h > 0 or extra_w > 0:
            # Pad asymmetrically (extra pixel on the bottom/right) like TensorFlow.
            x = F.pad(x, [extra_w // 2, extra_w - extra_w // 2,
                          extra_h // 2, extra_h - extra_h // 2])
        return F.conv2d(x, self.weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
class Conv2dStaticSamePadding(nn.Conv2d):
    """2D convolution with TensorFlow-style 'SAME' padding precomputed for a fixed image size."""

    def __init__(self, in_channels, out_channels, kernel_size, image_size=None, **kwargs):
        super().__init__(in_channels, out_channels, kernel_size, **kwargs)
        if len(self.stride) != 2:
            self.stride = [self.stride[0]] * 2

        # The image size is required: the padding is baked in at construction time.
        assert image_size is not None
        if type(image_size) == list:
            in_h, in_w = image_size
        else:
            in_h, in_w = image_size, image_size
        filt_h, filt_w = self.weight.size()[-2:]
        stride_h, stride_w = self.stride
        out_h, out_w = math.ceil(in_h / stride_h), math.ceil(in_w / stride_w)
        extra_h = max((out_h - 1) * stride_h + (filt_h - 1) * self.dilation[0] + 1 - in_h, 0)
        extra_w = max((out_w - 1) * stride_w + (filt_w - 1) * self.dilation[1] + 1 - in_w, 0)
        if extra_h > 0 or extra_w > 0:
            # Extra pixel goes on the bottom/right, matching TensorFlow.
            self.static_padding = nn.ZeroPad2d(
                (extra_w // 2, extra_w - extra_w // 2, extra_h // 2, extra_h - extra_h // 2))
        else:
            self.static_padding = Identity()

    def forward(self, x):
        return F.conv2d(self.static_padding(x), self.weight, self.bias,
                        self.stride, self.padding, self.dilation, self.groups)
class Identity(nn.Module):
    """No-op module: forward returns its input unchanged."""

    def __init__(self):
        super().__init__()

    def forward(self, input):
        return input
########################################################################
############## HELPERS FUNCTIONS FOR LOADING MODEL PARAMS ##############
########################################################################
def efficientnet_params(model_name):
    """Return (width_coefficient, depth_coefficient, resolution, dropout_rate) for a model name.

    Raises:
        KeyError: if model_name is not a known EfficientNet variant.
    """
    coefficients = {
        'efficientnet-b0': (1.0, 1.0, 224, 0.2),
        'efficientnet-b1': (1.0, 1.1, 240, 0.2),
        'efficientnet-b2': (1.1, 1.2, 260, 0.3),
        'efficientnet-b3': (1.2, 1.4, 300, 0.3),
        'efficientnet-b4': (1.4, 1.8, 380, 0.4),
        'efficientnet-b5': (1.6, 2.2, 456, 0.4),
        'efficientnet-b6': (1.8, 2.6, 528, 0.5),
        'efficientnet-b7': (2.0, 3.1, 600, 0.5),
        'efficientnet-b8': (2.2, 3.6, 672, 0.5),
        'efficientnet-l2': (4.3, 5.3, 800, 0.5),
    }
    return coefficients[model_name]
class BlockDecoder(object):
    """ Block Decoder for readability, straight from the official TensorFlow repository """

    @staticmethod
    def _decode_block_string(block_string):
        """Parse one block string (e.g. 'r1_k3_s11_e1_i32_o16_se0.25') into a BlockArgs.

        Raises:
            AssertionError: if the input is not a string or the stride spec is invalid.
        """
        assert isinstance(block_string, str)
        ops = block_string.split('_')
        options = {}
        for op in ops:
            # Split '<key><number...>' into its key and numeric value.
            splits = re.split(r'(\d.*)', op)
            if len(splits) >= 2:
                key, value = splits[:2]
                options[key] = value
        # Check stride: one digit ('s2') or two identical digits ('s22').
        assert (('s' in options and len(options['s']) == 1) or
                (len(options['s']) == 2 and options['s'][0] == options['s'][1]))
        return BlockArgs(
            kernel_size=int(options['k']),
            num_repeat=int(options['r']),
            input_filters=int(options['i']),
            output_filters=int(options['o']),
            expand_ratio=int(options['e']),
            id_skip=('noskip' not in block_string),
            se_ratio=float(options['se']) if 'se' in options else None,
            stride=[int(options['s'][0])])

    @staticmethod
    def _encode_block_string(block):
        """Encodes a block to a string.

        Fixes vs. the previous version:
        - reads ``block.stride`` (the field ``_decode_block_string`` actually
          produces) instead of the non-existent ``block.strides``, falling back
          to ``strides`` for legacy objects; accepts 1- or 2-element stride lists.
        - guards the se_ratio range check so a None se_ratio no longer raises
          TypeError; decode() legitimately produces se_ratio=None.
        """
        stride = getattr(block, 'stride', None)
        if stride is None:
            stride = block.strides  # backward compatibility with legacy objects
        if len(stride) == 1:
            stride = [stride[0], stride[0]]
        args = [
            'r%d' % block.num_repeat,
            'k%d' % block.kernel_size,
            's%d%d' % (stride[0], stride[1]),
            'e%s' % block.expand_ratio,
            'i%d' % block.input_filters,
            'o%d' % block.output_filters
        ]
        if block.se_ratio is not None and 0 < block.se_ratio <= 1:
            args.append('se%s' % block.se_ratio)
        if block.id_skip is False:
            args.append('noskip')
        return '_'.join(args)

    @staticmethod
    def decode(string_list):
        """
        Decodes a list of string notations to specify blocks inside the network.
        :param string_list: a list of strings, each string is a notation of block
        :return: a list of BlockArgs namedtuples of block args
        """
        assert isinstance(string_list, list)
        blocks_args = []
        for block_string in string_list:
            blocks_args.append(BlockDecoder._decode_block_string(block_string))
        return blocks_args

    @staticmethod
    def encode(blocks_args):
        """
        Encodes a list of BlockArgs to a list of strings.
        :param blocks_args: a list of BlockArgs namedtuples of block args
        :return: a list of strings, each string is a notation of block
        """
        block_strings = []
        for block in blocks_args:
            block_strings.append(BlockDecoder._encode_block_string(block))
        return block_strings
def efficientnet(width_coefficient=None, depth_coefficient=None, dropout_rate=0.2,
                 drop_connect_rate=0.2, image_size=None, num_classes=1000):
    """Build the (blocks_args, global_params) description of an EfficientNet.

    Returns:
        Tuple of (list of decoded BlockArgs, GlobalParams namedtuple).
    """
    # Baseline B0 architecture in the compact string notation understood by BlockDecoder.
    block_strings = [
        'r1_k3_s11_e1_i32_o16_se0.25', 'r2_k3_s22_e6_i16_o24_se0.25',
        'r2_k5_s22_e6_i24_o40_se0.25', 'r3_k3_s22_e6_i40_o80_se0.25',
        'r3_k5_s11_e6_i80_o112_se0.25', 'r4_k5_s22_e6_i112_o192_se0.25',
        'r1_k3_s11_e6_i192_o320_se0.25',
    ]
    decoded_blocks = BlockDecoder.decode(block_strings)
    params = GlobalParams(
        batch_norm_momentum=0.99,
        batch_norm_epsilon=1e-3,
        dropout_rate=dropout_rate,
        drop_connect_rate=drop_connect_rate,
        # data_format='channels_last', # removed, this is always true in PyTorch
        num_classes=num_classes,
        width_coefficient=width_coefficient,
        depth_coefficient=depth_coefficient,
        depth_divisor=8,
        min_depth=None,
        image_size=image_size,
    )
    return decoded_blocks, params
def get_model_params(model_name, override_params):
    """Resolve a model name (plus optional overrides) into block args and global params.

    Raises:
        NotImplementedError: if model_name is not an EfficientNet variant.
        ValueError: if override_params contains unknown GlobalParams fields.
    """
    if not model_name.startswith('efficientnet'):
        raise NotImplementedError('model name is not pre-defined: %s' % model_name)
    w, d, s, p = efficientnet_params(model_name)
    # note: all models have drop connect rate = 0.2
    blocks_args, global_params = efficientnet(
        width_coefficient=w, depth_coefficient=d, dropout_rate=p, image_size=s)
    if override_params:
        # ValueError will be raised here if override_params has fields not included in global_params.
        global_params = global_params._replace(**override_params)
    return blocks_args, global_params
# Download URLs for ImageNet-pretrained checkpoints (standard/AutoAugment training).
url_map = {
    'efficientnet-b0': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b0-355c32eb.pth',
    'efficientnet-b1': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b1-f1951068.pth',
    'efficientnet-b2': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b2-8bb594d6.pth',
    'efficientnet-b3': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b3-5fb5a3c3.pth',
    'efficientnet-b4': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b4-6ed6700e.pth',
    'efficientnet-b5': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b5-b6417697.pth',
    'efficientnet-b6': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b6-c76e70fd.pth',
    'efficientnet-b7': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b7-dcc49843.pth',
}
# Checkpoints trained with AdvProp (adversarial propagation); these expect
# different input preprocessing. Note b8 is available only in this map.
url_map_advprop = {
    'efficientnet-b0': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b0-b64d5a18.pth',
    'efficientnet-b1': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b1-0f3ce85a.pth',
    'efficientnet-b2': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b2-6e9d97e5.pth',
    'efficientnet-b3': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b3-cdd7c0f4.pth',
    'efficientnet-b4': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b4-44fb3a87.pth',
    'efficientnet-b5': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b5-86493f6b.pth',
    'efficientnet-b6': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b6-ac80338e.pth',
    'efficientnet-b7': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b7-4652b6dd.pth',
    'efficientnet-b8': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b8-22a8fe65.pth',
}
def load_pretrained_weights(model, model_name, load_fc=True, advprop=False):
    """ Loads pretrained weights, and downloads if loading for the first time. """
    # AdvProp checkpoints use different preprocessing than the standard ones.
    source = url_map_advprop if advprop else url_map
    state_dict = model_zoo.load_url(source[model_name])
    if not load_fc:
        # Drop the classifier head and verify those are the only missing keys.
        state_dict.pop('_fc.weight')
        state_dict.pop('_fc.bias')
        res = model.load_state_dict(state_dict, strict=False)
        assert set(res.missing_keys) == set(['_fc.weight', '_fc.bias']), 'issue loading pretrained weights'
    else:
        model.load_state_dict(state_dict)
    print('Loaded pretrained weights for {}'.format(model_name))
class MBConvBlock(nn.Module):
    """
    Mobile Inverted Residual Bottleneck Block
    Args:
        block_args (namedtuple): BlockArgs, see above
        global_params (namedtuple): GlobalParam, see above
    Attributes:
        has_se (bool): Whether the block contains a Squeeze and Excitation layer.
    """

    def __init__(self, block_args, global_params):
        super().__init__()
        self._block_args = block_args
        # PyTorch's BatchNorm momentum is the complement of the TF-style value.
        self._bn_mom = 1 - global_params.batch_norm_momentum
        self._bn_eps = global_params.batch_norm_epsilon
        self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
        self.id_skip = block_args.id_skip  # skip connection and drop connect
        # Get static or dynamic convolution depending on image size
        Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
        # Expansion phase
        inp = self._block_args.input_filters  # number of input channels
        oup = self._block_args.input_filters * self._block_args.expand_ratio  # number of output channels
        if self._block_args.expand_ratio != 1:
            self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
            self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
        # Depthwise convolution phase
        k = self._block_args.kernel_size
        s = self._block_args.stride
        self._depthwise_conv = Conv2d(
            in_channels=oup, out_channels=oup, groups=oup,  # groups makes it depthwise
            kernel_size=k, stride=s, bias=False)
        self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
        # Squeeze and Excitation layer, if desired
        if self.has_se:
            num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio))
            self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
            self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)
        # Output phase
        final_oup = self._block_args.output_filters
        self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
        self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
        self._swish = MemoryEfficientSwish()

    def forward(self, inputs, drop_connect_rate=None):
        """
        :param inputs: input tensor
        :param drop_connect_rate: drop connect rate (float, between 0 and 1)
        :return: output of block
        """
        # Expansion and Depthwise Convolution
        x = inputs
        if self._block_args.expand_ratio != 1:
            x = self._swish(self._bn0(self._expand_conv(inputs)))
        x = self._swish(self._bn1(self._depthwise_conv(x)))
        # Squeeze and Excitation
        if self.has_se:
            x_squeezed = F.adaptive_avg_pool2d(x, 1)
            x_squeezed = self._se_expand(self._swish(self._se_reduce(x_squeezed)))
            x = torch.sigmoid(x_squeezed) * x
        # No activation after the projection conv (linear bottleneck).
        x = self._bn2(self._project_conv(x))
        # Skip connection and drop connect
        input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
        # NOTE(review): BlockDecoder produces stride as a one-element list, so
        # `stride == 1` is False for list-valued strides — confirm callers
        # normalize stride to an int before this comparison.
        if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
            if drop_connect_rate:
                x = drop_connect(x, p=drop_connect_rate, training=self.training)
            x = x + inputs  # skip connection
        return x

    def set_swish(self, memory_efficient=True):
        """Sets swish function as memory efficient (for training) or standard (for export)"""
        self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
class EfficientNet(nn.Module):
"""
An EfficientNet model. Most easily loaded with the .from_name or .from_pretrained methods
Args:
blocks_args (list): A list of BlockArgs to construct blocks
global_params (namedtuple): A set of GlobalParams shared between blocks
Example:
model = EfficientNet.from_pretrained('efficientnet-b0')
"""
def __init__(self, blocks_args=None, global_params=None):
super().__init__()
assert isinstance(blocks_args, list), 'blocks_args should be a list'
assert len(blocks_args) > 0, 'block args must be greater than 0'
self._global_params = global_params
self._blocks_args = blocks_args
# Get static or dynamic convolution depending on image size
Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
# Batch norm parameters
bn_mom = 1 - self._global_params.batch_norm_momentum
bn_eps = self._global_params.batch_norm_epsilon
# Stem
in_channels = 3 # rgb
out_channels = | |
null=True, max_digits=5, decimal_places=3)
fuel_price = models.DecimalField(_('price'), null=True, max_digits=15, decimal_places=2)
#-------------- Part ---------------
part_chg_km = models.IntegerField(_('replacement interval, km'), null=True)
part_chg_mo = models.IntegerField(_('replacement interval, months'), null=True)
#-------------- Repl ---------------
repl_manuf = models.CharField(_('manufacturer'), max_length=1000, null=True, blank=True)
repl_part_num = models.CharField(_('catalog number'), max_length=100, null=True, blank=True)
repl_descr = models.CharField(_('name'), max_length=1000, null=True, blank=True)
#------------- Health --------------
diagnosis = models.CharField(_('diagnosis'), max_length=1000, blank=True)
bio_height = models.IntegerField(_('height, cm'), blank=True, null=True)
bio_weight = models.DecimalField(_('weight, kg'), blank=True, null=True, max_digits=5, decimal_places=1)
bio_temp = models.DecimalField(_('temperature'), blank=True, null=True, max_digits=4, decimal_places=1)
bio_waist = models.IntegerField(_('waist circumference'), blank=True, null=True)
bio_systolic = models.IntegerField(_('systolic blood pressure'), blank=True, null=True)
bio_diastolic = models.IntegerField(_('diastolic blood pressure'), blank=True, null=True)
bio_pulse = models.IntegerField(_('the number of heartbeats per minute'), blank=True, null=True)
#------------- Warranty --------------
months = models.IntegerField(_('warranty termin, months'), blank=True, null=True, default=12)
# -------------
    class Meta:
        # Human-readable model names shown in the Django admin.
        verbose_name = _('task')
        verbose_name_plural = _('tasks')
    def __str__(self):
        """Display the task by its name."""
        return self.name
@classmethod
def use_name(cls, app, role):
ret = True
if (app == APP_APART):
ret = (role != ROLE_METER) and (role != ROLE_PRICE) and (role != ROLE_BILL)
if (app == APP_FUEL):
ret = (role != ROLE_FUEL) and (role != ROLE_SERVICE)
return ret
@classmethod
def get_nav_role(cls, app):
nav_role = ''
if (app == APP_APART):
nav_role = ROLE_APART
if (app == APP_FUEL):
nav_role = ROLE_CAR
return nav_role
@classmethod
def set_active_nav_item(cls, user_id, app, active_nav_item_id):
nav_role = cls.get_nav_role(app)
if (not nav_role or not active_nav_item_id):
return None
nav_items = Task.get_role_tasks(user_id, app, nav_role)
nav_items.update(active=False)
nav_item = nav_items.filter(id=active_nav_item_id).get()
nav_item.active = True
nav_item.save()
return nav_item
@classmethod
def get_active_nav_item(cls, user_id, app):
nav_role = cls.get_nav_role(app)
if nav_role:
nav_items = Task.get_role_tasks(user_id, app, nav_role)
if nav_items.filter(active=True).exists():
return nav_items.filter(active=True).order_by('name')[0]
if (len(nav_items) > 0):
return nav_items.order_by('name')[0]
return None
@classmethod
def get_role_tasks(cls, user_id, app, role, nav_item=None):
if user_id:
data = Task.objects.filter(user=user_id)
else:
data = Task.objects.all()
if nav_item:
data = data.filter(task_1=nav_item.id)
if (app != APP_ALL):
role_id = ROLES_IDS[app][role]
if (app == APP_TODO):
data = data.filter(app_task=role_id)
if (app == APP_NOTE):
data = data.filter(app_note=role_id)
if (app == APP_NEWS):
data = data.filter(app_news=role_id)
if (app == APP_STORE):
data = data.filter(app_store=role_id)
if (app == APP_DOCS):
data = data.filter(app_doc=role_id)
if (app == APP_WARR):
data = data.filter(app_warr=role_id)
if (app == APP_EXPEN):
data = data.filter(app_expen=role_id)
if (app == APP_TRIP):
data = data.filter(app_trip=role_id)
if (app == APP_FUEL):
data = data.filter(app_fuel=role_id)
if (app == APP_APART):
data = data.filter(app_apart=role_id)
if (app == APP_HEALTH):
data = data.filter(app_health=role_id)
if (app == APP_WORK):
data = data.filter(app_work=role_id)
if (app == APP_PHOTO):
data = data.filter(app_photo=role_id)
return data
def set_item_attr(self, app, attr):
if not self.item_attr:
value = {}
else:
value = json.loads(self.item_attr)
value[app] = attr
self.item_attr = json.dumps(value)
self.save()
def get_item_attr(self):
if self.item_attr:
return json.loads(self.item_attr)
return {}
def get_roles(self):
roles = []
for app in ROLES_IDS:
app_field = None
if (app == APP_TODO):
app_field = self.app_task
elif (app == APP_NOTE):
app_field = self.app_note
elif (app == APP_NEWS):
app_field = self.app_news
elif (app == APP_STORE):
app_field = self.app_store
elif (app == APP_DOCS):
app_field = self.app_doc
elif (app == APP_WARR):
app_field = self.app_warr
elif (app == APP_EXPEN):
app_field = self.app_expen
elif (app == APP_TRIP):
app_field = self.app_trip
elif (app == APP_FUEL):
app_field = self.app_fuel
elif (app == APP_APART):
app_field = self.app_apart
elif (app == APP_HEALTH):
app_field = self.app_health
elif (app == APP_WORK):
app_field = self.app_work
elif (app == APP_PHOTO):
app_field = self.app_photo
if app_field:
base_role = list(ROLES_IDS[app].values())[0]
for role in ROLES_IDS[app]:
if (app_field == ROLES_IDS[app][role]):
url_role = None
if (role != ROLE_BY_NUM[base_role]):
url_role = role
icon = ROLE_ICON[role]
href = self.get_url_for_app(app, url_role)
roles.append({'icon': icon, 'href': href, 'name': role})
return roles
def get_absolute_url(self):
roles = self.get_roles()
if (len(roles) < 1):
return '/'
return roles[0]['href']
def get_url_for_app(self, app, role):
if not app:
return '/'
id = self.id
try:
if role:
url = reverse(app + ':' + role + '-item', args = [id])
else:
url = reverse(app + ':item', args = [id])
return url
except NoReverseMatch:
return '/'
    def toggle_completed(self):
        """Flip this task's completed flag; for repeating tasks spawn the next iteration.

        Returns:
            The newly created successor Task when a repeating task was just
            completed (and no other open task with the same name exists),
            otherwise None.
        """
        next = None
        if (not self.completed) and self.repeat:
            if not self.start:
                self.start = self.stop  # For a repeating task, remember the deadline that is specified in the first iteration in order to use it to adjust the next steps
            next = self.next_iteration()
        self.completed = not self.completed
        if self.completed:
            if not self.stop:
                self.stop = datetime.now()
            self.completion = datetime.now()
        else:
            # Re-opening the task clears its completion timestamp.
            self.completion = None
        self.save()
        self.correct_groups_qty(GIQ_CMP_TASK)
        next_task = None
        if self.completed and next:  # Completed a stage of a recurring task and set a deadline for the next iteration
            # Only spawn a successor when no other open task with the same name exists.
            if not Task.objects.filter(user=self.user, app_task=self.app_task, name=self.name, completed=False).exists():
                next_task = Task.objects.create(user=self.user, app_task=self.app_task, name=self.name,
                                                start=self.start, stop=next, important=self.important,
                                                remind=self.next_remind_time(), repeat=self.repeat, repeat_num=self.repeat_num,
                                                repeat_days=self.repeat_days, categories=self.categories, info=self.info)
                next_task.set_item_attr(APP_TODO, next_task.get_info(ROLE_TODO))
                # Copy the group membership of the original task to the successor.
                if TaskGroup.objects.filter(task=self.id, role=ROLE_TODO).exists():
                    group = TaskGroup.objects.filter(task=self.id, role=ROLE_TODO).get().group
                    next_task.correct_groups_qty(GIQ_ADD_TASK, group.id)
        return next_task
    def get_attach_path(self, role):
        """Build the filesystem directory for this task's attachments.

        *role* may also be an app id, in which case the app's first (base)
        role is used. The default layout '<app>/<role>_<id>/' is overridden
        with human-readable paths for the Apart, Fuel and Warranty apps.
        """
        if role in ROLES_IDS.keys():
            # An app id was passed: fall back to that app's base role.
            app = role
            role = list(ROLES_IDS[app].keys())[0]
        else:
            app = ROLE_APP[role]
        # Generic default; specialized below per app/role combination.
        ret = app + '/' + role + '_' + str(self.id)
        if (app == APP_APART):
            match (role, self.app_apart):
                case (const.ROLE_APART, const.NUM_ROLE_APART):
                    ret = APP_APART + '/' + self.name
                case (const.ROLE_PRICE, const.NUM_ROLE_PRICE):
                    ret = APP_APART + '/' + self.task_1.name + '/price/' + APART_SERVICE[self.price_service] + '/' + self.start.strftime('%Y.%m.%d')
                case (const.ROLE_METER, const.NUM_ROLE_METER):
                    ret = APP_APART + '/' + self.task_1.name + '/meter/' + str(self.start.year) + '/' + str(self.start.month).zfill(2)
                case (const.ROLE_BILL, const.NUM_ROLE_BILL):
                    ret = APP_APART + '/' + self.task_1.name + '/bill/' + str(self.start.year) + '/' + str(self.start.month).zfill(2)
        if (app == APP_FUEL):
            match (role, self.app_fuel):
                case (const.ROLE_CAR, const.NUM_ROLE_CAR):
                    ret = APP_FUEL + '/' + self.name + '/car'
                case (const.ROLE_PART, const.NUM_ROLE_PART):
                    ret = APP_FUEL + '/' + self.task_1.name + '/part/' + self.name
                case (const.ROLE_SERVICE, const.NUM_ROLE_SERVICE):
                    ret = APP_FUEL + '/' + self.task_1.name + '/service/' + self.task_2.name + '/' + self.event.strftime('%Y.%m.%d')
                case (const.ROLE_FUEL, const.NUM_ROLE_FUEL):
                    ret = APP_FUEL + '/' + self.task_1.name + '/fuel/' + self.event.strftime('%Y.%m.%d')
        if (app == APP_WARR):
            match (role, self.app_warr):
                case (const.ROLE_WARR, const.NUM_ROLE_WARR):
                    # Sanitize characters that are invalid in directory names.
                    ret = APP_WARR + '/' + self.name.replace('/', '_').replace('\\', '_').replace(':', '_').replace('*', '_').replace('?', '_').replace('«', '_').replace('<', '_').replace('>', '_').replace('|', '_')
        return storage_path.format(self.user.username) + 'attachments/' + ret + '/'
def get_files_list(self, role):
    """Return the attachment files stored under this task's path for *role*."""
    return get_files_list_by_path(role, self.id, self.get_attach_path(role))
def get_info(self, role=ROLE_TODO):
    """Build the info structure shown for this task in list views.

    Returns a dict with an 'attr' list of badge descriptors (icons and
    texts) and, when the task belongs to a group for *role*, a 'group'
    entry with the group name. Badge groups are separated by a
    {'icon': 'separator'} entry.
    """
    ret = {'attr': []}
    attrs = ret['attr']

    def add_separator():
        # Visually separate badge groups, but never start with a separator.
        if attrs:
            attrs.append({'icon': 'separator'})

    # One query instead of the previous exists() + get() pair.
    task_group = TaskGroup.objects.filter(task=self.id, role=role).first()
    if task_group:
        ret['group'] = task_group.group.name
    if self.in_my_day:
        attrs.append({'myday': True})
    # Count total and completed steps of this task.
    step_total = 0
    step_completed = 0
    for step in Step.objects.filter(task=self.id):
        step_total += 1
        if step.completed:
            step_completed += 1
    if step_total > 0:
        add_separator()
        attrs.append({'text': '{} {} {}'.format(step_completed, _('out of'), step_total)})
    if self.stop:
        attrs.append({'termin': True})
    # exists() avoids materializing the whole queryset just for a count.
    links = Urls.objects.filter(task=self.id).exists()
    files = len(self.get_files_list(role)) > 0
    if (self.remind is not None) or self.info or links or files:
        add_separator()
        if self.remind is not None:
            attrs.append({'icon': 'remind'})
        if links:
            attrs.append({'icon': 'url'})
        if files:
            attrs.append({'icon': 'attach'})
        if self.info:
            # Truncate long notes to a short preview.
            info_descr = self.info[:80]
            if len(self.info) > 80:
                info_descr += '...'
            attrs.append({'icon': 'notes', 'text': info_descr})
    if self.categories:
        add_separator()
        for categ in get_categories_list(self.categories):
            attrs.append({'icon': 'category', 'text': categ.name, 'color': 'category-design-' + categ.design})
    if self.completed:
        add_separator()
        attrs.append({'text': '{}: {}'.format(_('completion').capitalize(), self.completion.strftime('%d.%m.%Y') if self.completion else '')})
    return ret
def next_iteration(self):
    """Return the date of the task's next repetition, or None when the
    task has no deadline or no repetition rule.

    NOTE(review): the daily/weekly branches keep self.stop's type
    (stop + timedelta), while the adjusted monthly and annual branches
    return a plain date — confirm callers tolerate both.
    """
    if not (self.stop and self.repeat):
        return None
    # 'next_date' instead of the previous name 'next', which shadowed
    # the builtin.
    next_date = None
    if self.repeat == DAILY:
        next_date = self.stop + timedelta(self.repeat_num)
    elif self.repeat == WEEKLY:
        next_date = self.stop + timedelta(self.repeat_num * 7)
    elif self.repeat == MONTHLY:
        next_date = add_months(self.stop, self.repeat_num)
        if self.start and (next_date.day != self.start.day):
            # For monthly repetition the day of the next iteration must
            # coincide with the day of the first iteration. Relevant for
            # tasks with a due date at the end of the month.
            last_day = calendar.monthrange(next_date.year, next_date.month)[1]
            day = last_day if last_day < self.start.day else self.start.day
            next_date = date(next_date.year, next_date.month, day)
    elif self.repeat == ANNUALLY:
        d = self.stop.day
        m = self.stop.month
        y = self.stop.year + self.repeat_num
        last_day = calendar.monthrange(y, m)[1]
        if d > last_day:  # e.g. 29.02 in a non-leap year
            d = last_day
        next_date = date(y, m, d)
    return next_date
def b_expired(self):
    """True when the task's deadline lies in the past.

    A deadline set for today at exactly 00:00 is treated as an all-day
    deadline and does not count as expired yet.
    """
    if self.completed or not self.stop:
        return False
    is_past = self.stop < datetime.now()
    all_day_today = (self.stop.date() == date.today()
                     and self.stop.hour == 0
                     and self.stop.minute == 0)
    return is_past and not all_day_today
def task_actual(self):
if self.completed:
return False
if self.stop:
return (self.stop > datetime.now()) or ((self.stop.date() == date.today()) | |
# End to end happy path to test the minimum set of Reefer Container Shipment reference application components

###################
##### IMPORTS #####
###################
import unittest, os, json, time, requests, random
from kafka.KcProducer import KafkaProducer
from kafka.KcConsumer import KafkaConsumer

##############################
##### READ ENV VARIABLES #####
##############################

def _read_env(name, default=None, message=None):
    """Read environment variable *name*.

    On a missing variable, print *message* (when given) and return
    *default*; a variable with no default is mandatory and aborts the
    whole test run with exit code 1.
    """
    try:
        return os.environ[name]
    except KeyError:
        if message:
            print(message)
        if default is None:
            exit(1)
        return default

# Kafka connection settings (only KAFKA_BROKERS is mandatory).
KAFKA_BROKERS = _read_env(
    'KAFKA_BROKERS',
    message="The KAFKA_BROKERS environment variable needs to be set.")
KAFKA_ENV = _read_env('KAFKA_ENV', default='LOCAL')
KAFKA_APIKEY = _read_env(
    'KAFKA_APIKEY', default='',
    message="The KAFKA_APIKEY environment variable not set... assume local deployment")

# Microservice endpoints under test.
CONTAINER_SPRING_MS = _read_env(
    'CONTAINER_SPRING_MS', default="springcontainerms:8080",
    message="The CONTAINER_SPRING_MS environment variable not set... assume local deployment")
VOYAGE_MS = _read_env(
    'VOYAGE_MS', default="voyages:3000",
    message="The VOYAGE_MS environment variable not set... assume local deployment")
ORDER_CMD_MS = _read_env(
    'ORDER_CMD_MS', default="ordercmd:9080",
    message="The ORDER_CMD_MS environment variable not set... assume local deployment")
ORDER_QUERY_MS = _read_env(
    'ORDER_QUERY_MS', default="orderquery:9080",
    message="The ORDER_QUERY_MS environment variable not set... assume local deployment")

# Kafka topics used by the integration tests.
ORDERS_TOPIC = _read_env(
    'ITGTESTS_ORDERS_TOPIC', default="orders",
    message="The ITGTESTS_ORDERS_TOPIC environment variable not set... assume local deployment")
ORDER_COMMANDS_TOPIC = _read_env(
    'ITGTESTS_ORDER_COMMANDS_TOPIC', default="order-commands",
    message="The ITGTESTS_ORDER_COMMANDS_TOPIC environment variable not set... assume local deployment")
CONTAINERS_TOPIC = _read_env(
    'ITGTESTS_CONTAINERS_TOPIC', default="containers",
    message="The ITGTESTS_CONTAINERS_TOPIC environment variable not set... assume local deployment")

# Shared state across the ordered test methods and the reporting hooks.
ORDER_ID = ""
CONTAINER_ID = str(random.randrange(10000))
number_of_tests = 0
number_of_test_failed = 0
results_file = None

#####################
##### UNIT TEST #####
#####################
class SagaNoVoyage(unittest.TestCase):
########################################## Reporting ############################################
@classmethod
def setUpClass(cls):
    """Open the shared results file (append mode) and write the header."""
    global results_file
    results_file = open("/tmp/results.txt", "a")
    header = 'TEST CASE - ' + cls.__name__ + '\n' + '-----------------------------------\n'
    results_file.write(header)
def setUp(self):
    """Count the test and log its bare method name (no newline yet —
    tearDown appends the ...OK/...FAILED suffix)."""
    global number_of_tests
    number_of_tests = number_of_tests + 1
    # self.id() is 'module.Class.test_name'; keep only the method name.
    method_name = self.id().split('.')[2]
    results_file.write(method_name)
def tearDown(self):
    """Append '...OK' or '...FAILED' for the finished test to the
    results file and bump the failure counter on failure.

    NOTE(review): this relies on private unittest internals
    (self._outcome, self._feedErrorsToResult), whose shape differs
    between Python versions — confirm against the interpreter in use.
    """
    global number_of_test_failed
    result = self.defaultTestResult()
    # Transfer the outcome collected during the test into a TestResult
    # object whose errors/failures lists we can inspect.
    self._feedErrorsToResult(result, self._outcome.errors)
    error = self.list2reason(result.errors)
    failure = self.list2reason(result.failures)
    # The test passed only if it produced neither an error nor a failure.
    ok = not error and not failure
    if not ok:
        results_file.write('...FAILED\n')
        number_of_test_failed += 1
    else:
        results_file.write('...OK\n')
@classmethod
def tearDownClass(cls):
    """Write the summary footer for this test case and close the file."""
    global results_file
    separator = '-----------------------------------\n'
    # NOTE(review): 'PASSED' is written as the total number of executed
    # tests, not total minus failures — confirm this is intentional.
    footer = (separator
              + 'PASSED: ' + str(number_of_tests) + '\n'
              + 'FAILED: ' + str(number_of_test_failed) + '\n\n')
    results_file.write(footer)
    results_file.close()
def list2reason(self, exc_list):
    """Return the traceback text of the last entry in *exc_list* when it
    belongs to this test instance; otherwise None."""
    if not exc_list:
        return None
    last_test, last_reason = exc_list[-1]
    return last_reason if last_test is self else None
#################################################################################################
def test1_createContainer(self):
    """Create a container via a Kafka event and verify it end to end:
    the event round-trips through the containers topic and the container
    microservice exposes the expected (empty) container object."""
    print('-------------------------------')
    print('-- [TEST] : Create container --')
    print('-------------------------------\n')
    print("1 - Load the container event from json file")
    # 'with' guarantees the file is closed even when an assertion fails
    # mid-test (the previous open()/close() pair leaked on failure).
    with open('../data/containerCreateEvent.json', 'r') as f:
        new_container = json.load(f)
    # Verify we have read a container
    self.assertIsNotNone(new_container)
    # Provide the timestamp for the creation time of the container/event
    new_container['timestamp'] = int(time.time())
    self.assertGreater(new_container['timestamp'], 0)
    # Provide the container ID
    new_container['containerID'] = CONTAINER_ID
    new_container['payload']['containerID'] = CONTAINER_ID
    # Capacity big enough so that it can not be carried in any of the
    # existing voyages
    new_container['payload']['capacity'] = 50000
    print("Container event to be sent:")
    print(json.dumps(new_container, indent=4, sort_keys=True))
    print("Done\n")
    print("2 - Post container event into the containers topic")
    # Producer that talks to Kafka/Event Streams
    kp = KafkaProducer(KAFKA_ENV, KAFKA_BROKERS, KAFKA_APIKEY)
    self.assertIsNotNone(kp)
    kp.prepareProducer("ProduceContainerPython")
    self.assertIsNotNone(kp.producer)
    # Publish the create container event
    kp.publishEvent(CONTAINERS_TOPIC, new_container, "containerID")
    print("Done\n")
    print("Sleeping for 5 secs\n")
    # Give the event time to propagate before consuming it back
    time.sleep(5)
    print("3 - Read container event from the containers topic")
    kc = KafkaConsumer(KAFKA_ENV, KAFKA_BROKERS, KAFKA_APIKEY, CONTAINERS_TOPIC)
    self.assertIsNotNone(kc)
    kc.prepareConsumer()
    self.assertIsNotNone(kc.consumer)
    # Read next event in the topic by key
    read_container = kc.pollNextEventByKey(CONTAINER_ID)
    self.assertIsNotNone(read_container)
    print("This is the container event read:")
    print(json.dumps(read_container, indent=4, sort_keys=True))
    kc.close()
    print("Done\n")
    print("4 - Compare events")
    # The event sent and the event read back must be identical
    self.assertEqual(sorted(new_container.items()), sorted(read_container.items()))
    print("Done\n")
    print("5 - Read container object from the container microservice's API endpoint")
    response = requests.get("http://" + CONTAINER_SPRING_MS + "/containers")
    self.assertIsNotNone(response)
    json_data = json.loads(response.text)
    # At least one container must exist
    self.assertGreater(len(json_data['content']), 0)
    # The latest container is the one we just created
    api_container = json_data['content'][-1]
    self.assertIsNotNone(api_container)
    print("This is the API container object")
    print(json.dumps(api_container, indent=4, sort_keys=True))
    print("Done\n")
    print("6 - Read expected empty container from json file")
    with open('../data/containerEmptyEvent.json', 'r') as f2:
        expected_container = json.load(f2)
    self.assertIsNotNone(expected_container)
    # For simplicity, we will not work out timestamps
    expected_container['createdAt'] = api_container['createdAt']
    expected_container['updatedAt'] = api_container['updatedAt']
    # Assign the containerID and the capacity used above
    expected_container['id'] = CONTAINER_ID
    expected_container['capacity'] = 50000
    print("This is the expected container object:")
    print(json.dumps(expected_container, indent=4, sort_keys=True))
    print("Done\n")
    print("7 - Compare Containers")
    # The API object must match the expected container object
    self.assertEqual(sorted(expected_container.items()), sorted(api_container.items()))
    print("Done\n")
def test2_createOrder(self):
print('-----------------------------')
print('--- [TEST] : Create order ---')
print('-----------------------------\n')
# We must use the global scope variable as this value will be used throughout the entire test
global ORDER_ID
print("1 - Load the order request from json")
# Open file to read
f = open('../data/FreshProductOrder.json','r')
# Load the order to be sent
order = json.load(f)
# Setting appropriate quantity for the order
# so that it fits into the container created in test1
# but can not fit into any existing voyage
order['quantity'] = 50000
# Close the file
f.close()
print("Done\n")
print("2 - Create order by POST to order microservice's API endpoint")
res = requests.post("http://" + ORDER_CMD_MS + "/orders",json=order)
# Get the request response as a JSON object
orderCommand = json.loads(res.text)
# Grab the orderID from the JSON object
ORDER_ID = orderCommand['orderID']
print("The order ID for the order created is: {}".format(ORDER_ID))
# Verify ORDER_ID is not None
self.assertIsNotNone(ORDER_ID)
# Verify ORDER_ID is not an empty string
self.assertNotEqual(str(ORDER_ID),"")
print("Done\n")
print("Sleeping for 5 secs\n")
time.sleep(10)
print("3 - Make sure a new order command event was delivered into the order-commands topic")
# Create a KafkaConsumer object to interact with Kafka/Event Streams
kc = KafkaConsumer(KAFKA_ENV,KAFKA_BROKERS,KAFKA_APIKEY,ORDER_COMMANDS_TOPIC)
# Verify we have a KafkaConsumer object
self.assertIsNotNone(kc)
kc.prepareConsumer()
# Verify the consumer has been created
self.assertIsNotNone(kc.consumer)
# Read next event in the topic by key
order_command = kc.pollNextEventByKey(ORDER_ID)
# Verify an order command event object is read
self.assertIsNotNone(order_command)
# Removing the timestamp from the comparison since we can't know what time exactly it was created at
order_command['timestampMillis'] = ""
print("This is the order command event read from the topic:")
print(json.dumps(order_command, indent=4, sort_keys=True))
# Close the Kafka/Event Streams consumer
kc.close()
print("Done\n")
print("4 - Load the expected order command event from json file")
# Open file to read
f = open('../data/orderCommandEvent.json','r')
# Load expected order command event
expected_order_command = json.load(f)
# Verify we have read a container
self.assertIsNotNone(expected_order_command)
# Assign the orderID
expected_order_command['payload']['orderID'] = ORDER_ID
# Setting the quantity appropriately
expected_order_command['payload']['quantity'] = 50000
print("The expected order command event is:")
print(json.dumps(expected_order_command, indent=4, sort_keys=True))
# Close the file
f.close()
print("Done\n")
print("5 - Verify order command event")
# Verify order command event read from the topic is as expected
self.assertEqual(sorted(expected_order_command.items()),sorted(order_command.items()))
print("Done\n")
print("Sleeping for 5 secs\n")
time.sleep(10)
print("6 - Make sure a new order event was delivered into the orders topic")
# Create a KafkaConsumer object to interact with Kafka/Event Streams
kc = KafkaConsumer(KAFKA_ENV,KAFKA_BROKERS,KAFKA_APIKEY,ORDERS_TOPIC)
# Verify we have a KafkaConsumer object
self.assertIsNotNone(kc)
kc.prepareConsumer()
# Verify the consumer has been created
self.assertIsNotNone(kc.consumer)
# Read next event in the topic by key
order = kc.pollNextEventByKey(ORDER_ID)
# Verify an order command event object is read
self.assertIsNotNone(order)
# Removing the timestamp from the comparison since we can't know what time exactly it was created at
order['timestampMillis'] = ""
print("This is the order event read from the topic:")
print(json.dumps(order, indent=4, sort_keys=True))
# Close the Kafka/Event Streams consumer
kc.close()
print("Done\n")
print("7 - Load the expected order event from json file")
# Open file to read
f = open('../data/orderCreatedEvent.json','r')
# Load expected order event
expected_order = json.load(f)
# Verify we have read a container
self.assertIsNotNone(expected_order)
# Assign orderID
expected_order['payload']['orderID'] = ORDER_ID
# Setting quantity appropriately
expected_order['payload']['quantity'] = 50000
print("The expected order event is:")
print(json.dumps(expected_order, indent=4, sort_keys=True))
# Close the file
f.close()
print("Done\n")
print("8 - Verify order event")
# Verify order event read from the topic | |
<filename>viroconcom/plot.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Plots datasets, model fits and contour coordinates.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
__all__ = ["plot_sample", "plot_marginal_fit", "plot_dependence_functions",
"plot_contour", "SamplePlotData", "plot_confidence_interval",
"plot_wave_breaking_limit", "hs_from_limiting_sig_wave_steepness"]
def plot_sample(plotted_sample, ax=None, do_plot_rasterized=True):
    """
    Plots the sample of metocean data.

    Parameters
    ----------
    plotted_sample : SamplePlotData,
        The sample that should be plotted and its meta information.
    ax : matplotlib Axes, optional
        Axes to draw on; defaults to plotted_sample.ax.
    do_plot_rasterized : bool, optional
        Whether the scatter points are rasterized (defaults to True).
    """
    ps = plotted_sample
    if ax is None:
        ax = ps.ax
    split_by_contour = ps.x_inside is not None and ps.y_inside is not None
    if split_by_contour:
        # Points inside and outside the contour get distinct markers.
        ax.scatter(ps.x_inside, ps.y_inside, s=11, alpha=0.5, c='k',
                   marker='o', label='inside contour',
                   rasterized=do_plot_rasterized)
        ax.scatter(ps.x_outside, ps.y_outside, s=9, alpha=0.5, c='r',
                   marker='D', label='outside contour',
                   rasterized=do_plot_rasterized)
    else:
        legend_label = ps.label if ps.label else 'observation'
        ax.scatter(ps.x, ps.y, s=40, alpha=0.5, c='k', marker='.',
                   label=legend_label, rasterized=do_plot_rasterized)
def plot_marginal_fit(sample, dist, fig, ax=None, label=None, color_sample='k',
                      marker_sample='x', marker_size_sample=3, color_fit='b',
                      dataset_char='?',legend_fontsize=8):
    """
    Plots the fitted marginal distribution versus a dataset in a quantile-
    quantile (QQ) plot.

    Parameters
    ----------
    sample : ndarray of floats
        The environmental data sample that should be plotted against the fit.
    dist : Distribution
        The distribution that has been fitted.
    fig : matplotlib Figure
        Figure object that shall be used for the plot.
    ax : matplotlib Axes, optional
        Axes object on the figure that shall be used for the plot. Defaults
        to None, in which case a new subplot is added to *fig*.
    label : string
        Description of the random variable / sample, e.g. '$h_s$ (m)';
        used in the axis labels.
    color_sample : color (char, string or RGB)
        Color of the sample's markers (matplotlib conventions).
    marker_sample : char
        Marker style for the sample (matplotlib conventions).
    marker_size_sample : int
        Marker size for the sample (matplotlib conventions).
    color_fit : color (char, string or RGB)
        Color of the fitted line (matplotlib conventions).
    dataset_char : char
        Character, which is the name of the dataset, e.g. 'A'; shown in
        the legend.
    legend_fontsize : int
        Fontsize of the legend text.
    """
    if ax is None:
        ax = fig.add_subplot(111)
    plt.sca(ax)
    # probplot draws the ordered sample as the axes' first line and the
    # least-squares fit as the second; they are restyled below.
    stats.probplot(sample, dist=dist, plot=ax)
    ax.get_lines()[0].set_markerfacecolor(color_sample)
    ax.get_lines()[0].set_markeredgecolor(color_sample)
    ax.get_lines()[0].set_marker(marker_sample)
    ax.get_lines()[0].set_markersize(marker_size_sample)
    ax.get_lines()[1].set_color(color_fit)
    # Remove the default 'Probability Plot' title set by probplot.
    ax.title.set_text('')
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    # Heuristic axis limits — presumably tuned for wave-height-like
    # samples (max < 12) vs. larger-valued samples; TODO confirm.
    if max(sample) < 12:
        plt.xlim((0, 12))
        plt.ylim((0, 15.5))
    else:
        plt.xlim((0, 35))
        plt.ylim((0, 35))
    # Build a legend entry showing the fitted parameter values for the
    # distribution families that are explicitly supported.
    if dist.name == 'ExponentiatedWeibull':
        dist_description = 'Exponentiated Weibull\n' \
                           '($\\alpha$=' + str('%.3g' % dist.scale(0)) + ', ' \
                           '$\\beta$=' + str('%.3g' % dist.shape(0)) + ', ' \
                           '$\\delta$=' + str('%.3g' % dist.shape2(0)) +')'
    elif dist.name == 'Weibull':
        dist_description = 'Weibull, ' \
                           '$\\alpha$=' + str('%.3g' % dist.scale(0)) + ', ' \
                           '$\\beta$=' + str('%.3g' % dist.shape(0))
    else:
        # Fall back to the plain distribution name.
        dist_description = dist.name
    plt.legend(['Dataset '+ dataset_char, dist_description], loc='upper left',
               frameon=False, prop={'size': legend_fontsize})
    plt.xlabel('Theoretical quantiles, ' + str(label).lower())
    plt.ylabel('Ordered values, ' + str(label).lower())
def plot_dependence_functions(
        fit, fig, ax1=None, ax2=None, unconditonal_variable_label=None,
        marker_discrete='o', markersize_discrete=5,
        markerfacecolor_discrete='lightgray', markeredgecolor_discrete='k',
        style_dependence_function='b-', legend_fontsize=8):
    """
    Plots the fitted dependence function using two subplots, one subplot showing
    the fit of the scale value and one subplot showing the fit of the shape
    value.

    This function only works if the conditional distribution is an
    (exponentiated) Weibull distribution or a lognormal distribution.

    Parameters
    ----------
    fit : Fit
        The fit whose second (conditional) distribution is plotted.
    fig : matplotlib Figure
        Figure object that shall be used for the plot.
    ax1 : matplotlib Axes, defaults to None
        Axes for the scale-parameter subplot; created on *fig* when None.
    ax2 : matplotlib Axes, defaults to None
        Axes for the shape-parameter subplot; created on *fig* when None.
    unconditonal_variable_label : str, defaults to None
        X-axis label, i.e. the name of the unconditional variable.
    marker_discrete : char, defaults to 'o'
        Marker for the parameter values from the individual marginal fits.
    markersize_discrete : int, defaults to 5
    markerfacecolor_discrete : color (char, string or RGB), defaults to 'lightgray'
    markeredgecolor_discrete : color (char, string or RGB), defaults to 'k'
    style_dependence_function : str, defaults to 'b-'
        Style of the fitted dependence function.
    legend_fontsize : int, defaults to 8
        Font size of the legend's text.

    Raises
    ------
    NotImplementedError
        If the distribution that shall be plotted is not supported yet.
    """
    supported_dists = ['ExponentiatedWeibull', 'Lognormal', 'Weibull']
    if fit.mul_var_dist.distributions[1].name not in supported_dists:
        # NOTE(review): the message contains a doubled 'not not' — typo,
        # but it is a runtime string and is kept unchanged here.
        raise NotImplementedError(
            'The distribution you tried to plot is not not supported in '
            'plot_dependence_functions. You used the distribution {}'
            ' .'.format(fit.mul_var_dist.distributions[1]))
    if ax1 is None:
        ax1 = fig.add_subplot(121)
    if ax2 is None:
        ax2 = fig.add_subplot(122)

    # --- Left subplot: fit of the scale parameter. ---
    plt.sca(ax1)
    scale_at = fit.multiple_fit_inspection_data[1].scale_at
    x1 = np.linspace(0, max(scale_at)*1.1, 100)
    # Build a LaTeX label for the fitted scale dependence function.
    # NOTE(review): '\cdot', '\ln', '\sqrt' below are invalid escape
    # sequences in non-raw strings (DeprecationWarning); raw strings
    # would be safer.
    if fit.mul_var_dist.distributions[1].scale.func_name == 'power3':
        dp_function = '$' + str('%.3g' % fit.mul_var_dist.distributions[1].scale.a) + \
                      '+' + str('%.3g' % fit.mul_var_dist.distributions[1].scale.b) + \
                      '\cdot h_s^{' + str('%.3g' % fit.mul_var_dist.distributions[1].scale.c) + '}$'
    elif fit.mul_var_dist.distributions[1].scale.func_name == 'lnsquare2':
        dp_function = '$\ln(' + str('%.3g' % fit.mul_var_dist.distributions[1].scale.a) + \
                      '+' + str('%.3g' % fit.mul_var_dist.distributions[1].scale.b) + \
                      '\sqrt{h_s / g})$'
    elif fit.mul_var_dist.distributions[1].scale.func_name == 'alpha3':
        dp_function = '$(' + str('%.3g' % fit.mul_var_dist.distributions[1].scale.a) + \
                      '+' + str('%.3g' % fit.mul_var_dist.distributions[1].scale.b) + \
                      '\cdot v^{' + str('%.3g' % fit.mul_var_dist.distributions[1].scale.c) + \
                      '}) / 2.0445^{(1 / \\beta_{hs})}$'
    else:
        # Fall back to the dependence function's own string representation.
        dp_function = str(fit.mul_var_dist.distributions[1].scale)
    if fit.mul_var_dist.distributions[1].name == 'Lognormal':
        # Plot log(scale), labelled μ — presumably scale = exp(μ) for the
        # lognormal parameterization; TODO confirm.
        plt.plot(scale_at, np.log(fit.multiple_fit_inspection_data[1].scale_value),
                 marker_discrete,
                 markersize=markersize_discrete,
                 markerfacecolor=markerfacecolor_discrete,
                 markeredgecolor=markeredgecolor_discrete,
                 label='from marginal distribution')
        plt.plot(x1, np.log(fit.mul_var_dist.distributions[1].scale(x1)),
                 style_dependence_function, label=dp_function)
        ylabel = '$μ_{tz}$'
        plt.xlim((0, 6))
        plt.ylim((0.9, 2.15))
    if fit.mul_var_dist.distributions[1].name == 'Weibull' or \
            fit.mul_var_dist.distributions[1].name == 'ExponentiatedWeibull':
        plt.plot(scale_at, fit.multiple_fit_inspection_data[1].scale_value,
                 marker_discrete,
                 markersize=markersize_discrete,
                 markerfacecolor=markerfacecolor_discrete,
                 markeredgecolor=markeredgecolor_discrete,
                 label='from marginal distribution')
        plt.plot(x1, fit.mul_var_dist.distributions[1].scale(x1),
                 style_dependence_function, label=dp_function)
        ylabel = '$α_{hs}$'
        plt.xlim((0, 30))
        plt.ylim((0, 10))
    plt.xlabel(unconditonal_variable_label)
    plt.legend(frameon=False, prop={'size': legend_fontsize})
    ax1.spines['right'].set_visible(False)
    ax1.spines['top'].set_visible(False)
    plt.ylabel(ylabel)

    # --- Right subplot: fit of the shape parameter. ---
    plt.sca(ax2)
    shape_at = fit.multiple_fit_inspection_data[1].shape_at
    x1 = np.linspace(0, max(shape_at)*1.1, 100)
    plt.plot(shape_at, fit.multiple_fit_inspection_data[1].shape_value,
             marker_discrete,
             markersize=markersize_discrete,
             markerfacecolor=markerfacecolor_discrete,
             markeredgecolor=markeredgecolor_discrete,)
    plt.plot(x1, fit.mul_var_dist.distributions[1].shape(x1),
             style_dependence_function)
    plt.xlabel(unconditonal_variable_label)
    # NOTE(review): if the shape's func_name matches none of the branches
    # below, dp_function silently keeps the scale label built above.
    if fit.mul_var_dist.distributions[1].name == 'Lognormal':
        plt.xlim((0, 6))
        plt.ylim((0.065, 0.33))
        ylabel = '$σ_{tz}$'
        if fit.mul_var_dist.distributions[1].shape.func_name == 'exp3':
            dp_function = '$' + str('%.3g' % fit.mul_var_dist.distributions[1].shape.a) + \
                          '+' + str('%.3g' % fit.mul_var_dist.distributions[1].shape.b) + \
                          '\exp (' + str('%.3g' % fit.mul_var_dist.distributions[1].shape.c) + \
                          'h_s)$'
        elif fit.mul_var_dist.distributions[1].shape.func_name == 'powerdecrease3':
            dp_function = '$' + str('%.4f' % fit.mul_var_dist.distributions[1].shape.a) + \
                          '+ 1 / (h_s + ' + str('%.3g' % fit.mul_var_dist.distributions[1].shape.b) + \
                          ')^{' + str('%.3g' % fit.mul_var_dist.distributions[1].shape.c) + \
                          '}$'
        elif fit.mul_var_dist.distributions[1].shape.func_name == 'asymdecrease3':
            dp_function = '$' + str('%.4f' % fit.mul_var_dist.distributions[1].shape.a) + \
                          ' + ' + str('%.3g' % fit.mul_var_dist.distributions[1].shape.b) + \
                          ' / (1 + ' + str('%.3g' % fit.mul_var_dist.distributions[1].shape.c) + \
                          ' h_s )$'
    if fit.mul_var_dist.distributions[1].name == 'Weibull' or \
            fit.mul_var_dist.distributions[1].name == 'ExponentiatedWeibull':
        ylabel = '$β_{h_s}$'
        plt.xlim((0, 30))
        plt.ylim((0, 3.5))
        if fit.mul_var_dist.distributions[1].shape.func_name == 'power3':
            dp_function = '$' + str('%.3g' % fit.mul_var_dist.distributions[1].shape.a) + \
                          '+' + str('%.3g' % fit.mul_var_dist.distributions[1].shape.b) + \
                          '\cdot h_s^{' + str('%.3g' % fit.mul_var_dist.distributions[1].shape.c) + '}$'
        elif fit.mul_var_dist.distributions[1].shape.func_name == 'logistics4':
            # logistics4 uses np.abs(c), to display it nicer, abs(c) is shown.
            absOfC = np.abs(fit.mul_var_dist.distributions[1].shape.c)
            dp_function = '$' + str('%.3g' % fit.mul_var_dist.distributions[1].shape.a) + \
                          '+' + str('%.3g' % fit.mul_var_dist.distributions[1].shape.b) + \
                          '/ [1 + e^{-' + str('%.3g' % absOfC) + \
                          '(v - ' + str('%.3g' % fit.mul_var_dist.distributions[1].shape.d) + \
                          ')}]$'
        else:
            dp_function = str(fit.mul_var_dist.distributions[1].shape)
    plt.legend(['from marginal distribution', dp_function], frameon=False, prop={'size': legend_fontsize})
    ax2.spines['right'].set_visible(False)
    ax2.spines['top'].set_visible(False)
    plt.ylabel(ylabel)
def plot_contour(x, y, ax, contour_label=None, x_label=None, y_label=None,
line_style='b-', alpha=1, plotted_sample=None, x_lim = None,
upper_ylim=None, median_x=None, median_y=None, median_style='r-',
median_label='median of x2|x1'):
"""
Plots the environmental contour.
The method expects the coordinates to be ordered by angle.
Parameters
----------
x : ndarray of floats
The contour's coordinates in the x-direction.
y : ndarray of floats
The contour's coordiantes in the y-direction.
ax : Axes
Axes of the figure where the contour should be plotted.
contour_label : str, optional (defaults to None)
The environmental contour's label that will be used in the legend.
x_label : str, optional (defaults to None)
Label for the x-axis.
y_label : str, optional (defaults to None)
Label for the y-axis.
line_style : str, optional (defaults to 'b-')
Matplotlib line style.
alpha : float, optional (default to 1)
Alpha value (transparency) for the contour's line.
plotted_sample : SamplePlotData, optional (defaults to None)
The sample that should be plotted and its meta information.
x_lim : tuple of floats, optional (defaults to None)
x-Axis limit.
upper_ylim : float, optional (defaults to None)
y-Axis limit.
median_x : ndarray of floats, optional (defaults to None)
If the median of x2|x1 should be plotted, these are the x-values.
median_y : ndarray of floats, optional (defaults to None)
If the median of x2|x1 should be plotted, these are the y-values.
median_style : str, optional (defaults to 'r-')
Matplotlib line style for plotting the median of x2|x1.
median_label : str, optional (defaults to 'median | |
"LetterSpace": "\uF754",
"LibraSign": "\u264E",
"LightBulb": "\uF723",
"Limit": "\uF438",
"LineSeparator": "\u2028",
"LongDash": "\u2014",
"LongEqual": "\uF7D9",
"LongLeftArrow": "\u27F5",
"LongLeftRightArrow": "\u27F7",
"LongRightArrow": "\u27F6",
"LowerLeftArrow": "\u2199",
"LowerRightArrow": "\u2198",
"Mars": "\u2642",
"MathematicaIcon": "\uF757",
"MaxLimit": "\uF439",
"MeasuredAngle": "\u2221",
"MediumSpace": "\u205F",
"Mercury": "\u263F",
"Mho": "\u2127",
"Micro": "\u00B5",
"MinLimit": "\uF43A",
"Minus": "\u2212",
"MinusPlus": "\u2213",
"Mod1Key": "\uF7D6",
"Mod2Key": "\uF7D7",
"Moon": "\u263E",
"Mu": "\u03BC",
"NHacek": "\u0148",
"NTilde": "\u00F1",
"Nand": "\u22BC",
"Natural": "\u266E",
"NegativeMediumSpace": "\uF383",
"NegativeThickSpace": "\uF384",
"NegativeThinSpace": "\uF382",
"NegativeVeryThinSpace": "\uF380",
"Neptune": "\u2646",
"NestedGreaterGreater": "\u2AA2",
"NestedLessLess": "\u2AA1",
"NeutralSmiley": "\uF722",
"NewLine": "\u000A",
"NoBreak": "\u2060",
"NonBreakingSpace": "\u00A0",
"Nor": "\u22BD",
"Not": "\u00AC",
"NotCongruent": "\u2262",
"NotCupCap": "\u226D",
"NotDoubleVerticalBar": "\u2226",
"NotElement": "\u2209",
"NotEqual": "\u2260",
"NotEqualTilde": "\uF400",
"NotExists": "\u2204",
"NotGreater": "\u226F",
"NotGreaterEqual": "\u2271",
"NotGreaterFullEqual": "\u2269",
"NotGreaterGreater": "\uF427",
"NotGreaterLess": "\u2279",
"NotGreaterSlantEqual": "\uF429",
"NotGreaterTilde": "\u2275",
"NotHumpDownHump": "\uF402",
"NotHumpEqual": "\uF401",
"NotLeftTriangle": "\u22EA",
"NotLeftTriangleBar": "\uF412",
"NotLeftTriangleEqual": "\u22EC",
"NotLess": "\u226E",
"NotLessEqual": "\u2270",
"NotLessFullEqual": "\u2268",
"NotLessGreater": "\u2278",
"NotLessLess": "\uF422",
"NotLessSlantEqual": "\uF424",
"NotLessTilde": "\u2274",
"NotNestedGreaterGreater": "\uF428",
"NotNestedLessLess": "\uF423",
"NotPrecedes": "\u2280",
"NotPrecedesEqual": "\uF42B",
"NotPrecedesSlantEqual": "\u22E0",
"NotPrecedesTilde": "\u22E8",
"NotReverseElement": "\u220C",
"NotRightTriangle": "\u22EB",
"NotRightTriangleBar": "\uF413",
"NotRightTriangleEqual": "\u22ED",
"NotSquareSubset": "\uF42E",
"NotSquareSubsetEqual": "\u22E2",
"NotSquareSuperset": "\uF42F",
"NotSquareSupersetEqual": "\u22E3",
"NotSubset": "\u2284",
"NotSubsetEqual": "\u2288",
"NotSucceeds": "\u2281",
"NotSucceedsEqual": "\uF42D",
"NotSucceedsSlantEqual": "\u22E1",
"NotSucceedsTilde": "\u22E9",
"NotSuperset": "\u2285",
"NotSupersetEqual": "\u2289",
"NotTilde": "\u2241",
"NotTildeEqual": "\u2244",
"NotTildeFullEqual": "\u2247",
"NotTildeTilde": "\u2249",
"NotVerticalBar": "\u2224",
"Nu": "\u03BD",
"Null": "\uF3A0",
"NumberSign": "\uF724",
"OAcute": "\u00F3",
"ODoubleAcute": "\u0151",
"ODoubleDot": "\u00F6",
"OE": "\u0153",
"OGrave": "\u00F2",
"OHat": "\u00F4",
"OSlash": "\u00F8",
"OTilde": "\u00F5",
"Omega": "\u03C9",
"Omicron": "\u03BF",
"OpenCurlyDoubleQuote": "\u201C",
"OpenCurlyQuote": "\u2018",
"OptionKey": "\uF7D2",
"Or": "\u2228",
"OverBrace": "\uFE37",
"OverBracket": "\u23B4",
"OverParenthesis": "\uFE35",
"PageBreakAbove": "\uF3BD",
"PageBreakBelow": "\uF3BE",
"Paragraph": "\u00B6",
"ParagraphSeparator": "\u2029",
"PartialD": "\u2202",
"PermutationProduct": "\uF3DE",
"Perpendicular": "\u27C2",
"Phi": "\u03D5",
"Pi": "\u03C0",
"Piecewise": "\uF361",
"PiscesSign": "\u2653",
"Placeholder": "\uF528",
"PlusMinus": "\u00B1",
"Pluto": "\u2647",
"Precedes": "\u227A",
"PrecedesEqual": "\u2AAF",
"PrecedesSlantEqual": "\u227C",
"PrecedesTilde": "\u227E",
"Prime": "\u2032",
"ProbabilityPr": "\uF3DC",
"Product": "\u220F",
"Proportion": "\u2237",
"Proportional": "\u221D",
"Psi": "\u03C8",
"QuarterNote": "\u2669",
"RHacek": "\u0159",
"RawAmpersand": "\u0026",
"RawAt": "\u0040",
"RawBackquote": "\u0060",
"RawBackslash": "\u005C",
"RawColon": "\u003A",
"RawComma": "\u002C",
"RawDash": "\u002D",
"RawDollar": "\u0024",
"RawDot": "\u002E",
"RawDoubleQuote": "\u0022",
"RawEqual": "\u003D",
"RawEscape": "\u001B",
"RawExclamation": "\u0021",
"RawGreater": "\u003E",
"RawLeftBrace": "\u007B",
"RawLeftBracket": "\u005B",
"RawLeftParenthesis": "\u0028",
"RawLess": "\u003C",
"RawNumberSign": "\u0023",
"RawPercent": "\u0025",
"RawPlus": "\u002B",
"RawQuestion": "\u003F",
"RawQuote": "\u0027",
"RawReturn": "\u000D",
"RawRightBrace": "\u007D",
"RawRightBracket": "\u005D",
"RawRightParenthesis": "\u0029",
"RawSemicolon": "\u003B",
"RawSlash": "\u002F",
"RawSpace": "\u0020",
"RawStar": "\u002A",
"RawTab": "\u0009",
"RawTilde": "\u007E",
"RawUnderscore": "\u005F",
"RawVerticalBar": "\u007C",
"RawWedge": "\u005E",
"RegisteredTrademark": "\u00AE",
"ReturnIndicator": "\u21B5",
"ReturnKey": "\uF766",
"ReverseDoublePrime": "\u2036",
"ReverseElement": "\u220B",
"ReverseEquilibrium": "\u21CB",
"ReversePrime": "\u2035",
"ReverseUpEquilibrium": "\u296F",
"Rho": "\u03C1",
"RightAngle": "\u221F",
"RightAngleBracket": "\u232A",
"RightArrow": "\u2192",
"RightArrowBar": "\u21E5",
"RightArrowLeftArrow": "\u21C4",
"RightBracketingBar": "\uF604",
"RightCeiling": "\u2309",
"RightDoubleBracket": "\u301B",
"RightDoubleBracketingBar": "\uF606",
"RightDownTeeVector": "\u295D",
"RightDownVector": "\u21C2",
"RightDownVectorBar": "\u2955",
"RightFloor": "\u230B",
"RightGuillemet": "\u00BB",
"RightModified": "\uF76C",
"RightPointer": "\u25B8",
"RightSkeleton": "\uF762",
"RightTee": "\u22A2",
"RightTeeArrow": "\u21A6",
"RightTeeVector": "\u295B",
"RightTriangle": "\u22B3",
"RightTriangleBar": "\u29D0",
"RightTriangleEqual": "\u22B5",
"RightUpDownVector": "\u294F",
"RightUpTeeVector": "\u295C",
"RightUpVector": "\u21BE",
"RightUpVectorBar": "\u2954",
"RightVector": "\u21C0",
"RightVectorBar": "\u2953",
"RoundImplies": "\u2970",
"RoundSpaceIndicator": "\uF3B2",
"Rule": "\uF522",
"RuleDelayed": "\uF51F",
"SHacek": "\u0161",
"SZ": "\u00DF",
"SadSmiley": "\u2639",
"SagittariusSign": "\u2650",
"Sampi": "\u03E0",
"Saturn": "\u2644",
"ScorpioSign": "\u264F",
"ScriptA": "\uF6B2",
"ScriptB": "\uF6B3",
"ScriptC": "\uF6B4",
"ScriptCapitalA": "\uF770",
"ScriptCapitalB": "\u212C",
"ScriptCapitalC": "\uF772",
"ScriptCapitalD": "\uF773",
"ScriptCapitalE": "\u2130",
"ScriptCapitalF": "\u2131",
"ScriptCapitalG": "\uF776",
"ScriptCapitalH": "\u210B",
"ScriptCapitalI": "\u2110",
"ScriptCapitalJ": "\uF779",
"ScriptCapitalK": "\uF77A",
"ScriptCapitalL": "\u2112",
"ScriptCapitalM": "\u2133",
"ScriptCapitalN": "\uF77D",
"ScriptCapitalO": "\uF77E",
"ScriptCapitalP": "\u2118",
"ScriptCapitalQ": "\uF780",
"ScriptCapitalR": "\u211B",
"ScriptCapitalS": "\uF782",
"ScriptCapitalT": "\uF783",
"ScriptCapitalU": "\uF784",
"ScriptCapitalV": "\uF785",
"ScriptCapitalW": "\uF786",
"ScriptCapitalX": "\uF787",
"ScriptCapitalY": "\uF788",
"ScriptCapitalZ": "\uF789",
"ScriptD": "\uF6B5",
"ScriptDotlessI": "\uF730",
"ScriptDotlessJ": "\uF731",
"ScriptE": "\u212F",
"ScriptEight": "\uF7F8",
"ScriptF": "\uF6B7",
"ScriptFive": "\uF7F5",
"ScriptFour": "\uF7F4",
"ScriptG": "\u210A",
"ScriptH": "\uF6B9",
"ScriptI": "\uF6BA",
"ScriptJ": "\uF6BB",
"ScriptK": "\uF6BC",
"ScriptL": "\u2113",
"ScriptM": "\uF6BE",
"ScriptN": "\uF6BF",
"ScriptNine": "\uF7F9",
"ScriptO": "\u2134",
"ScriptOne": "\uF7F1",
"ScriptP": "\uF6C1",
"ScriptQ": "\uF6C2",
"ScriptR": "\uF6C3",
"ScriptS": "\uF6C4",
"ScriptSeven": "\uF7F7",
"ScriptSix": "\uF7F6",
"ScriptT": "\uF6C5",
"ScriptThree": "\uF7F3",
"ScriptTwo": "\uF7F2",
"ScriptU": "\uF6C6",
"ScriptV": "\uF6C7",
"ScriptW": "\uF6C8",
"ScriptX": "\uF6C9",
"ScriptY": "\uF6CA",
"ScriptZ": "\uF6CB",
"ScriptZero": "\uF7F0",
"Section": "\u00A7",
"SelectionPlaceholder": "\uF527",
"Shah": "\uF11D",
"Sharp": "\u266F",
"ShiftKey": "\uF7D5",
"ShortDownArrow": "\uF52B",
"ShortLeftArrow": "\uF526",
"ShortRightArrow": "\uF525",
"Sigma": "\u03C3",
"SixPointedStar": "\u2736",
"SkeletonIndicator": "\u2043",
"SmallCircle": "\u2218",
"SpaceIndicator": "\u2423",
"SpaceKey": "\uF7BF",
"SpadeSuit": "\u2660",
"SpanFromAbove": "\uF3BB",
"SpanFromBoth": "\uF3BC",
"SpanFromLeft": "\uF3BA",
"SphericalAngle": "\u2222",
"Sqrt": "\u221A",
"Square": "\uF520",
"SquareIntersection": "\u2293",
"SquareSubset": "\u228F",
"SquareSubsetEqual": "\u2291",
"SquareSuperset": "\u2290",
"SquareSupersetEqual": "\u2292",
"SquareUnion": "\u2294",
"Star": "\u22C6",
"StepperDown": "\uF3CD",
"StepperLeft": "\uF3CB",
"StepperRight": "\uF3CA",
"StepperUp": "\uF3CC",
"Sterling": "\u00A3",
"Stigma": "\u03DB",
"Subset": "\u2282",
"SubsetEqual": "\u2286",
"Succeeds": "\u227B",
"SucceedsEqual": "\u2AB0",
"SucceedsSlantEqual": "\u227D",
"SucceedsTilde": "\u227F",
"SuchThat": "\u220D",
"Sum": "\u2211",
"Sun": "\u2609",
"Superset": "\u2283",
"SupersetEqual": "\u2287",
"SystemEnterKey": "\uF75F",
"SystemsModelDelay": "\uF3AF",
"THacek": "\u0165",
"TabKey": "\uF7BE",
"Tau": "\u03C4",
"TaurusSign": "\u2649",
"TensorProduct": "\uF3DA",
"TensorWedge": "\uF3DB",
"Therefore": "\u2234",
"Theta": "\u03B8",
"ThickSpace": "\u2005",
"ThinSpace": "\u2009",
"Thorn": "\u00FE",
"Tilde": "\u223C",
"TildeEqual": "\u2243",
"TildeFullEqual": "\u2245",
"TildeTilde": "\u2248",
"Times": "\u00D7",
"Trademark": "\u2122",
"Transpose": "\uF3C7",
"TripleDot": "\uF758",
"TwoWayRule": "\uF120",
"UAcute": "\u00FA",
"UDoubleAcute": "\u0171",
"UDoubleDot": "\u00FC",
"UGrave": "\u00F9",
"UHat": "\u00FB",
"URing": "\u016F",
"UnderBrace": "\uFE38",
"UnderBracket": "\u23B5",
"UnderParenthesis": "\uFE36",
"UndirectedEdge": "\uF3D4",
"Union": "\u22C3",
"UnionPlus": "\u228E",
"UnknownGlyph": "\uFFFD",
"UpArrow": "\u2191",
"UpArrowBar": "\u2912",
"UpArrowDownArrow": "\u21C5",
"UpDownArrow": "\u2195",
"UpEquilibrium": "\u296E",
"UpPointer": "\u25B4",
"UpTee": "\u22A5",
"UpTeeArrow": "\u21A5",
"UpperLeftArrow": "\u2196",
"UpperRightArrow": "\u2197",
"Upsilon": "\u03C5",
"Uranus": "\u2645",
"VectorGreaterEqual": "\uF435",
"VectorGreater": "\uF434",
"VectorLessEqual": "\uF437",
"VectorLess": "\uF436",
"Vee": "\u22C1",
"Venus": "\u2640",
"VerticalBar": "\u2223",
"VerticalEllipsis": "\u22EE",
"VerticalLine": "\u2502",
"VerticalSeparator": "\uF432",
"VerticalTilde": "\u2240",
"VeryThinSpace": "\u200A",
"Villa": "\uF727",
"VirgoSign": "\u264D",
"WarningSign": "\uF725",
"WatchIcon": "\u231A",
"Wedge": "\u22C0",
"WeierstrassP": "\u2118",
"WhiteBishop": "\u2657",
"WhiteKing": "\u2654",
"WhiteKnight": "\u2658",
"WhitePawn": "\u2659",
"WhiteQueen": "\u2655",
"WhiteRook": "\u2656",
"Wolf": "\uF720",
"WolframAlphaPrompt": "\uF352",
"WolframLanguageLogoCircle": "\uF11F",
"WolframLanguageLogo": "\uF11E",
"Xi": "\u03BE",
"Xnor": "\uF4A2",
"Xor": "\u22BB",
"YAcute": "\u00FD",
"YDoubleDot": "\u00FF",
"Yen": "\u00A5",
"ZHacek": "\u017E",
"Zeta": "\u03B6",
}
aliased_characters = {
"a'": "\u00E1",
"a-": "\u0101",
"au": "\u0103",
'a"': "\u00E4",
"ae": "\u00E6",
"a`": "\u00E0",
"a^": "\u00E2",
"al": "\u2135",
"esc": "\uF768",
"am": "\uF760",
"a": "\u03B1",
"alpha": "\u03B1",
"alt": "\uF7D1",
"&&": "\u2227",
"and": "\u2227",
"Ang": "\u212B",
"ao": "\u00E5",
"a~": "\u00E3",
"\\": "\u2216",
"be": "\u2136",
"b": "\u03B2",
"beta": "\u03B2",
"bv": "\u02D8",
"bu": "\u2022",
"c'": "\u0107",
"A'": "\u00C1",
"A-": "\u0100",
"Au": "\u0102",
'A"': "\u00C4",
"AE": "\u00C6",
"A`": "\u00C0",
"A^": "\u00C2",
"A": "\u0391",
"Alpha": "\u0391",
"Ao": "\u00C5",
"A~": "\u00C3",
"B": "\u0392",
"Beta": "\u0392",
"C'": "\u0106",
"C,": "\u00C7",
"Cv": "\u010C",
"Ch": "\u03A7",
"Chi": "\u03A7",
"C": "\u03A7",
"D": "\u0394",
"Delta": "\u0394",
"Dv": "\u010E",
"DD": "\uF74B",
"Di": "\u03DC",
"Digamma": "\u03DC",
"E'": "\u00C9",
"E-": "\u0112",
"Eu": "\u0114",
'E"': "\u00CB",
"E`": "\u00C8",
"Ev": "\u011A",
"E^": "\u00CA",
"E": "\u0395",
"Epsilon": "\u0395",
"Et": "\u0397",
"Eta": "\u0397",
"H": "\u0397",
"D-": "\u00D0",
"G": "\u0393",
"Gamma": "\u0393",
"I'": "\u00CD",
"Iu": "\u012C",
'I"': "\u00CF",
"I`": "\u00CC",
"I^": "\u00CE",
"I": "\u0399",
"Iota": "\u0399",
"K": "\u039A",
"Kappa": "\u039A",
"Ko": "\u03DE",
"Koppa": "\u03DE",
"L": "\u039B",
"Lambda": "\u039B",
"L/": "\u0141",
"M": "\u039C",
"Mu": "\u039C",
"Nv": "\u0147",
"N~": "\u00D1",
"N": "\u039D",
"Nu": "\u039D",
"O'": "\u00D3",
"O''": "\u0150",
'O"': "\u00D6",
"OE": "\u0152",
"O`": "\u00D2",
"O^": "\u00D4",
"O": "\u03A9",
"Omega": "\u03A9",
"W": "\u03A9",
"Om": "\u039F",
"Omicron": "\u039F",
"O/": "\u00D8",
"O~": "\u00D5",
"Ph": "\u03A6",
"Phi": "\u03A6",
"F": "\u03A6",
"P": "\u03A0",
"Pi": "\u03A0",
"Ps": "\u03A8",
"Psi": "\u03A8",
"Y": "\u03A8",
"Rv": "\u0158",
"R": "\u03A1",
"Rho": "\u03A1",
"Sa": "\u03E0",
"Sampi": "\u03E0",
"Sv": "\u0160",
"S": "\u03A3",
"Sigma": "\u03A3",
"T": "\u03A4",
"Tau": "\u03A4",
"Tv": "\u0164",
"Th": "\u0398",
"Theta": "\u0398",
"Q": "\u0398",
"Thn": "\u00DE",
"U'": "\u00DA",
"U''": "\u0170",
'U"': "\u00DC",
"U`": "\u00D9",
"U^": "\u00DB",
"U": "\u03A5",
"Upsilon": "\u03A5",
"Uo": "\u016E",
"X": "\u039E",
"Xi": "\u039E",
"Y'": "\u00DD",
"Z": "\u0396",
"Zeta": "\u0396",
"Zv": "\u017D",
"c,": "\u00E7",
"cd": "\u00B8",
".": "\u00B7",
"cent": "\u00A2",
"cv": "\u010D",
"ch": "\u03C7",
"chi": "\u03C7",
"c": "\u03C7",
"c.": "\u2299",
"c-": "\u2296",
"c+": "\u2295",
"c*": "\u2297",
"ccint": "\u2232",
"cl": "\u2318",
":": "\u2236",
"cmd": "\uF76A",
"===": "\u2261",
"co": "\uF3C8",
"conj": "\uF3C8",
"ct": "\uF3C9",
"cont": "\uF3B1",
"cint": "\u222E",
"ctrl": "\uF763",
"coprod": "\u2210",
"cccint": "\u2233",
"cross": "\uF4A0",
"cU": "\u03D2",
"cUpsilon": "\u03D2",
"ce": "\u03B5",
"cepsilon": "\u03B5",
"ck": "\u03F0",
"ckappa": "\u03F0",
"j": "\u03C6",
"cph": "\u03C6",
"cphi": "\u03C6",
"cp": "\u03D6",
"cpi": "\u03D6",
"cr": "\u03F1",
"crho": "\u03F1",
"cq": "\u03D1",
"cth": "\u03D1",
"ctheta": "\u03D1",
"dg": "\u2020",
"da": "\u2138",
"-": "\u2013",
"deg": "\u00B0",
" del": "\uF7D0",
"del": "\u2207",
"d": "\u03B4",
"delta": "\u03B4",
"dv": "\u010F",
"dia": "\u22C4",
"diffd": "\u2206",
"dd": "\uF74C",
"di": "\u03DD",
"digamma": "\u03DD",
"dratio": "\uF4A4",
"shift": "\uF4A3",
"dhy": "\u00AD",
"dlsep": "\uF76E",
"dpsep": "\uF76F",
"div": "\u00F7",
".=": "\u2250",
"ddg": "\u2021",
"gg": "\uF74A",
"pp": "\uF749",
" <=": "\u21D0",
"<=>": "\u21D4",
"<==": "\u27F8",
"<==>": "\u27FA",
"==>": "\u27F9",
"''": "\u2033",
" =>": "\u21D2",
"dsa": "\uF6E6",
"dsb": "\uF6E7",
"dsc": "\uF6E8",
"dsA": "\uF7A4",
"dsB": "\uF7A5",
"dsC": "\uF7A6",
"dsD": "\uF7A7",
"dsE": "\uF7A8",
"dsF": "\uF7A9",
"dsG": | |
#!/usr/bin/env python
# coding: utf-8
# # Regularization
#
# Welcome to the second assignment of this week. Deep Learning models have so much flexibility and capacity that **overfitting can be a serious problem**, if the training dataset is not big enough. Sure it does well on the training set, but the learned network **doesn't generalize to new examples** that it has never seen!
#
# **You will learn to:** Use regularization in your deep learning models.
#
# Let's first import the packages you are going to use.
# In[1]:
# import packages
import numpy as np
import matplotlib.pyplot as plt
from reg_utils import sigmoid, relu, plot_decision_boundary, initialize_parameters, load_2D_dataset, predict_dec
from reg_utils import compute_cost, predict, forward_propagation, backward_propagation, update_parameters
import sklearn
import sklearn.datasets
import scipy.io
from testCases import *
# Notebook setup: render matplotlib figures inline and set plotting defaults.
get_ipython().run_line_magic('matplotlib', 'inline')
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# **Problem Statement**: You have just been hired as an AI expert by the French Football Corporation. They would like you to recommend positions where France's goal keeper should kick the ball so that the French team's players can then hit it with their head.
#
# <img src="images/field_kiank.png" style="width:600px;height:350px;">
# <caption><center> <u> **Figure 1** </u>: **Football field**<br> The goal keeper kicks the ball in the air, the players of each team are fighting to hit the ball with their head </center></caption>
#
#
# They give you the following 2D dataset from France's past 10 games.
# In[2]:
# Load the 2D football dataset into train/test splits (helper from reg_utils).
train_X, train_Y, test_X, test_Y = load_2D_dataset()
# Each dot corresponds to a position on the football field where a football player has hit the ball with his/her head after the French goal keeper has shot the ball from the left side of the football field.
# - If the dot is blue, it means the French player managed to hit the ball with his/her head
# - If the dot is red, it means the other team's player hit the ball with their head
#
# **Your goal**: Use a deep learning model to find the positions on the field where the goalkeeper should kick the ball.
# **Analysis of the dataset**: This dataset is a little noisy, but it looks like a diagonal line separating the upper left half (blue) from the lower right half (red) would work well.
#
# You will first try a non-regularized model. Then you'll learn how to regularize it and decide which model you will choose to solve the French Football Corporation's problem.
# ## 1 - Non-regularized model
#
# You will use the following neural network (already implemented for you below). This model can be used:
# - in *regularization mode* -- by setting the `lambd` input to a non-zero value. We use "`lambd`" instead of "`lambda`" because "`lambda`" is a reserved keyword in Python.
# - in *dropout mode* -- by setting the `keep_prob` to a value less than one
#
# You will first try the model without any regularization. Then, you will implement:
# - *L2 regularization* -- functions: "`compute_cost_with_regularization()`" and "`backward_propagation_with_regularization()`"
# - *Dropout* -- functions: "`forward_propagation_with_dropout()`" and "`backward_propagation_with_dropout()`"
#
# In each part, you will run this model with the correct inputs so that it calls the functions you've implemented. Take a look at the code below to familiarize yourself with the model.
# In[3]:
def model(X, Y, learning_rate = 0.3, num_iterations = 30000, print_cost = True, lambd = 0, keep_prob = 1):
    """
    Train a three-layer network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.

    Arguments:
    X -- input data, of shape (input size, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (output size, number of examples)
    learning_rate -- learning rate of the optimization
    num_iterations -- number of iterations of the optimization loop
    print_cost -- If True, print the cost every 10000 iterations
    lambd -- L2 regularization hyperparameter (0 disables regularization)
    keep_prob -- probability of keeping a neuron active during drop-out (1 disables dropout)

    Returns:
    parameters -- parameters learned by the model; can then be used to predict
    """
    grads = {}
    costs = []                            # cost history, sampled every 1000 iterations
    m = X.shape[1]                        # number of examples (kept for parity; not used below)
    layers_dims = [X.shape[0], 20, 3, 1]

    # Initialize the parameter dictionary for the chosen layer sizes.
    parameters = initialize_parameters(layers_dims)

    # Gradient-descent loop.
    for iteration in range(num_iterations):
        # Forward pass (with dropout when keep_prob < 1).
        if keep_prob == 1:
            a3, cache = forward_propagation(X, parameters)
        elif keep_prob < 1:
            a3, cache = forward_propagation_with_dropout(X, parameters, keep_prob)

        # Cost (with the L2 penalty when lambd != 0).
        if lambd == 0:
            cost = compute_cost(a3, Y)
        else:
            cost = compute_cost_with_regularization(a3, Y, parameters, lambd)

        # This assignment exercises L2 regularization and dropout one at a time.
        assert(lambd == 0 or keep_prob == 1)

        # Backward pass matching the forward/cost configuration above.
        if lambd == 0 and keep_prob == 1:
            grads = backward_propagation(X, Y, cache)
        elif lambd != 0:
            grads = backward_propagation_with_regularization(X, Y, cache, lambd)
        elif keep_prob < 1:
            grads = backward_propagation_with_dropout(X, Y, cache, keep_prob)

        # Parameter update.
        parameters = update_parameters(parameters, grads, learning_rate)

        # Periodic logging and cost sampling.
        if print_cost and iteration % 10000 == 0:
            print("Cost after iteration {}: {}".format(iteration, cost))
        if print_cost and iteration % 1000 == 0:
            costs.append(cost)

    # Plot the sampled learning curve.
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('iterations (x1,000)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters
# Let's train the model without any regularization, and observe the accuracy on the train/test sets.
# In[4]:
# Train the baseline (non-regularized) model and report train/test accuracy.
parameters = model(train_X, train_Y)
print("On the training set:")
predictions_train = predict(train_X, train_Y, parameters)
print("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
# The train accuracy is 94.8% while the test accuracy is 91.5%. This is the **baseline model** (you will observe the impact of regularization on this model). Run the following code to plot the decision boundary of your model.
# In[5]:
# Visualize the decision boundary learned by the unregularized model.
plt.title("Model without regularization")
axes = plt.gca()
axes.set_xlim([-0.75, 0.40])
axes.set_ylim([-0.75, 0.65])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# The non-regularized model is obviously overfitting the training set. It is fitting the noisy points! Let's now look at two techniques to reduce overfitting.
# ## 2 - L2 Regularization
#
# The standard way to avoid overfitting is called **L2 regularization**. It consists of appropriately modifying your cost function, from:
# $$J = -\frac{1}{m} \sum\limits_{i = 1}^{m} \large{(}\small y^{(i)}\log\left(a^{[L](i)}\right) + (1-y^{(i)})\log\left(1- a^{[L](i)}\right) \large{)} \tag{1}$$
# To:
# $$J_{regularized} = \small \underbrace{-\frac{1}{m} \sum\limits_{i = 1}^{m} \large{(}\small y^{(i)}\log\left(a^{[L](i)}\right) + (1-y^{(i)})\log\left(1- a^{[L](i)}\right) \large{)} }_\text{cross-entropy cost} + \underbrace{\frac{1}{m} \frac{\lambda}{2} \sum\limits_l\sum\limits_k\sum\limits_j W_{k,j}^{[l]2} }_\text{L2 regularization cost} \tag{2}$$
#
# Let's modify your cost and observe the consequences.
#
# **Exercise**: Implement `compute_cost_with_regularization()` which computes the cost given by formula (2). To calculate $\sum\limits_k\sum\limits_j W_{k,j}^{[l]2}$ , use :
# ```python
# np.sum(np.square(Wl))
# ```
# Note that you have to do this for $W^{[1]}$, $W^{[2]}$ and $W^{[3]}$, then sum the three terms and multiply by $ \frac{1}{m} \frac{\lambda}{2} $.
# In[6]:
# GRADED FUNCTION: compute_cost_with_regularization
def compute_cost_with_regularization(A3, Y, parameters, lambd):
    """
    Implement the cost function with L2 regularization. See formula (2) above.

    Arguments:
    A3 -- post-activation, output of forward propagation, of shape (output size, number of examples)
    Y -- "true" labels vector, of shape (output size, number of examples)
    parameters -- python dictionary containing parameters of the model (W1/W2/W3 are used)
    lambd -- L2 regularization hyperparameter, scalar

    Returns:
    cost -- value of the regularized loss function (formula (2))
    """
    m = Y.shape[1]
    weight_matrices = (parameters["W1"], parameters["W2"], parameters["W3"])

    # Cross-entropy part of the cost.
    cross_entropy_cost = compute_cost(A3, Y)

    ### START CODE HERE ### (approx. 1 line)
    # L2 part: lambda/(2m) times the sum of squared entries over all weight matrices.
    L2_regularization_cost = lambd * sum(np.sum(np.square(W)) for W in weight_matrices) / (2 * m)
    ### END CODE HERE ###

    return cross_entropy_cost + L2_regularization_cost
# In[7]:
# Sanity-check the regularized cost on the provided test case (lambd = 0.1).
A3, Y_assess, parameters = compute_cost_with_regularization_test_case()
print("cost = " + str(compute_cost_with_regularization(A3, Y_assess, parameters, lambd = 0.1)))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **cost**
# </td>
# <td>
# 1.78648594516
# </td>
#
# </tr>
#
# </table>
# Of course, because you changed the cost, you have to change backward propagation as well! All the gradients have to be computed with respect to this new cost.
#
# **Exercise**: Implement the | |
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import math
from functools import reduce
from typing import Mapping, Sequence, Tuple
import numpy as np
from graph.types import (FilterParameters, MultiplicativeBiasParameters,
Parameters)
from quantization.multiplicative.mult_qtype_base import (MultQTypeBase,
WrapperMixin)
from quantization.multiplicative.symmetric.mult_mulbias_qtype_new import (
MultMulBiasQType, MultMulBiasScaleQType)
from quantization.multiplicative.symmetric.symmetric_mult_biases_qtype import \
SymmetricMultBiasesQType
from quantization.qtype import QType
from quantization.quantization_record_base import (
ConstantQuantizationRecordBase, HasConstantsBase,
InputOutputQuantizationRecordBase, QuantizationRecordBase,
ScalableFilterQuantizationRecordBase)
from sympy import Symbol
class MultQuantizationRecordBase(QuantizationRecordBase):
    """Base record for multiplicative quantization.

    Adds a quant-mode marker, a small value cache and helpers that
    (de)quantize tensors using a qtype stored under a named attribute.
    """

    def __init__(self, *args, quant_mode="symmetric", info=None, **kwargs):
        super(MultQuantizationRecordBase, self).__init__(*args, info=info, **kwargs)
        if info is None:
            self._info['quant_mode'] = quant_mode
        self._cache = {}

    def clear_cache(self):
        """Drop every cached value."""
        self._cache = {}

    def check_cache(self, name):
        """Return the cached value for `name`, or None when absent."""
        return self._cache.get(name)

    def _lookup_qtype(self, key_name, idx):
        """Fetch the qtype attribute `key_name`, indexing it with `idx` when given."""
        qtype = getattr(self, key_name)
        return qtype if idx is None else qtype[idx]

    def dequantize_as(self, tensor: np.ndarray, key_name: str, idx: int = None) -> np.ndarray:
        """Dequantize `tensor` with the qtype stored under `key_name`."""
        return self._lookup_qtype(key_name, idx).dequantize(tensor)

    def quantize_as(self, tensor: np.ndarray, key_name: str, idx: int = None) -> np.ndarray:
        """Quantize `tensor` with the qtype stored under `key_name`."""
        return self._lookup_qtype(key_name, idx).quantize(tensor)

    def dequantize_wrapped(self, tensor: np.ndarray, key_name: str, idx: int = None) -> np.ndarray:
        """Dequantize `tensor`, going through the wrapped qtype for WrapperMixin qtypes."""
        qtype = self._lookup_qtype(key_name, idx)
        target = qtype.wrapped if isinstance(qtype, WrapperMixin) else qtype
        return target.dequantize(tensor)

    def quantize_wrapped(self, tensor: np.ndarray, key_name: str, idx: int = None) -> np.ndarray:
        """Quantize `tensor`, going through the wrapped qtype for WrapperMixin qtypes."""
        qtype = self._lookup_qtype(key_name, idx)
        target = qtype.wrapped if isinstance(qtype, WrapperMixin) else qtype
        return target.quantize(tensor)

    def requantize(self, tensor: np.ndarray, key_name: str, idx: int = None) -> np.ndarray:
        """Re-express `tensor` in the qtype stored under `key_name`.

        Wrapped qtypes are first dequantized through their wrapper and then
        re-quantized; float32 tensors are quantized directly; any other
        tensor is returned unchanged.
        """
        qtype = self._lookup_qtype(key_name, idx)
        if isinstance(qtype, WrapperMixin):
            as_float = self.dequantize_wrapped(tensor, key_name, idx=idx)
            return qtype.quantize(as_float)
        if tensor.dtype == np.float32:
            return qtype.quantize(tensor)
        return tensor

    def confirm_dimension(self, out_c_idx: int, key_name: str):
        """Record `out_c_idx` as the quantized dimension of the qtype under `key_name`."""
        getattr(self, key_name).quantized_dimension = out_c_idx
class InputQuantizationMixin(MultQuantizationRecordBase):
    """Mixin adding optional automatic (de)quantization of input tensors.

    auto_quantize_inputs -- quantize float inputs before a 'symmetric' kernel.
    auto_dequantize_inputs -- dequantize inputs before a 'float32' kernel.
    """

    def __init__(self, *args, auto_quantize_inputs=False, auto_dequantize_inputs=False, **kwargs):
        super(InputQuantizationMixin, self).__init__(*args, **kwargs)
        self._auto_quantize_inputs = auto_quantize_inputs
        self._auto_dequantize_inputs = auto_dequantize_inputs

    @property
    def auto_quantize_inputs(self):
        return self._auto_quantize_inputs

    @auto_quantize_inputs.setter
    def auto_quantize_inputs(self, val):
        self._auto_quantize_inputs = val

    @property
    def auto_dequantize_inputs(self):
        return self._auto_dequantize_inputs

    # BUG FIX: this setter was previously decorated @auto_quantize_inputs.setter and
    # named auto_quantize_inputs, which (a) overrode the real auto_quantize_inputs
    # setter above, so assigning auto_quantize_inputs silently toggled
    # _auto_dequantize_inputs instead, and (b) left auto_dequantize_inputs with no
    # setter at all (assignment raised AttributeError).
    @auto_dequantize_inputs.setter
    def auto_dequantize_inputs(self, val):
        self._auto_dequantize_inputs = val

    def prepare_inputs(self, params: Parameters,
                       input_tensors: Sequence[np.ndarray], ktype: str = None) -> Sequence[np.ndarray]:
        """Return input_tensors, auto-(de)quantized according to the flags and kernel type.

        params is unused; ktype selects 'float32' (dequantize) or 'symmetric' (quantize).
        """
        del params
        if ktype == 'float32' and self.auto_dequantize_inputs:
            return [self.dequantize_wrapped(input_tensor, "in_qs", idx=idx)
                    for idx, input_tensor in enumerate(input_tensors)]
        if ktype == 'symmetric' and self.auto_quantize_inputs:
            return [self.quantize_as(input_tensor, "in_qs", idx=idx) for idx, input_tensor in enumerate(input_tensors)]
        return input_tensors
class OutputQuantizationMixin(MultQuantizationRecordBase):
    """Mixin adding optional automatic dequantization/clipping of output tensors.

    auto_dequantize_outputs -- dequantize outputs of a 'symmetric' kernel.
    auto_quantize_outputs -- flag stored for callers; not consulted by get_outputs here.
    """

    def __init__(self, *args, auto_dequantize_outputs=False, auto_quantize_outputs=False, **kwargs):
        super(OutputQuantizationMixin, self).__init__(*args, **kwargs)
        self._auto_quantize_outputs = auto_quantize_outputs
        self._auto_dequantize_outputs = auto_dequantize_outputs

    @property
    def auto_dequantize_outputs(self):
        return self._auto_dequantize_outputs

    @auto_dequantize_outputs.setter
    def auto_dequantize_outputs(self, val):
        self._auto_dequantize_outputs = val

    @property
    def auto_quantize_outputs(self):
        # BUG FIX: previously returned _auto_dequantize_outputs, so this property
        # aliased the dequantize flag instead of reporting its own state.
        return self._auto_quantize_outputs

    @auto_quantize_outputs.setter
    def auto_quantize_outputs(self, val):
        # BUG FIX: previously wrote _auto_dequantize_outputs, silently enabling
        # output dequantization when a caller set the quantize flag.
        self._auto_quantize_outputs = val

    def get_outputs(self, params: Parameters,
                    output_tensors: Sequence[np.ndarray], ktype: str = None) -> Sequence[np.ndarray]:
        """Return output_tensors; for 'symmetric' kernels either dequantize them
        (when auto_dequantize_outputs is set) or clip them to their out qtypes."""
        del params
        if ktype == 'symmetric':
            if self._auto_dequantize_outputs:
                return [self.dequantize_as(output_tensor, "out_qs", idx=idx)
                        for idx, output_tensor in enumerate(output_tensors)]
            output_tensors = [self.out_qs[idx].clip(output_tensor)
                              for idx, output_tensor in enumerate(output_tensors)]
        return output_tensors
class MultExpressionQuantizationRecord(InputQuantizationMixin, OutputQuantizationMixin, InputOutputQuantizationRecordBase):
    """Quantization record for fused-expression nodes, storing the symbolic
    inputs, output expressions and intermediate expressions of the fusion."""

    def __init__(self, *args, inputs=None, output_exprs=None, intermediate_exprs=None, info=None, **kwargs):
        super(MultExpressionQuantizationRecord, self).__init__(*args, info=info, **kwargs)
        if info is None:
            # Fresh record: store the expression structure directly;
            # otherwise everything is restored from the supplied info dict.
            self._info['inputs'] = inputs
            self._info['output_exprs'] = output_exprs
            self._info['intermediate_exprs'] = intermediate_exprs

    def _encapsulate(self):
        # Serialization: sympy input symbols are stored by name only.
        rec = super(MultExpressionQuantizationRecord, self)._encapsulate()
        rec['inputs'] = [sym.name for sym in self._info['inputs']]
        return rec

    @classmethod
    def _dencapsulate(cls, val):
        # Deserialization: rebuild sympy Symbols from the stored names.
        val['inputs'] = [Symbol(name) for name in val['inputs']]
        return cls(info=val)

    @property
    def inputs(self):
        # Symbolic inputs of the fused expression.
        return self._info['inputs']

    @property
    def outputs(self):
        # NOTE(review): 'outputs' is never written by this class (__init__ stores
        # 'output_exprs', not 'outputs') — presumably set by a base class or
        # external code; verify before relying on this property.
        return self._info['outputs']

    @property
    def output_exprs(self):
        return self._info['output_exprs']

    @property
    def intermediate_exprs(self):
        return self._info['intermediate_exprs']
class MultQuantizationRecord(InputQuantizationMixin, OutputQuantizationMixin, InputOutputQuantizationRecordBase):
    """Standard multiplicative quantization record with a lazily-created
    output scaling qtype (scale_mul_biases_q)."""

    def __init__(self, *args, scale_mul_biases_q=None, info=None, **kwargs):
        super(MultQuantizationRecord, self).__init__(*args, info=info, **kwargs)
        if info is None:
            self.scale_mul_biases_q = scale_mul_biases_q

    @property
    def scale_mul_biases_q(self):
        """Output scaling qtype; created on first access when unset."""
        scaling_qtype = self._info.get('scale_mul_biases_q')
        if scaling_qtype is None:
            scaling_qtype = MultMulBiasScaleQType(dtype=np.uint8)
            self.scale_mul_biases_q = scaling_qtype
        return scaling_qtype

    @scale_mul_biases_q.setter
    def scale_mul_biases_q(self, val):
        self._info['scale_mul_biases_q'] = val

    @staticmethod
    def _combined_scale(qtypes, idx_or_idxes):
        """Scale at a single index, or the product of scales over an index sequence."""
        if isinstance(idx_or_idxes, int):
            return qtypes[idx_or_idxes].scale
        return reduce(lambda x, y: x * y, [qtypes[idx].scale for idx in idx_or_idxes])

    def set_scale(self, in_idx=0, out_idx=0, extra_scale=1):
        """Set the output scaling to (in_scale * extra_scale) / out_scale.

        in_idx / out_idx may be a single index or a sequence of indices whose
        scales are multiplied together.
        """
        in_scale = self._combined_scale(self.in_qs, in_idx)
        out_scale = self._combined_scale(self.out_qs, out_idx)
        self.scale_mul_biases_q.scale = in_scale * extra_scale / out_scale
class MultAddQuantizationRecord(MultQuantizationRecord):
    """Quantization record for add-type nodes: the input with the larger scale
    is rescaled onto the scale of the other input before the addition."""

    def __init__(self, *args, scale_in_mul_biases_q=None, info=None, **kwargs):
        super(MultAddQuantizationRecord, self).__init__(*args, info=info, **kwargs)
        if info is None:
            self._info['scale_in_mul_biases_q'] = scale_in_mul_biases_q

    @property
    def scale_in_mul_biases_q(self):
        """Input scaling qtype; created on first access when unset."""
        scaling_qtype = self._info.get('scale_in_mul_biases_q')
        if scaling_qtype is None:
            scaling_qtype = MultMulBiasScaleQType(dtype=np.uint8)
            self.scale_in_mul_biases_q = scaling_qtype
        return scaling_qtype

    @scale_in_mul_biases_q.setter
    def scale_in_mul_biases_q(self, val):
        self._info['scale_in_mul_biases_q'] = val

    @property
    def scaled_idx(self):
        """Index of the input with the larger scale (the one that gets rescaled)."""
        return 1 if self.in_qs[1].scale > self.in_qs[0].scale else 0

    def set_add_scale(self):
        """Derive output scaling from the smaller-scale input and set the
        input rescaling factor between the two addends."""
        rescaled = self.scaled_idx
        reference = 0 if rescaled else 1
        self.set_scale(in_idx=reference)
        self.scale_in_mul_biases_q.scale = self.in_qs[rescaled].scale / self.in_qs[reference].scale
class MultConstantQuantizationRecord(InputQuantizationMixin, InputOutputQuantizationRecordBase,
                                     OutputQuantizationMixin, ConstantQuantizationRecordBase):
    """Quantization record for constant nodes."""

    def gen_value(self, value):
        """Return `value` quantized with the first output qtype, telling it
        whether the stored constant container is already quantized."""
        out_qtype = self.out_qs[0]
        return out_qtype.get_quantized(value, container_is_quantized=self.constants_are_quantized)
class FilterQuantizationMixin(MultQuantizationRecord):
    """Mixin exposing the calc/accumulator/bias/weight qtypes used by filter nodes."""

    @property
    def calc_q(self) -> QType:
        # Calculation qtype is a fixed 32-bit signed format.
        # (Annotation corrected from MultQTypeBase: the getter returns a QType.)
        return QType(bits=32, q=0, signed=True)

    @property
    def acc_q(self) -> QType:
        # Accumulator qtype is a fixed 32-bit signed format.
        # (Annotation corrected from MultQTypeBase: the getter returns a QType.)
        return QType(bits=32, q=0, signed=True)

    @property
    def biases_q(self) -> SymmetricMultBiasesQType:
        return self._info.get('biases_q')

    @property
    def weights_q(self) -> MultQTypeBase:
        return self._info.get('weights_q')

    @calc_q.setter
    def calc_q(self, val: MultQTypeBase):
        # No-op: calc_q is fixed for this record type (see getter).
        pass

    @acc_q.setter
    def acc_q(self, val: MultQTypeBase):
        # No-op: acc_q is fixed for this record type (see getter).
        pass

    @biases_q.setter
    def biases_q(self, val: SymmetricMultBiasesQType):
        self._info['biases_q'] = val

    @weights_q.setter
    def weights_q(self, val: MultQTypeBase):
        self._info['weights_q'] = val

    @staticmethod
    def rescale(arr, from_scale, to_scale):
        # Rescale values between two scales with round-half-up, keeping arr's dtype.
        return np.floor((arr * from_scale/to_scale) + 0.5).astype(arr.dtype)
class MultScalableFilterQuantizationRecord(FilterQuantizationMixin, ScalableFilterQuantizationRecordBase):
    """Quantization record for filter nodes with per-channel (scalable)
    multiplicative bias scaling and zero-point corrected biases."""

    def __init__(self, *args,
                 weights_q: MultQTypeBase = None,
                 biases_q: SymmetricMultBiasesQType = None,
                 mul_biases_q: Sequence[MultMulBiasQType] = None,
                 calc_q: QType = None,
                 acc_q: QType = None,
                 enable_prenorm=False,
                 info=None,
                 **kwargs):
        super(MultScalableFilterQuantizationRecord, self).__init__(*args, info=info, **kwargs)
        if info is None:
            # Fresh record: stash all qtypes; otherwise they come from info.
            self._info['calc_q'] = calc_q
            self._info['acc_q'] = acc_q
            self._info['biases_q'] = biases_q
            self._info['weights_q'] = weights_q
            self._info['mul_biases_q'] = mul_biases_q
            self._info['enable_prenorm'] = enable_prenorm
        # Link the bias qtype to the weight and input qtypes.
        self.biases_q.link(self.weights_q, self.in_qs[0])

    @property
    def unwrap(self):
        # NOTE(review): _unwrap is never initialized in __init__, so reading this
        # property before assigning it raises AttributeError — confirm intended.
        return self._unwrap

    @unwrap.setter
    def unwrap(self, val):
        self._unwrap = val
        # Re-link biases since un/wrapping can change the effective qtypes.
        self.biases_q.link(self.weights_q, self.in_qs[0])

    def reorder_weigths(self, trans, dim):
        # Apply a dimension reordering to biases (if present), weights and mul biases.
        # (Name keeps the historical "weigths" spelling; callers depend on it.)
        if self.biases_q:
            self.biases_q.reorder(trans, dim)
        self.weights_q.reorder(trans, dim)
        self.mul_biases_q.reorder(trans, dim)

    def compute_prenorm(self, params: FilterParameters):
        """Return the number of pre-normalization shift bits needed for the
        scaled accumulator (0 when prenorm is disabled).

        Raises ValueError when the mul-bias dtype is neither int8 nor uint8.
        """
        if not self.enable_prenorm:
            return 0
        # Worst-case accumulator bits: input bits + weight bits + log2(#MACs).
        max_bits = self.in_qs[0].bits - 1 + self.weights_q.bits - 1 + 1 + \
            math.ceil(math.log2(params.filter.in_c * params.filter.h * params.filter.w))
        spare_bits = 31 - max_bits
        if self.mul_biases_q.dtype == np.int8:
            bits = 7
        elif self.mul_biases_q.dtype == np.uint8:
            bits = 8
        else:
            raise ValueError("incorrect dtype")
        return max(0, bits - spare_bits)

    @property
    def enable_prenorm(self) -> bool:
        # Whether pre-normalization shifting is applied (see compute_prenorm).
        return self._info.get('enable_prenorm')

    @enable_prenorm.setter
    def enable_prenorm(self, val: bool):
        self._info['enable_prenorm'] = val

    @property
    def mul_biases_q(self) -> MultMulBiasQType:
        return self._info.get('mul_biases_q')

    @mul_biases_q.setter
    def mul_biases_q(self, val: MultMulBiasQType):
        self._info['mul_biases_q'] = val

    def get_quantized_bias_offset(self, params, weights):
        # input zero correction is sum(W * Zin) by out_c if weights are channel scaled
        axis = tuple([idx for idx in range(4) if idx != params.filter.get_order_idx('out_c')])
        return np.sum(np.multiply(self.in_qs[0].zero_point,
                                  weights,
                                  dtype=np.int32),
                      dtype=np.int32,
                      axis=axis)

    @property
    def biases_zero_correction(self):
        # output zero correction is So/(Si * Sw) * ZPo by out_c if weights are channel scaled
        scale = self.out_qs[0].scale / (self.in_qs[0].scale * self.weights_q.scale)
        # Round half up to the nearest int32.
        return np.floor((self.out_qs[0].zero_point * scale) + 0.5).astype(np.int32)

    def prepare_biases(self, params: Parameters, biases: np.ndarray,
                       weights: np.ndarray, ktype: str = None) -> np.ndarray:
        """Return biases ready for a kernel of type ktype ('float32' or 'symmetric');
        raises ValueError for any other ktype."""
        if ktype == 'float32':
            return self.biases_q.get_dequantized(biases,
                                                 container_is_quantized=self.constants_are_quantized).astype(np.float32)
        if ktype == 'symmetric':
            return self.gen_biases(params, biases, weights)
        raise ValueError()

    def prepare_weights(self, params: Parameters,
                        weights: np.ndarray, ktype: str = None) -> np.ndarray:
        """Return weights ready for a kernel of type ktype ('float32' or 'symmetric');
        raises ValueError for any other ktype."""
        # Align the weight qtype's quantized dimension with the out_c ordering.
        self.confirm_dimension(params.filter.get_order_idx('out_c'), 'weights_q')
        if ktype == 'float32':
            weights = self.weights_q.get_dequantized(weights,
                                                     container_is_quantized=self.constants_are_quantized)
            return weights.astype(np.float32)
        if ktype == 'symmetric':
            return self.gen_weights(params, weights)
        raise ValueError()

    def gen_weights(self, params: Parameters, weights: np.ndarray) -> np.ndarray:
        """Quantize weights with the stored weight qtype."""
        return self.weights_q.get_quantized(weights,
                                            container_is_quantized=self.constants_are_quantized)

    def gen_biases(self, params: Parameters, biases: np.ndarray, weights: np.ndarray) -> np.ndarray:
        """Quantize biases and fold in input/output zero-point corrections."""
        biases = self.biases_q.get_quantized(
            biases, container_is_quantized=self.constants_are_quantized)
        if self.in_qs[0].zero_point != 0:
            biases -= self.get_quantized_bias_offset(params,
                                                     self.requantize(weights, 'weights_q'))
        if self.out_qs[0].zero_point != 0:
            biases += self.biases_zero_correction
        return biases

    def gen_mul_biases(self, params: MultiplicativeBiasParameters) -> np.ndarray:
        """Return the quantized multiplicative biases, refreshing prenorm first."""
        if isinstance(self.mul_biases_q, MultMulBiasQType):
            self.mul_biases_q.pre_normalization = self.compute_prenorm(params)
        return self.mul_biases_q.qbiases

    def apply_multiplicative_bias(self, params: FilterParameters, input_tensor: np.ndarray,
                                  axis: int, ktype: str = None):
        """Apply the per-channel scaling along `axis` for 'symmetric' kernels;
        pass the tensor through unchanged for 'float32'."""
        if ktype == 'float32':
            return input_tensor
        if ktype == 'symmetric':
            if isinstance(self.mul_biases_q, MultMulBiasQType):
                self.mul_biases_q.pre_normalization = self.compute_prenorm(params)
                input_tensor = self.mul_biases_q.apply_scales(input_tensor, axis)
            return input_tensor.astype(np.int32)
class MultSSDDetectorQuantizationRecord(MultQuantizationRecord):
def __init__(self, *args, scale_x_q=None, scale_x_anc_q=None, scale_y_q=None,
scale_y_anc_q=None, scale_h_q=None, scale_w_q=None, scale_ao_q=None,
info=None, **kwargs):
super(MultSSDDetectorQuantizationRecord, self).__init__(*args, info=info, **kwargs)
if info is None:
self._info['scale_x_q'] = scale_x_q
| |
# Center crop image
width, height = img.size
startx = width // 2 - (224 // 2)
starty = height // 2 - (224 // 2)
img = np.asarray(img).reshape(height, width, 3)
img = img[starty:starty + 224, startx:startx + 224]
assert img.shape[0] == 224 and img.shape[1] == 224, (img.shape, height, width)
# Save image
x_val_ref[:, :, :] = img[:, :, :]
# Convert image to tensor, then normalize and copy it
x_temp = torch.from_numpy(np.transpose(x_val_ref[:, :, :], (2, 0, 1)))
normalize = transforms.Normalize(mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225])
for i in range(len(SF_transfer) * len(Ori_transfer)):
x_tensor_ref.append(normalize(x_temp))
x_tensor_ref = torch.stack(x_tensor_ref)
print(x_tensor_ref.shape)
# Select GPU
gpu = 0
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Use GPU: {} for transfer".format(gpu))
# Freeze all model parameters (requires_grad=False) so the backbone is not updated during transfer
for param in model.parameters():
param.requires_grad = False
# Send the model to GPU/CPU
model = model.to(device)
# Model summary
print(model)
cudnn.benchmark = True
# Define the main validation parameters
start_session = 0
sessions = 10
for session in range(start_session, sessions):
z_val_shuffle = copy.deepcopy(z_val_transfer)
for j in range(len(SF_transfer)):
for k in range(len(Ori_transfer)):
random.shuffle(z_val_shuffle[j, k, :])
# Evaluate on the validation set
z_val_shuffle_1D = np.unique(z_val_shuffle[:, :, session])
indices = torch.tensor(z_val_shuffle_1D, dtype = torch.long)
x_valid = torch.index_select(x_tensor_transfer, 0, indices)
y_valid = torch.index_select(y_tensor_transfer, 0, indices)
y_valid = y_valid.squeeze(1)
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Accuracy', ':6.2f')
progress = ProgressMeter(1, [batch_time, losses, top1], prefix = ("Transfer >>> Session: " + str(session) + " Epoch: [{}]").format(1))
# Switch to evaluating mode
model.eval()
with torch.no_grad():
end = time.time()
x_ref = x_tensor_ref.cuda(gpu)
x_valid = x_valid.cuda(gpu)
y_valid = y_valid.cuda(gpu)
# Compute output
output = model(x_valid, x_ref)
loss = criterion(output, y_valid)
# Measure accuracy and record loss
acc1 = accuracy(output, y_valid, topk = 1)
losses.update(loss.item(), x_valid.size(0))
top1.update(acc1[0], x_valid.size(0))
# Save the validation accuracy for plotting
all_simulation_transfer_accuracy[simulation_counter, group_counter, layer_freeze_counter, session - start_session] = acc1[0].item()
# Measure elapsed time
batch_time.update(time.time() - end)
progress.display(1)
# Remember the best accuracy
is_best = all_simulation_transfer_accuracy[simulation_counter, group_counter, layer_freeze_counter, session - start_session] >= best_acc1
best_acc1 = max(all_simulation_transfer_accuracy[simulation_counter, group_counter, layer_freeze_counter, session - start_session], best_acc1)
### Extracting the activations of convolutional layers of the network per transfer stimulus after training
# The indices of consecutive convolutional layers: (0, 3, 6, 8, 10)
# The sizes of consecutive convolutional layers: (55, 27, 13, 13, 13)
# The positions of central units of consecutive convolutional layers: (27, 13, 6, 6, 6)
# The number of channels of consecutive convolutional layers: (64, 192, 384, 256, 256)
os.mkdir(parent_folder + '/Simulation_' + str(simulation_counter + 1) + '/' + group_training + '/after_training_' + str(layer_freeze))
saving_folder = parent_folder + '/Simulation_' + str(simulation_counter + 1) + '/' + group_training + '/after_training_' + str(layer_freeze)
# The target stimuli
feature_sample_artiphysiology = np.zeros((num_sample_artiphysiology, 3), dtype = np.int64)
all_unit_activity_Conv2d_1 = np.zeros((num_sample_artiphysiology, 64, 55, 55), dtype = np.float32)
all_unit_activity_Conv2d_2 = np.zeros((num_sample_artiphysiology, 192, 27, 27), dtype = np.float32)
all_unit_activity_Conv2d_3 = np.zeros((num_sample_artiphysiology, 384, 13, 13), dtype = np.float32)
all_unit_activity_Conv2d_4 = np.zeros((num_sample_artiphysiology, 256, 13, 13), dtype = np.float32)
all_unit_activity_Conv2d_5 = np.zeros((num_sample_artiphysiology, 256, 13, 13), dtype = np.float32)
for i in range(num_sample_artiphysiology):
feature_sample_artiphysiology[i, :] = [SF_transfer[x_sample_artiphysiology_index[i, 0]], Ori_transfer[x_sample_artiphysiology_index[i, 1]], x_sample_artiphysiology_index[i, 2]]
index = torch.tensor(z_val_transfer[x_sample_artiphysiology_index[i, 0], x_sample_artiphysiology_index[i, 1], x_sample_artiphysiology_index[i, 2]], dtype = torch.long)
x_sample = torch.index_select(x_tensor_transfer, 0, index)
x_sample = x_sample.cuda(gpu)
unit_activity_layer_0 = model.features[0](x_sample)
unit_activity_layer_1 = model.features[1](unit_activity_layer_0)
unit_activity_layer_2 = model.features[2](unit_activity_layer_1)
unit_activity_layer_3 = model.features[3](unit_activity_layer_2)
unit_activity_layer_4 = model.features[4](unit_activity_layer_3)
unit_activity_layer_5 = model.features[5](unit_activity_layer_4)
unit_activity_layer_6 = model.features[6](unit_activity_layer_5)
unit_activity_layer_7 = model.features[7](unit_activity_layer_6)
unit_activity_layer_8 = model.features[8](unit_activity_layer_7)
unit_activity_layer_9 = model.features[9](unit_activity_layer_8)
unit_activity_layer_10 = model.features[10](unit_activity_layer_9)
unit_activity_layer_11 = model.features[11](unit_activity_layer_10)
unit_activity_layer_12 = model.features[12](unit_activity_layer_11)
all_unit_activity_Conv2d_1[i, :] = unit_activity_layer_0[0].detach().cpu().clone().numpy()
all_unit_activity_Conv2d_2[i, :] = unit_activity_layer_3[0].detach().cpu().clone().numpy()
all_unit_activity_Conv2d_3[i, :] = unit_activity_layer_6[0].detach().cpu().clone().numpy()
all_unit_activity_Conv2d_4[i, :] = unit_activity_layer_8[0].detach().cpu().clone().numpy()
all_unit_activity_Conv2d_5[i, :] = unit_activity_layer_10[0].detach().cpu().clone().numpy()
# Saving the properties of sample stimuli used for calculating intrinsic dimension
scipy.io.savemat(saving_folder + '/feature_sample_artiphysiology.mat', mdict = {'feature_sample_artiphysiology': feature_sample_artiphysiology})
### Calculating the variance explained by PCA
PCA_layer_1 = PCA(n_components = number_PCA_component).fit(all_unit_activity_Conv2d_1.reshape(num_sample_artiphysiology, -1))
PCA_layer_2 = PCA(n_components = number_PCA_component).fit(all_unit_activity_Conv2d_2.reshape(num_sample_artiphysiology, -1))
PCA_layer_3 = PCA(n_components = number_PCA_component).fit(all_unit_activity_Conv2d_3.reshape(num_sample_artiphysiology, -1))
PCA_layer_4 = PCA(n_components = number_PCA_component).fit(all_unit_activity_Conv2d_4.reshape(num_sample_artiphysiology, -1))
PCA_layer_5 = PCA(n_components = number_PCA_component).fit(all_unit_activity_Conv2d_5.reshape(num_sample_artiphysiology, -1))
all_PCA_explained_variance_layer_1[simulation_counter, group_counter, layer_freeze_counter, :] = PCA_layer_1.explained_variance_ratio_
all_PCA_explained_variance_layer_2[simulation_counter, group_counter, layer_freeze_counter, :] = PCA_layer_2.explained_variance_ratio_
all_PCA_explained_variance_layer_3[simulation_counter, group_counter, layer_freeze_counter, :] = PCA_layer_3.explained_variance_ratio_
all_PCA_explained_variance_layer_4[simulation_counter, group_counter, layer_freeze_counter, :] = PCA_layer_4.explained_variance_ratio_
all_PCA_explained_variance_layer_5[simulation_counter, group_counter, layer_freeze_counter, :] = PCA_layer_5.explained_variance_ratio_
### Calculating the mutual information of original and nuisance stimuli with layers' activities
# The indices of consecutive convolutional layers: (0, 3, 6, 8, 10)
# The sizes of consecutive convolutional layers: (55, 27, 13, 13, 13)
# The positions of central units of consecutive convolutional layers: (27, 13, 6, 6, 6)
# The number of channels of consecutive convolutional layers: (64, 192, 384, 256, 256)
phase_count = 20
counter = -1
x_tensor_training_original = np.zeros((len(SF_training) * len(Ori_training) * phase_count, 3, 224, 224), dtype = np.float32)
x_tensor_training_noise = np.zeros((len(SF_training) * len(Ori_training) * phase_count, 3, 224, 224), dtype = np.float32)
all_unit_activity_MI_Conv2d_1 = np.zeros((len(SF_training) * len(Ori_training) * phase_count, 64, 55, 55), dtype = np.float32)
all_unit_activity_MI_Conv2d_2 = np.zeros((len(SF_training) * len(Ori_training) * phase_count, 192, 27, 27), dtype = np.float32)
all_unit_activity_MI_Conv2d_3 = np.zeros((len(SF_training) * len(Ori_training) * phase_count, 384, 13, 13), dtype = np.float32)
all_unit_activity_MI_Conv2d_4 = np.zeros((len(SF_training) * len(Ori_training) * phase_count, 256, 13, 13), dtype = np.float32)
all_unit_activity_MI_Conv2d_5 = np.zeros((len(SF_training) * len(Ori_training) * phase_count, 256, 13, 13), dtype = np.float32)
for i in range(len(SF_training)):
for j in range(len(Ori_training)):
phase = np.random.permutation(180)[:phase_count]
for k in range(phase_count):
counter = counter + 1
indices_training_1 = torch.tensor(z_val_training[i, j, phase[k]], dtype = torch.long)
indices_training_2 = torch.tensor(z_val_training[int(len(SF_training) / 2 + 0.5) - 1, j, phase[k]], dtype = torch.long)
x_sample = torch.index_select(x_tensor_training, 0, indices_training_1)
x_sample = x_sample.cuda(gpu)
unit_activity_layer_0 = model.features[0](x_sample)
unit_activity_layer_1 = model.features[1](unit_activity_layer_0)
unit_activity_layer_2 = model.features[2](unit_activity_layer_1)
unit_activity_layer_3 = model.features[3](unit_activity_layer_2)
unit_activity_layer_4 = model.features[4](unit_activity_layer_3)
unit_activity_layer_5 = model.features[5](unit_activity_layer_4)
unit_activity_layer_6 = model.features[6](unit_activity_layer_5)
unit_activity_layer_7 = model.features[7](unit_activity_layer_6)
unit_activity_layer_8 = model.features[8](unit_activity_layer_7)
unit_activity_layer_9 = model.features[9](unit_activity_layer_8)
unit_activity_layer_10 = model.features[10](unit_activity_layer_9)
unit_activity_layer_11 = model.features[11](unit_activity_layer_10)
unit_activity_layer_12 = model.features[12](unit_activity_layer_11)
x_tensor_training_original[counter, :] = torch.index_select(x_tensor_training, 0, indices_training_1).detach().cpu().clone().numpy()
x_tensor_training_noise[counter, :] = (torch.index_select(x_tensor_training, 0, indices_training_1) - torch.index_select(x_tensor_training, 0, indices_training_2)).cuda(gpu)[0].detach().cpu().clone().numpy()
all_unit_activity_MI_Conv2d_1[counter, :] = unit_activity_layer_0[0].detach().cpu().clone().numpy()
all_unit_activity_MI_Conv2d_2[counter, :] = unit_activity_layer_3[0].detach().cpu().clone().numpy()
all_unit_activity_MI_Conv2d_3[counter, :] = unit_activity_layer_6[0].detach().cpu().clone().numpy()
all_unit_activity_MI_Conv2d_4[counter, :] = unit_activity_layer_8[0].detach().cpu().clone().numpy()
all_unit_activity_MI_Conv2d_5[counter, :] = unit_activity_layer_10[0].detach().cpu().clone().numpy()
### Calculating the mutual information between the original stimuli and layers activities
all_simulation_all_MI_original[simulation_counter, group_counter, 0, layer_freeze_counter] = EDGE(x_tensor_training_original.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1), all_unit_activity_MI_Conv2d_1.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1),
U = 10, gamma = [1, 1], epsilon_vector = 'range', eps_range_factor = 0.1, normalize_epsilon = False, ensemble_estimation = 'median', L_ensemble = 5, hashing = 'p-stable', stochastic = False)
all_simulation_all_MI_original[simulation_counter, group_counter, 1, layer_freeze_counter] = EDGE(x_tensor_training_original.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1), all_unit_activity_MI_Conv2d_2.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1),
U = 10, gamma = [1, 1], epsilon_vector = 'range', eps_range_factor = 0.1, normalize_epsilon = False, ensemble_estimation = 'median', L_ensemble = 5, hashing = 'p-stable', stochastic = False)
all_simulation_all_MI_original[simulation_counter, group_counter, 2, layer_freeze_counter] = EDGE(x_tensor_training_original.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1), all_unit_activity_MI_Conv2d_3.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1),
U = 10, gamma = [1, 1], epsilon_vector = 'range', eps_range_factor = 0.1, normalize_epsilon = False, ensemble_estimation = 'median', L_ensemble = 5, hashing = 'p-stable', stochastic = False)
all_simulation_all_MI_original[simulation_counter, group_counter, 3, layer_freeze_counter] = EDGE(x_tensor_training_original.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1), all_unit_activity_MI_Conv2d_4.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1),
U = 10, gamma = [1, 1], epsilon_vector = 'range', eps_range_factor = 0.1, normalize_epsilon = False, ensemble_estimation = 'median', L_ensemble = 5, hashing = 'p-stable', stochastic = False)
all_simulation_all_MI_original[simulation_counter, group_counter, 4, layer_freeze_counter] = EDGE(x_tensor_training_original.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1), all_unit_activity_MI_Conv2d_5.mean(axis = 1).reshape(len(SF_training) * len(Ori_training) * phase_count, -1),
U = 10, gamma = [1, 1], epsilon_vector = 'range', eps_range_factor = 0.1, normalize_epsilon = False, ensemble_estimation = 'median', L_ensemble = 5, hashing = 'p-stable', stochastic = False)
### Calculating the mutual | |
<gh_stars>0
# coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: <EMAIL>
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class ReasonStackFrame(object):
    """A single frame of a crash-group reason stack trace.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    # Allowed enum values for `language`. The generator emitted hyphenated
    # attribute names (Objective-C / Objective-Cpp), which are not valid
    # Python identifiers and made this module fail to import. The attribute
    # names are fixed here; the string (wire) values are unchanged.
    JavaScript = "JavaScript"
    CSharp = "CSharp"
    ObjectiveC = "Objective-C"
    ObjectiveCpp = "Objective-Cpp"
    Cpp = "Cpp"
    C = "C"
    Swift = "Swift"
    Java = "Java"
    Unknown = "Unknown"
    # swagger_types: attribute name -> attribute type
    # attribute_map: attribute name -> JSON key in the API definition
    swagger_types = {
        'class_name': 'string',
        'method': 'string',
        'class_method': 'boolean',
        'file': 'string',
        'line': 'integer',
        'app_code': 'boolean',
        'framework_name': 'string',
        'code_formatted': 'string',
        'code_raw': 'string',
        'language': 'string',
        'method_params': 'string',
        'exception_type': 'string',
        'os_exception_type': 'string'
    }
    attribute_map = {
        'class_name': 'class_name',
        'method': 'method',
        'class_method': 'class_method',
        'file': 'file',
        'line': 'line',
        'app_code': 'app_code',
        'framework_name': 'framework_name',
        'code_formatted': 'code_formatted',
        'code_raw': 'code_raw',
        'language': 'language',
        'method_params': 'method_params',
        'exception_type': 'exception_type',
        'os_exception_type': 'os_exception_type'
    }
def __init__(self, class_name=None, method=None, class_method=None, file=None, line=None, app_code=None, framework_name=None, code_formatted=None, code_raw=None, language=None, method_params=None, exception_type=None, os_exception_type=None): # noqa: E501
"""ReasonStackFrame - a model defined in Swagger""" # noqa: E501
self._class_name = None
self._method = None
self._class_method = None
self._file = None
self._line = None
self._app_code = None
self._framework_name = None
self._code_formatted = None
self._code_raw = None
self._language = None
self._method_params = None
self._exception_type = None
self._os_exception_type = None
self.discriminator = None
if class_name is not None:
self.class_name = class_name
if method is not None:
self.method = method
if class_method is not None:
self.class_method = class_method
if file is not None:
self.file = file
if line is not None:
self.line = line
if app_code is not None:
self.app_code = app_code
if framework_name is not None:
self.framework_name = framework_name
if code_formatted is not None:
self.code_formatted = code_formatted
if code_raw is not None:
self.code_raw = code_raw
if language is not None:
self.language = language
if method_params is not None:
self.method_params = method_params
if exception_type is not None:
self.exception_type = exception_type
if os_exception_type is not None:
self.os_exception_type = os_exception_type
@property
def class_name(self):
"""Gets the class_name of this ReasonStackFrame. # noqa: E501
name of the class # noqa: E501
:return: The class_name of this ReasonStackFrame. # noqa: E501
:rtype: string
"""
return self._class_name
@class_name.setter
def class_name(self, class_name):
"""Sets the class_name of this ReasonStackFrame.
name of the class # noqa: E501
:param class_name: The class_name of this ReasonStackFrame. # noqa: E501
:type: string
"""
self._class_name = class_name
@property
def method(self):
"""Gets the method of this ReasonStackFrame. # noqa: E501
name of the method # noqa: E501
:return: The method of this ReasonStackFrame. # noqa: E501
:rtype: string
"""
return self._method
@method.setter
def method(self, method):
"""Sets the method of this ReasonStackFrame.
name of the method # noqa: E501
:param method: The method of this ReasonStackFrame. # noqa: E501
:type: string
"""
self._method = method
@property
def class_method(self):
"""Gets the class_method of this ReasonStackFrame. # noqa: E501
is a class method # noqa: E501
:return: The class_method of this ReasonStackFrame. # noqa: E501
:rtype: boolean
"""
return self._class_method
@class_method.setter
def class_method(self, class_method):
"""Sets the class_method of this ReasonStackFrame.
is a class method # noqa: E501
:param class_method: The class_method of this ReasonStackFrame. # noqa: E501
:type: boolean
"""
self._class_method = class_method
@property
def file(self):
"""Gets the file of this ReasonStackFrame. # noqa: E501
name of the file # noqa: E501
:return: The file of this ReasonStackFrame. # noqa: E501
:rtype: string
"""
return self._file
@file.setter
def file(self, file):
"""Sets the file of this ReasonStackFrame.
name of the file # noqa: E501
:param file: The file of this ReasonStackFrame. # noqa: E501
:type: string
"""
self._file = file
@property
def line(self):
"""Gets the line of this ReasonStackFrame. # noqa: E501
line number # noqa: E501
:return: The line of this ReasonStackFrame. # noqa: E501
:rtype: integer
"""
return self._line
@line.setter
def line(self, line):
"""Sets the line of this ReasonStackFrame.
line number # noqa: E501
:param line: The line of this ReasonStackFrame. # noqa: E501
:type: integer
"""
self._line = line
@property
def app_code(self):
"""Gets the app_code of this ReasonStackFrame. # noqa: E501
this line isn't from any framework # noqa: E501
:return: The app_code of this ReasonStackFrame. # noqa: E501
:rtype: boolean
"""
return self._app_code
@app_code.setter
def app_code(self, app_code):
"""Sets the app_code of this ReasonStackFrame.
this line isn't from any framework # noqa: E501
:param app_code: The app_code of this ReasonStackFrame. # noqa: E501
:type: boolean
"""
self._app_code = app_code
@property
def framework_name(self):
"""Gets the framework_name of this ReasonStackFrame. # noqa: E501
Name of the framework # noqa: E501
:return: The framework_name of this ReasonStackFrame. # noqa: E501
:rtype: string
"""
return self._framework_name
@framework_name.setter
def framework_name(self, framework_name):
"""Sets the framework_name of this ReasonStackFrame.
Name of the framework # noqa: E501
:param framework_name: The framework_name of this ReasonStackFrame. # noqa: E501
:type: string
"""
self._framework_name = framework_name
@property
def code_formatted(self):
"""Gets the code_formatted of this ReasonStackFrame. # noqa: E501
Formatted frame string # noqa: E501
:return: The code_formatted of this ReasonStackFrame. # noqa: E501
:rtype: string
"""
return self._code_formatted
@code_formatted.setter
def code_formatted(self, code_formatted):
"""Sets the code_formatted of this ReasonStackFrame.
Formatted frame string # noqa: E501
:param code_formatted: The code_formatted of this ReasonStackFrame. # noqa: E501
:type: string
"""
self._code_formatted = code_formatted
@property
def code_raw(self):
"""Gets the code_raw of this ReasonStackFrame. # noqa: E501
Unformatted Frame string # noqa: E501
:return: The code_raw of this ReasonStackFrame. # noqa: E501
:rtype: string
"""
return self._code_raw
@code_raw.setter
def code_raw(self, code_raw):
"""Sets the code_raw of this ReasonStackFrame.
Unformatted Frame string # noqa: E501
:param code_raw: The code_raw of this ReasonStackFrame. # noqa: E501
:type: string
"""
self._code_raw = code_raw
@property
def language(self):
"""Gets the language of this ReasonStackFrame. # noqa: E501
programming language of the frame # noqa: E501
:return: The language of this ReasonStackFrame. # noqa: E501
:rtype: string
"""
return self._language
@language.setter
def language(self, language):
"""Sets the language of this ReasonStackFrame.
programming language of the frame # noqa: E501
:param language: The language of this ReasonStackFrame. # noqa: E501
:type: string
"""
allowed_values = [undefined, undefined, undefined, undefined, undefined, undefined, undefined, undefined, undefined, ] # noqa: E501
self._language = language
@property
def method_params(self):
"""Gets the method_params of this ReasonStackFrame. # noqa: E501
parameters of the frames method # noqa: E501
:return: The method_params of this ReasonStackFrame. # noqa: E501
:rtype: string
"""
return self._method_params
@method_params.setter
def method_params(self, method_params):
"""Sets the method_params of this ReasonStackFrame.
parameters of the frames method # noqa: E501
:param method_params: The method_params of this ReasonStackFrame. # noqa: E501
:type: string
"""
self._method_params = method_params
@property
def exception_type(self):
"""Gets the exception_type of this ReasonStackFrame. # noqa: E501
Exception type. # noqa: E501
:return: The exception_type of this ReasonStackFrame. # noqa: E501
:rtype: string
"""
return self._exception_type
@exception_type.setter
def exception_type(self, exception_type):
"""Sets the exception_type of this ReasonStackFrame.
Exception type. # noqa: E501
:param exception_type: The exception_type of this ReasonStackFrame. # noqa: E501
:type: string
"""
self._exception_type = exception_type
@property
def os_exception_type(self):
"""Gets the os_exception_type of this ReasonStackFrame. # noqa: E501
OS exception type. (aka. SIGNAL) # noqa: E501
:return: The os_exception_type of this ReasonStackFrame. # noqa: E501
:rtype: string
"""
return self._os_exception_type
@os_exception_type.setter
def os_exception_type(self, os_exception_type):
"""Sets the os_exception_type of this ReasonStackFrame.
OS exception type. (aka. SIGNAL) # noqa: E501
:param os_exception_type: The os_exception_type of this ReasonStackFrame. # noqa: E501
:type: string
"""
self._os_exception_type = os_exception_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] | |
#!/usr/bin/env python
from __future__ import print_function
import os
from os.path import splitext, join, isfile, isdir, basename
import argparse
import numpy as np
# from scipy import misc, ndimage
import tensorflow.keras.backend as K
from tensorflow.keras.models import model_from_json, load_model
import tensorflow as tf
import layers_builder as layers
from glob import glob
from utils import utils
from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
import cv2
import math
from PIL import Image
# -- Fix for macos, uncomment it
# import matplotlib
# matplotlib.use('TkAgg')
# --
import matplotlib.pyplot as plt
from ade20k_labels import ade20k_label_dict as ade20k_labels
from imageio import imread
# Per-channel means of the ImageNet-pretrained ResNet, subtracted from input
# images before inference (see PSPNet.predict).
DATA_MEAN = np.array([[[123.68, 116.779, 103.939]]])  # RGB order
class PSPNet(object):
    """Pyramid Scene Parsing Network (Zhao et al., CVPR 2017)."""

    def __init__(self, nb_classes, resnet_layers, input_shape, weights):
        """Build or load a PSPNet.

        Args:
            nb_classes: number of segmentation classes.
            resnet_layers: depth of the ResNet backbone (50 or 101).
            input_shape: (rows, cols) the network expects.
            weights: either a known "pspnet*" weight-set name (loaded from
                weights/keras, or imported from weights/npy if missing) or a
                path to a saved Keras model.
        """
        self.input_shape = input_shape
        self.num_classes = nb_classes
        json_path = join("weights", "keras", weights + ".json")
        h5_path = join("weights", "keras", weights + ".h5")
        if 'pspnet' in weights:
            if os.path.isfile(json_path) and os.path.isfile(h5_path):
                print("Keras model & weights found, loading...")
                # Interp is a custom layer defined in layers_builder
                with CustomObjectScope({'Interp': layers.Interp}):
                    with open(json_path) as file_handle:
                        self.model = model_from_json(file_handle.read())
                self.model.load_weights(h5_path)
            else:
                print("No Keras model & weights found, import from npy weights.")
                self.model = layers.build_pspnet(nb_classes=nb_classes,
                                                 resnet_layers=resnet_layers,
                                                 input_shape=self.input_shape)
                self.set_npy_weights(weights)
        else:
            print('Load pre-trained weights')
            self.model = load_model(weights)

    def predict(self, img, flip_evaluation=False):
        """Predict segmentation probabilities for one image.

        Args:
            img: RGB uint8/float array of shape rows x cols x 3; resized to
                the network input shape if it does not match.
            flip_evaluation: also evaluate the horizontal flip and average.

        Returns:
            Per-pixel class probabilities at network resolution.
        """
        if img.shape[0:2] != self.input_shape:
            print(
                "Input %s not fitting for network size %s, resizing. You may want to try sliding prediction for better results." % (
                    img.shape[0:2], self.input_shape))
            # NOTE(review): PIL resize takes (width, height) but receives
            # (rows, cols); harmless for the square inputs used here — confirm
            # before using non-square input shapes.
            img = np.array(Image.fromarray(img).resize(size=self.input_shape))
        # Preprocess the way the ImageNet-pretrained ResNet expects:
        # subtract channel means, then flip RGB => BGR.
        img = img - DATA_MEAN
        img = img[:, :, ::-1]
        img = img.astype('float32')
        probs = self.feed_forward(img, flip_evaluation)
        return probs

    def predict_sliding(self, full_img, flip_evaluation):
        """Predict on overlapping tiles of exactly the network input shape.

        Nothing gets squeezed: each tile is padded (not resized) to the input
        shape, and overlapping predictions are averaged.
        """
        tile_size = self.input_shape
        classes = self.num_classes
        overlap = 1 / 3
        stride = math.ceil(tile_size[0] * (1 - overlap))
        # strided-convolution formula for the number of tiles
        tile_rows = max(int(math.ceil((full_img.shape[0] - tile_size[0]) / stride) + 1), 1)
        tile_cols = max(int(math.ceil((full_img.shape[1] - tile_size[1]) / stride) + 1), 1)
        print("Need %i x %i prediction tiles @ stride %i px" % (tile_cols, tile_rows, stride))
        full_probs = np.zeros((full_img.shape[0], full_img.shape[1], classes))
        count_predictions = np.zeros((full_img.shape[0], full_img.shape[1], classes))
        tile_counter = 0
        for row in range(tile_rows):
            for col in range(tile_cols):
                x1 = int(col * stride)
                y1 = int(row * stride)
                x2 = min(x1 + tile_size[1], full_img.shape[1])
                y2 = min(y1 + tile_size[0], full_img.shape[0])
                x1 = max(int(x2 - tile_size[1]), 0)  # for portrait images the x1 underflows sometimes
                y1 = max(int(y2 - tile_size[0]), 0)  # for very few rows y1 underflows
                img = full_img[y1:y2, x1:x2]
                padded_img = self.pad_image(img, tile_size)
                # (removed leftover debug plt.imshow/plt.show here: it opened
                # a blocking window for every tile, stalling batch prediction)
                tile_counter += 1
                print("Predicting tile %i" % tile_counter)
                padded_prediction = self.predict(padded_img, flip_evaluation)
                prediction = padded_prediction[0:img.shape[0], 0:img.shape[1], :]
                count_predictions[y1:y2, x1:x2] += 1
                # accumulate the predictions also in the overlapping regions
                full_probs[y1:y2, x1:x2] += prediction
        # average the predictions in the overlapping regions
        full_probs /= count_predictions
        return full_probs

    @staticmethod
    def pad_image(img, target_size):
        """Zero-pad an image at the bottom/right up to the target size."""
        rows_missing = target_size[0] - img.shape[0]
        cols_missing = target_size[1] - img.shape[1]
        padded_img = np.pad(img, ((0, rows_missing), (0, cols_missing), (0, 0)), 'constant')
        return padded_img

    def predict_multi_scale(self, img, flip_evaluation, sliding_evaluation, scales):
        """Average predictions of the image evaluated at several scales."""
        full_probs = np.zeros((img.shape[0], img.shape[1], self.num_classes))
        h_ori, w_ori = img.shape[:2]
        print("Started prediction...")
        for scale in scales:
            print("Predicting image scaled by %f" % scale)
            # NOTE(review): PIL resize takes (width, height) but is given
            # (rows, cols) — only safe for square inputs; confirm otherwise.
            scaled_img = np.array(Image.fromarray(img).resize(
                size=(int(scale * img.shape[0]), int(scale * img.shape[1])),
                resample=Image.BILINEAR))
            if sliding_evaluation:
                scaled_probs = self.predict_sliding(scaled_img, flip_evaluation)
            else:
                scaled_probs = self.predict(scaled_img, flip_evaluation)
            # scale probs back up to the original image size
            probs = cv2.resize(scaled_probs, (w_ori, h_ori))
            full_probs += probs
        full_probs /= len(scales)
        print("Finished prediction...")
        return full_probs

    def feed_forward(self, data, flip_evaluation=False):
        """Run the network on one preprocessed image.

        With flip_evaluation, the horizontal flip is also predicted and the
        two results are averaged (flip the flipped prediction back first).
        """
        assert data.shape == (self.input_shape[0], self.input_shape[1], 3)
        if flip_evaluation:
            print("Predict flipped")
            input_with_flipped = np.array([data, np.flip(data, axis=1)])
            prediction_with_flipped = self.model.predict(input_with_flipped)
            prediction = (prediction_with_flipped[0] +
                          np.fliplr(prediction_with_flipped[1])) / 2.0
        else:
            prediction = self.model.predict(np.expand_dims(data, 0))[0]
        return prediction

    def set_npy_weights(self, weights_path):
        """Import converted .npy weights and cache them as Keras json/h5 files."""
        npy_weights_path = join("weights", "npy", weights_path + ".npy")
        json_path = join("weights", "keras", weights_path + ".json")
        h5_path = join("weights", "keras", weights_path + ".h5")
        print("Importing weights from %s" % npy_weights_path)
        # allow_pickle=True is required on NumPy >= 1.16.3: the file stores a
        # pickled dict of layer name -> parameter arrays.
        weights = np.load(npy_weights_path, encoding='bytes',
                          allow_pickle=True).item()
        for layer in self.model.layers:
            print(layer.name)
            if layer.name[:4] == 'conv' and layer.name[-2:] == 'bn':
                mean = weights[layer.name.encode()][
                    'mean'.encode()].reshape(-1)
                variance = weights[layer.name.encode()][
                    'variance'.encode()].reshape(-1)
                scale = weights[layer.name.encode()][
                    'scale'.encode()].reshape(-1)
                offset = weights[layer.name.encode()][
                    'offset'.encode()].reshape(-1)
                self.model.get_layer(layer.name).set_weights(
                    [scale, offset, mean, variance])
            elif layer.name[:4] == 'conv' and not layer.name[-4:] == 'relu':
                # Bind the weight lookup before the try block: the original
                # looked it up inside the try, so a failed lookup fell into
                # the except branch and crashed on an unbound `weight`.
                layer_weights = weights[layer.name.encode()]
                weight = layer_weights['weights'.encode()]
                try:
                    self.model.get_layer(layer.name).set_weights([weight])
                except Exception:
                    # layer also carries a bias term
                    biases = layer_weights['biases'.encode()]
                    self.model.get_layer(layer.name).set_weights([weight,
                                                                  biases])
        print('Finished importing weights.')
        print("Writing keras model & weights")
        json_string = self.model.to_json()
        with open(json_path, 'w') as file_handle:
            file_handle.write(json_string)
        self.model.save_weights(h5_path)
        print("Finished writing Keras model & weights")
class PSPNet50(PSPNet):
    """PSPNet variant backed by a 50-layer ResNet feature extractor."""

    def __init__(self, nb_classes, weights, input_shape):
        # Delegate to the shared PSPNet constructor, pinning the backbone depth.
        super(PSPNet50, self).__init__(nb_classes=nb_classes,
                                       resnet_layers=50,
                                       input_shape=input_shape,
                                       weights=weights)
class PSPNet101(PSPNet):
    """PSPNet variant backed by a 101-layer ResNet feature extractor."""

    def __init__(self, nb_classes, weights, input_shape):
        # Delegate to the shared PSPNet constructor, pinning the backbone depth.
        super(PSPNet101, self).__init__(nb_classes=nb_classes,
                                        resnet_layers=101,
                                        input_shape=input_shape,
                                        weights=weights)
def update_weights_json_to_python36p(dataset):
    """Rebuild the PSPNet architecture for *dataset* and rewrite its Keras JSON.

    Known datasets are 'cityscapes' (PSPNet101, 713x713, 19 classes) and
    'ade20k' (PSPNet50, 473x473, 150 classes); any other value is a no-op,
    matching the original behavior.
    """
    configs = {
        'cityscapes': (19, 101, (713, 713), 'weights/keras/pspnet101_cityscapes.json'),
        'ade20k': (150, 50, (473, 473), 'weights/keras/pspnet50_ade20k.json'),
    }
    if dataset not in configs:
        return
    nb_classes, resnet_layers, input_shape, json_path = configs[dataset]
    model = layers.build_pspnet(nb_classes=nb_classes,
                                resnet_layers=resnet_layers,
                                input_shape=input_shape,
                                activation='softmax')
    with open(json_path, 'w') as json_file:
        json_file.write(model.to_json())
def main(args):
    """Segment one image (or a glob of images) with a PSPNet model and write
    a sky/building/tree mask per image under a 'superpixels2' subfolder.

    NOTE(review): relies on module-level globals defined elsewhere in the
    file (`parser`, `tf`, `K`, `utils`, `ade20k_labels`, `imread`, ...).
    """
    # Handle input and output args
    images = sorted(glob(args.glob_path))[::args.glob_interval] if args.glob_path else [args.input_path, ]
    if args.glob_path:
        fn, ext = splitext(args.output_path)
        if ext:
            # NOTE(review): `parser` is the module-level ArgumentParser from
            # the __main__ block; calling main() from another module would
            # NameError here — confirm intended.
            parser.error("output_path should be a folder for multiple file input")
        if not isdir(args.output_path):
            os.mkdir(args.output_path)
    # Predict
    os.environ["CUDA_VISIBLE_DEVICES"] = args.id
    sess = tf.Session()
    K.set_session(sess)
    with sess.as_default():
        print(args)
        if not args.weights:
            # Choose the pretrained architecture from the model name.
            if "pspnet50" in args.model:
                pspnet = PSPNet50(nb_classes=150, input_shape=(473, 473),
                                  weights=args.model)
            elif "pspnet101" in args.model:
                if "cityscapes" in args.model:
                    pspnet = PSPNet101(nb_classes=19, input_shape=(713, 713),
                                       weights=args.model)
                if "voc2012" in args.model:
                    pspnet = PSPNet101(nb_classes=21, input_shape=(473, 473),
                                       weights=args.model)
            else:
                # NOTE(review): `pspnet` stays unbound on this path; the loop
                # below would raise NameError — confirm unreachable via the
                # argparse `choices` restriction.
                print("Network architecture not implemented.")
        else:
            # pspnet = PSPNet50(nb_classes=2, input_shape=(
            #     768, 480), weights=args.weights)
            pspnet = PSPNet50(nb_classes=2, input_shape=(
                473, 473), weights=args.weights)
        EVALUATION_SCALES = [1.0]
        if args.multi_scale:
            EVALUATION_SCALES = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]  # must be all floats! Taken from original paper
        for i, img_path in enumerate(images):
            # if '287799' not in img_path: continue
            print("Processing image {} / {}".format(i + 1, len(images)))
            img = imread(img_path, pilmode='RGB')
            # Zero out two fixed corner triangles before prediction
            # (presumably camera overlays; coordinates assume a specific
            # input resolution — TODO confirm).
            cv2.fillPoly(img, [np.array([[700, 0], [799, 100], [799, 0]])], 0)
            cv2.fillPoly(img, [np.array([[0, 100], [100, 0], [0, 0]])], 0)
            probs = pspnet.predict_multi_scale(img, args.flip, args.sliding, EVALUATION_SCALES)
            # cm = np.argmax(probs, axis=2)
            # Probabilities and class indices sorted best-first along axis 2.
            pm = np.sort(probs, axis=2)[..., ::-1]
            cm = np.argsort(probs, axis=2)[..., ::-1]
            # pm = probs[cm[..., predlayer_i]]
            colored_class_image = utils.color_class_image(cm[..., 0], args.model)
            alpha_blended = 0.5 * colored_class_image + 0.5 * img
            if args.glob_path:
                input_filename, ext = splitext(basename(img_path))
                filename = join(args.output_path, input_filename)
            else:
                filename, ext = splitext(args.output_path)
            # cv2.imwrite(filename + "_seg_read" + ext, cm)
            # cv2.imwrite(filename + "_seg" + ext, colored_class_image)
            # cv2.imwrite(filename + "_probs" + ext, pm)
            # cv2.imwrite(filename + "_seg_blended" + ext, alpha_blended)
            # Build a gray-level mask from the colored segmentation:
            # 0 = sky, 205 = building, 155 = tree, 255 = everything else.
            sky_orig = np.where((colored_class_image != ade20k_labels['sky']).any(axis=-1), 255, 0).astype(np.uint8)
            mask_building = np.where((colored_class_image == ade20k_labels['building']).any(axis=-1))
            mask_tree = np.where((colored_class_image == ade20k_labels['tree']).any(axis=-1))
            sky_orig[mask_building[0], mask_building[1]] = 205
            sky_orig[mask_tree[0], mask_tree[1]] = 155
            # Bottom third of the frame is forced to non-sky.
            sky_orig[int(sky_orig.shape[0] * 2 / 3):, ...] = 255
            fn_split = filename.split('/')
            cv2.imwrite('/'.join(fn_split[:-1] + ['/superpixels2/'] + fn_split[-1:]) + ext,
                        sky_orig.astype(np.uint8))
            # msk_border_np = \
            #     cv2.dilate(sky_orig.astype('uint8'), cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (50, 50)),
            #                iterations=1) - \
            #     cv2.erode(sky_orig.astype('uint8'), cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (100, 100)),
            #               iterations=1)
            # sky = np.where(msk_border_np, 128, sky_orig)
            # sky = np.where((colored_class_image == [180, 120, 120]).all(axis=-1) * (sky == 128), 255, sky).astype(np.uint8)
            # sky[int(sky.shape[0] * 2 / 3):, ...] = 255
            # fn_split = filename.split('/')
            # cv2.imwrite('/'.join(fn_split[:-1] + ['/trimap/'] + fn_split[-1:]) + ext, sky)
            # cv2.imwrite('/'.join(fn_split[:-1] + ['/image/'] + fn_split[-1:]) + ext, img[..., ::-1])
            # cv2.imwrite('/'.join(fn_split[:-1] + ['/seg_orig/'] + fn_split[-1:]) + ext,
            #             np.where(sky_orig[..., None], img[..., ::-1], img[..., ::-1] // 2))
            # cm2 = np.where(pm[..., 1] > pm[..., 0] / 10, cm[..., 1], cm[..., 0])
            # # colored_class_image2 = utils.color_class_image(cm2, args.model)
            # colored_class_image2 = utils.color_class_image(cm[...,1], args.model)
            # sky_orig2 = np.where((colored_class_image2 != [6, 230, 230]).any(axis=-1), 255, 0).astype(np.uint8)
            # sky_multilayer = np.dstack([sky_orig, sky_orig2, sky_orig2])
            # cv2.imwrite('/'.join(fn_split[:-1] + ['/superpixels/'] + fn_split[-1:]) + ext, sky_multilayer.astype(np.uint8))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', type=str, default='pspnet50_ade20k',
help='Model/Weights to use',
choices=['pspnet50_ade20k',
'pspnet101_cityscapes',
'pspnet101_voc2012'])
parser.add_argument('-w', '--weights', type=str, default=None)
# parser.add_argument('-i', '--input_path', type=str, default='example_images/frame-000000115873.png', help='Path the input image')
# parser.add_argument('-g', '--glob_path', type=str, default=None, help='Glob path for multiple images')
| |
<gh_stars>0
from flask_restplus import Namespace, fields
import socket
import time
from datetime import datetime, date, time, timedelta
from flask import jsonify, request
from database import DB
from bson import json_util, ObjectId
from apis.utils.common import *
from apis.libraries.send_mail import Send_mail
import requests
import logging
import apis.utils.message_constants as MSG_CONST
import apis.utils.constants as CONST
from werkzeug.security import generate_password_hash, check_password_hash
import sys
import dateutil
import isodate
import datetime
import time
tbl_u002_booking = "u002_booking"
tbl_v004_outlets = "v004_outlets"
tbl_u001_users = "u001_users"
tbl_v012_services = "v012_services"
tbl_a001_email_template = "a001_email_template"
def getAvailableSlotModel(self, ns):
    """Declare the request schema for the "get available slots" endpoint.

    Args:
        ns: flask_restplus Namespace the model is registered on.

    Returns:
        The registered flask_restplus model object.
    """
    getSlot = ns.model('getAvailableSlotModel', {
        'outlet_id': fields.String(required=False, description="Provide newly added outlet id"),
        'user_id': fields.String(required=True, min_length=1, example=1, description='User id is required'),
        'vendor_id': fields.String(required=True, min_length=1, example=1, description='vendor id is required'),
        'service_id': fields.String(required=True, min_length=1, description='Service id is required'),
        "date": fields.String(required=False, description='Outlet service'),
        "gender": fields.String(required=True, description='Massager gender'),
        # Typo fixed: "DUration" -> "Duration" (shows up in generated API docs).
        "duration": fields.Integer(required=True, description='Duration of service'),
    })
    return getSlot
def createOrderModel(self, ns):
    """Declare the request schema for the "create order/booking" endpoint.

    Args:
        ns: flask_restplus Namespace the model is registered on.

    Returns:
        The registered flask_restplus model object.
    """
    # Bug fix: this model was registered under the duplicate name
    # 'getAvailableSlotModel', colliding with the slot-lookup model above.
    createOrder = ns.model('createOrderModel', {
        'outlet_id': fields.String(required=True, description="Provide newly added outlet id"),
        'user_id': fields.String(required=True, min_length=1, example=1, description='User id is required'),
        'vendor_id': fields.String(required=True, min_length=1, example=1, description='vendor id is required'),
        # Description fixed: this field is the booking date, not a service id.
        'booking_date': fields.String(required=True, min_length=1, description='Booking date is required'),
        "payment_method": fields.String(required=True, description='Provide valid payment method'),
        "guest_details": fields.List(fields.Raw, required=False, description='Massager gender')
    })
    return createOrder
def get_booking_data(self, outlet_id):
    """Return all confirmed (status == 2) bookings for an outlet.

    Args:
        outlet_id: string ObjectId of the outlet; an empty string skips the
            DB query and yields an empty booking list.

    Returns:
        output_json payload whose data carries a 'booking' list.
    """
    # Removed: an unused `selectField` projection and a large block of
    # commented-out upcoming/history experiment code.
    data = {}
    if outlet_id != "":
        booking = DB.find_all_where(
            "u002_booking",
            {"$and": [{"spa.outlet_id": ObjectId(outlet_id), "status": int(2)}]},
            "ALL")
        data['booking'] = booking
    else:
        data['booking'] = []
    return output_json(data, MSG_CONST.VENDOR_BOOKINGDATA_SUCCESS, True, 200)
def get_calendar_data(self, outlet_id):
    """Flatten every booked service of an outlet into calendar entries.

    Each service dict is augmented in place with human-readable start/end
    times, the outlet name, the outlet id and the parent booking id.

    Args:
        outlet_id: string ObjectId of the outlet; an empty string skips the
            DB lookups and yields an empty list.

    Returns:
        output_json payload: the list of service dicts, or a failure
        response when the booking query returned None.
    """
    # Removed unused locals from the original: todaydate/y/ts timestamps,
    # the never-read `get_calendar` list and `selectField` projection, and
    # a `service_cnt` counter that was reset on every iteration.
    get_service = []
    if outlet_id != "":
        booking = DB.find_all_where(
            "u002_booking",
            {"$and": [{"spa.outlet_id": ObjectId(outlet_id), "status": int(2)}]},
            "ALL")
        outlet_name = DB.find_one(tbl_v004_outlets, {"_id": ObjectId(outlet_id)},
                                  {"name": 1, "_id": 0})
    else:
        booking = []
        outlet_name = []
    for entry in booking:
        for spa_data in entry['spa']:
            for service in spa_data['services']:
                # `datetime` is the module here (re-imported at file top),
                # hence the datetime.datetime qualification.
                service["start_time"] = datetime.datetime.fromtimestamp(
                    service["start_timestamp"]).strftime("%Y-%m-%d %H:%M:%S %p")
                service["end_time"] = datetime.datetime.fromtimestamp(
                    service["end_timestamp"]).strftime("%Y-%m-%d %H:%M:%S %p")
                service['outlet_name'] = outlet_name['name']
                service['outlet_id'] = outlet_id
                service['booking_id'] = entry['_id']['$oid']
                get_service.append(service)
    if booking == None:
        # NOTE(review): if the query really returned None, the loop above
        # would already have raised; kept for parity with the original.
        return output_json("", MSG_CONST.VENDOR_BOOKINGDATA_FAILED, False, 201)
    else:
        return output_json(get_service, MSG_CONST.VENDOR_BOOKINGDATA_SUCCESS, True, 200)
def calendar_status(self):
    """Approve (status 2) or cancel (status 3) a booking from the calendar.

    Reads `outlet_id`, `booking_id` and `status_button` from the JSON body
    and stamps the update with the server's IP and current time.
    """
    todayDate = datetime.datetime.now()
    ip_address = socket.gethostbyname(socket.gethostname())
    requestData = dict(request.json)
    outlet_id = requestData['outlet_id']
    booking_id = requestData['booking_id']
    # Update payload shared by both branches; `status` is added per action.
    cal_update={}
    cal_update['update_ip'] = ip_address
    cal_update['update_date'] = todayDate
    if requestData['status_button'] == "approve":
        cal_update['status'] = 2
        # Existence check on the outlet's bookings before writing.
        cal_status = DB.find_one("u002_booking",{"spa.outlet_id":str(outlet_id)},"ALL")
        if cal_status:
            # `cal_update` is rebound to the DB result from here on.
            cal_update = DB.update_one("u002_booking",cal_update,{"_id":ObjectId(booking_id)})
    elif requestData['status_button'] == "cancel":
        cal_update['status'] = 3
        cal_status = DB.find_one("u002_booking",{"spa.outlet_id":str(outlet_id)},"ALL")
        if cal_status:
            cal_update = DB.update_one("u002_booking",cal_update,{"_id":ObjectId(booking_id)})
    # NOTE(review): when `status_button` is neither value, or the existence
    # check fails, `cal_update` is still the raw payload dict (truthy) and is
    # returned as a success response — confirm this is intended.
    if cal_update == None:
        return output_json({}, MSG_CONST.VENDOR_BOOKINGDATA_FAILED, False, 201)
    else:
        return output_json(cal_update, MSG_CONST.VENDOR_BOOKINGDATA_SUCCESS, True, 200)
"""
Created By : <NAME>
"""
def getAvailableSlot(self):
    """Compute 15-minute booking slots for an outlet/service/date/gender.

    For each slot in the outlet's opening hours on the requested day, counts
    overlapping existing bookings (padded by the service's before/after
    buffers) against the outlet's staff head-count to decide availability.
    """
    status = True
    message = ""
    code = 200
    responseData = {}
    todayDate = datetime.datetime.now()
    ip_address = socket.gethostbyname(socket.gethostname())
    requestData = dict(request.json)
    isAvailable = True
    outlet_id = ObjectId(requestData['outlet_id'])
    userId = ObjectId(requestData['user_id'])
    vendor_id = ObjectId(requestData['vendor_id'])
    serivce_id = ObjectId(requestData['service_id']) if "service_id" in requestData and requestData["service_id"] else ""
    gender = requestData['gender'].lower()
    bookingDateStr = requestData['date']
    # Expected date format: YYYY-MM-DD; used to resolve the weekday name.
    bookingDate = requestData['date'].split("-")
    bookingDay = datetime.datetime(int(bookingDate[0]),int(bookingDate[1]), int(bookingDate[2]))
    bookingDay = bookingDay.strftime("%A").lower()
    gender = [gender.lower()]
    # Duration defaults to 30 minutes when non-positive.
    durationService = requestData["duration"] if requestData["duration"] > 0 else 30
    getOutLetdata = DB.find_one(tbl_v004_outlets,{"_id":outlet_id},{"_id":0,"name":1,"timings."+bookingDay:1,"staff_count":1,"male_count":1,"female_count":1})
    availableSlot = []
    if getOutLetdata:
        isOpen = getOutLetdata["timings"][bookingDay]["isOpen"]
        if isOpen == True or isOpen == "true":
            getBuffer = DB.find_one(tbl_v012_services,{"_id":serivce_id},{"_id":0,"buffer":1,"prices."+str(durationService):1})
            if getBuffer:
                print(getBuffer)
                staff_count = int(getOutLetdata["staff_count"]) if "staff_count" in getOutLetdata else 0
                male_count = int(getOutLetdata["male_count"]) if "male_count" in getOutLetdata else 0
                female_count = int(getOutLetdata["female_count"]) if "female_count" in getOutLetdata else 0
                if requestData['gender'].lower() == "any" and int(male_count) + int(female_count) == 0:
                    responseData = {}
                    message = "No employee available"
                    status = False
                elif requestData['gender'].lower() == 'male' and male_count == 0:
                    responseData = {}
                    message = "No male employee available"
                    status = False
                elif requestData['gender'].lower() == 'female' and female_count == 0:
                    responseData = {}
                    message = "No employee available"
                    status = False
                else:
                    # Setup/cleanup padding around bookings, defaulting to 15 min each.
                    bufferStart = int(getBuffer["buffer"]["before"]) if "buffer" in getBuffer and "before" in getBuffer["buffer"] and getBuffer["buffer"]["before"] else 15
                    bufferEnd = int(getBuffer["buffer"]["after"]) if "buffer" in getBuffer and "after" in getBuffer["buffer"] and getBuffer["buffer"]["after"] else 15
                    slotsList = getOutLetdata["timings"][bookingDay]["slots"]
                    if slotsList:
                        bookingWhere = {}
                        bookingWhere["spa.outlet_id"] = {"$in":[ObjectId(requestData['outlet_id'])]}
                        bookingWhere["spa.services.date"] = {"$eq":requestData['date']}
                        bookingData = DB.find_by_key(tbl_u002_booking,bookingWhere,{"spa.services":1,"_id":0})
                        # Flatten every booked service on that date into one list.
                        boockedService = []
                        for s in range(len(bookingData)):
                            for x in range(len(bookingData[s]["spa"])):
                                for ch in range(len(bookingData[s]["spa"][x]["services"])):
                                    boockedService.append(bookingData[s]["spa"][x]["services"][ch])
                        for sl in range(len(slotsList)):
                            startTime = slotsList[sl]["startTime"]
                            endTime = slotsList[sl]["endTime"]
                            # Slot times look like "H:MM AM"; convert24 is the
                            # 12h -> 24h helper from apis.utils.common (presumably).
                            startTime = startTime.split(" ")
                            startTime = bookingDateStr+ " " + convert24(startTime[0]+":00" + startTime[1])
                            endTime = endTime.split(" ")
                            endTime = bookingDateStr+ " " + convert24(endTime[0]+":00" + endTime[1])
                            startTime = datetime.datetime.strptime(startTime, '%Y-%m-%d %H:%M:%S')
                            endTime = datetime.datetime.strptime(endTime, '%Y-%m-%d %H:%M:%S')
                            i = 0
                            # Walk the opening window in 15-minute steps.
                            while startTime <= endTime:
                                start_timestamp = int(datetime.datetime.timestamp(startTime))
                                isAvailable = True
                                # NOTE(review): keys here are lowercase, but the dict is
                                # indexed with the raw requestData['gender'] below — a
                                # capitalized value would KeyError; confirm callers
                                # always send lowercase.
                                genderCount = {"male":male_count,"female":female_count,"any":male_count + female_count}
                                totalEmployee = male_count + female_count
                                for x in range(len(boockedService)):
                                    startBookedTime = int(boockedService[x]["start_timestamp"]) - (int(bufferStart) * 60)
                                    boockedService[x]["duration"] = int(boockedService[x]["duration"]) if int(boockedService[x]["duration"]) > 0 else 30
                                    # NOTE(review): end_timestamp equals the slot start, so
                                    # the overlap test treats the candidate slot as a
                                    # zero-length instant — verify this is intended.
                                    end_timestamp = start_timestamp
                                    endBookedTime = startBookedTime + (int(boockedService[x]["duration"]) * 60) + bufferEnd * 60
                                    prefer = boockedService[x]["prefer"].lower()
                                    if int(genderCount[requestData['gender']]) <= 0 or totalEmployee <= 0:
                                        isAvailable = False
                                    elif startBookedTime >= start_timestamp and startBookedTime <= end_timestamp or endBookedTime >= start_timestamp and endBookedTime >= end_timestamp:
                                        # Booking overlaps this instant: consume one employee.
                                        isAvailable = True
                                        genderCount[requestData['gender']] = int(genderCount[requestData['gender']]) - 1
                                        totalEmployee = totalEmployee - 1
                                    else:
                                        isAvailable = True
                                price = getBuffer["prices"][str(durationService)] if str(durationService) in getBuffer["prices"] else False
                                availableSlot.append({"availibility":isAvailable,"startTime":datetime.datetime.strftime(startTime, '%I:%M %p'),"duration":requestData["duration"],"remain":totalEmployee,"price":price})
                                startTime = startTime + datetime.timedelta(minutes=15)
                                i = i + 1
                        responseData = availableSlot
                    else:
                        status = False
                        message = "No slots created"
            else:
                status = False
        else:
            status = False
            message = "Today is close"
    else:
        status = False
    response = output_json(responseData,message,status,code)
    return response
def createBooking(self):
    """Create a confirmed booking (status 2) from the JSON request payload.

    Builds one spa entry per guest with per-service timing (including the
    service's 'after' buffer), accumulates the total price, inserts the
    booking document and returns its id.

    Returns:
        output_json payload with the inserted order id (or None on failure).
    """
    requestData = request.json
    todaydate = get_current_date().strftime("%Y-%m-%d %H:%M:%S")
    outlet_id = ObjectId(requestData['outlet_id'])
    # Not stored below, but the ObjectId() calls implicitly validate the ids.
    userId = ObjectId(requestData['user_id'])
    vendor_id = ObjectId(requestData['vendor_id'])
    outletData = DB.find_one(tbl_v004_outlets,{"_id":outlet_id},{"_id":0,"name":1,"allow_customers_gender":1})
    guest_details = requestData["guest_details"]
    bookingDate = requestData["booking_date"]
    insArr = {}
    insArr["user_id"] = None
    insArr["temp_user_id"] = None
    insArr["status"] = 2 # default
    insArr["spa"] = []
    bookPrice = 0.0
    guestDetails = []
    for x in range(len(guest_details)):
        spaObj = {}
        spaObj["outlet_name"] = outletData["name"]
        spaObj["outlet_id"] = outlet_id
        spaObj["services"] = []
        guestService = {}
        serviceData = DB.find_one(tbl_v012_services,{"_id":ObjectId(guest_details[x]["service_id"])},{"_id":0,"images":1,"name":1,"buffer":1})
        guestService["service_id"] = ObjectId(guest_details[x]["service_id"])
        guestService["name"] = serviceData["name"]
        guestService["images"] = serviceData["images"]
        guestService["service_status"] = 0
        # Guest time like "H:MM AM" -> 24h "YYYY-MM-DD HH:MM:SS".
        startTime = guest_details[x]["time"]
        endTime = startTime
        startTime = startTime.split(" ")
        startTime = bookingDate+ " " + convert24(startTime[0]+":00" + startTime[1])
        endTime = endTime.split(" ")
        endTime = bookingDate+ " " + convert24(endTime[0]+":00" + endTime[1])
        startTime = datetime.datetime.timestamp(datetime.datetime.strptime(startTime, '%Y-%m-%d %H:%M:%S'))
        # Bug fix: the trailing conditional expression used to apply to the
        # WHOLE sum (Python's ternary binds loosest), so endTime collapsed to
        # 0 whenever the service had no 'after' buffer. Parenthesize so only
        # the buffer term is conditional.
        endTime = (datetime.datetime.timestamp(datetime.datetime.strptime(endTime, '%Y-%m-%d %H:%M:%S'))
                   + (int(guest_details[x]["duration"]) * 60)
                   + (60 * int(serviceData["buffer"]["after"]) if "after" in serviceData["buffer"] else 0))
        guestService["start_timestamp"] = startTime
        guestService["end_timestamp"] = endTime
        guestService["duration"] = guest_details[x]["duration"]
        guestService["date"] = bookingDate
        guestService["day"] = (datetime.datetime.strptime(bookingDate, '%Y-%m-%d').strftime('%A')).lower()
        guestService["time"] = guest_details[x]["time"]
        guestService["price"] = float(guest_details[x]["price"])
        bookPrice += float(guest_details[x]["price"])
        guestService["convenience_fee"] = 0
        guestService["is_for_guest"] = False
        guestService["guest_details"] = {"gender":guest_details[x]["gender"],"name":guest_details[x]["name"],"mobile":guest_details[x]["mobile"]}
        booking_no = generate_booking_no(guest_details[x]["name"], bookingDate)
        guestService["booking_no"] = booking_no
        guestDetails.append({"gender":guest_details[x]["gender"],"name":guest_details[x]["name"],"mobile":guest_details[x]["mobile"],"booking_no":booking_no,"service_id":ObjectId(guest_details[x]["service_id"]),"is_for_guest":False})
        guestService["prefer"] = guest_details[x]["prefer"]
        guestService["allow_customers_gender"] = outletData["allow_customers_gender"]
        spaObj["services"].append(guestService)
        insArr["spa"].append(spaObj)
    insArr["created_date"] = todaydate
    insArr["convenience_fee"] = 0
    insArr["gst"] = 0
    insArr["gst_in_percentage"] = 0
    insArr["price"] = round(bookPrice,2)
    insArr["total_price"] = round(bookPrice,2)
    insArr["agree_terms"] = True
    insArr["booking_id"] = ObjectId()
    insArr["donate_ngo"] = False
    insArr["donate_ngo_value"] = 0
    insArr["guest_details"] = guestDetails
    insArr["is_login"] = False
    insArr["paymentData"] = {}
    insArr["subscribe_newsletter"] = False
    insArr["order_id"] = ""
    insArr["payment_status"] = 1
    insArr["payment_currency"] = "INR"
    insArr["payment_id"] = ""
    insArr["payment_method"] = requestData["payment_method"]
    insertId = DB.insert(tbl_u002_booking,insArr)
    if insertId:
        # If need to send SMS then uncomment this code
        """ for x in range(len(guestDetails)):
            notificationObject = {}
            smsObject = {}
            smsObject["to"] = guestDetails[x]["mobile"]
            smsObject["template"] = "vendor_otp"
            smsObject["replace_value1"] = str(OTP)
            smsObject["replace_value2"] = 'login'
            smsObject["replace_value3"] = CONST.SMS_VALID
            notificationObject["sms"] = smsObject
            res = requests.post(CONST.NOTIFACTION_CLIENT, json=notificationObject)
            response = dict(res.json()) """
        response = output_json({"orderId":insertId},MSG_CONST.CALANDER_BOOKING_SUCCESS,True,200)
    else:
        response = output_json({"orderId":None},MSG_CONST.CALANDER_BOOKING_FAILED,True,200)
    return response
def generate_booking_no(name, date):
    """Build a short booking number: name initials + date prefix + time digits.

    Layout: up to 6 uppercase initials from *name*, the first two characters
    of *date*, then up to 6 digits from the current timestamp's fractional
    part.

    Returns:
        The booking number string, or None when the inputs cannot be
        processed (preserving the original best-effort contract).
    """
    try:
        # Skip empty tokens so runs of spaces no longer raise IndexError.
        initials = "".join(word[0].upper() for word in name.split(" ") if word)
        if len(initials) > 6:
            initials = initials[0:6]
        # Bug fix: this module does `import datetime` after
        # `from datetime import datetime`, so the bare name is the MODULE;
        # the original `datetime.now()` raised AttributeError, which the
        # except below swallowed — the function always returned None.
        ts_part = str(datetime.datetime.now().timestamp()).split(".")[1][-6:]
        return initials + str(date[0:2]) + ts_part
    except Exception:
        return None
def get_service_data(self,outlet_id):
data={}
arr1=[]
arr2=[]
arr3=[]
selectField = {'spa':1,"_id":1}
days=1
current_time = datetime.datetime.now()
after_time = datetime.datetime.now() + datetime.timedelta(int(days))
today_date = current_time.strftime("%Y-%m-%d")
end_date= after_time.strftime("%Y-%m-%d")
#for all bookings
if outlet_id != '':
booking=DB.find_all_where(tbl_u002_booking,{"spa.outlet_id":ObjectId(outlet_id),"status":2,"payment_status":1},selectField)
if booking != []:
for bookings in booking:
for spa in bookings['spa']:
if spa['outlet_id']['$oid'] == outlet_id:
for service in spa['services']:
book_date = datetime.datetime.strptime(service['date'],"%Y-%m-%d")
service['date'] = | |
del rule_schema['rule']['sources']
# Mandatory values of a source ( NB: source is an optional value )
elif rule_source_value != '':
rule_schema['rule']['sources']['source']['value'] = rule_source_value
rule_schema['rule']['sources']['source']['type'] = API_TYPES[rule_source_type]
# Optional values of a source ( if specified )
if rule_source_excluded != '':
rule_schema['rule']['sources']['@excluded'] = rule_source_excluded
elif rule_source_name != '':
# Code to map name to value
rule_source_value = nametovalue(vccontent, client_session, rule_source_name, rule_source_type)
if rule_source_value == '':
print 'Matching Source Object ID not found - Abort - No operations have been performed on the system'
return
rule_schema['rule']['sources']['source']['value'] = rule_source_value
rule_schema['rule']['sources']['source']['type'] = API_TYPES[rule_source_type]
# Optional values of a source ( if specified )
if rule_source_excluded != '':
rule_schema['rule']['sources']['@excluded'] = rule_source_excluded
# If the destination value is "any" the section needs to be deleted
if rule_destination_value == 'any':
del rule_schema['rule']['destinations']
# Mandatory values of a destination ( NB: destination is an optional value )
elif rule_destination_value != '':
rule_schema['rule']['destinations']['destination']['value'] = rule_destination_value
#rule_schema['rule']['destinations']['destination']['type'] = rule_destination_type
rule_schema['rule']['destinations']['destination']['type'] = API_TYPES[rule_destination_type]
# Optional values of a destination ( if specified )
if rule_destination_excluded != '':
rule_schema['rule']['destinations']['@excluded'] = rule_destination_excluded
elif rule_destination_name != '':
# Code to map name to value
rule_destination_value = nametovalue(vccontent, client_session, rule_destination_name,
rule_destination_type)
if rule_destination_value == '':
print 'Matching Destination Object ID not found - No operations have been performed on the system'
return
rule_schema['rule']['destinations']['destination']['value'] = rule_destination_value
rule_schema['rule']['destinations']['destination']['type'] = API_TYPES[rule_destination_type]
# Optional values of a destination ( if specified )
if rule_destination_excluded != '':
rule_schema['rule']['destinations']['@excluded'] = rule_destination_excluded
# If no services are specified the section needs to be deleted
if rule_service_protocolname == '' and rule_service_destport == '' and rule_service_name == '':
del rule_schema['rule']['services']
elif rule_service_protocolname != '' and rule_service_destport != '' and rule_service_name != '':
print ('Service can be specified either via protocol/port or name')
return
elif rule_service_protocolname != '':
# Mandatory values of a service specified via protocol ( NB: service is an optional value )
rule_schema['rule']['services']['service']['protocolName'] = rule_service_protocolname
if rule_service_destport != '':
rule_schema['rule']['services']['service']['destinationPort'] = rule_service_destport
# Optional values of a service specified via protocol ( if specified )
if rule_service_srcport != '':
rule_schema['rule']['services']['service']['sourcePort'] = rule_service_srcport
elif rule_service_name != '':
# Mandatory values of a service specified via application/application group (service is an optional value)
rule_schema['rule']['services']['service']['value'] = ''
services = client_session.read('servicesScope', uri_parameters={'scopeId': 'globalroot-0'})
service = services.items()[1][1]['list']['application']
for servicedict in service:
if str(servicedict['name']) == rule_service_name:
rule_schema['rule']['services']['service']['value'] = str(servicedict['objectId'])
if rule_schema['rule']['services']['service']['value'] == '':
servicegroups = client_session.read('serviceGroups', uri_parameters={'scopeId': 'globalroot-0'})
servicegrouplist = servicegroups.items()[1][1]['list']['applicationGroup']
for servicegroupdict in servicegrouplist:
if str(servicegroupdict['name']) == rule_service_name:
rule_schema['rule']['services']['service']['value'] = str(servicegroupdict['objectId'])
if rule_schema['rule']['services']['service']['value'] == '':
print ('Invalid service specified')
return
try:
rule = client_session.create(rule_type, uri_parameters={'sectionId': section_id}, request_body_dict=rule_schema,
additional_headers={'If-match': section_etag})
return rule
except:
print("")
print 'Error: cannot create rule. It is possible that some of the parameters are not compatible. Please check' \
'the following rules are obeyed:'
print'(*) If the rule is applied to all edge gateways, then "inout" is the only allowed value for parameter -dir'
print'(*) Allowed values for -pktype parameter are any/ipv6/ipv4'
print'(*) For a L3 rules applied to all edge gateways "any" is the only allowed value for parameter -pktype'
print'(*) For a L2 rule "any" is the only allowed value for parameter -pktype'
print'(*) For a L3 rule allowed values for -action parameter are allow/block/reject'
print'(*) For a L2 rule allowed values for -action parameter are allow/block'
print("")
print'Aborting. No action have been performed on the system'
print("")
print("Printing current DFW rule schema used in API call")
print("-------------------------------------------------")
print rule_schema
return
def _dfw_rule_create_print(client_session, vccontent, **kwargs):
if not (kwargs['dfw_section_id']):
print ('Mandatory parameters missing: [-sid SECTION ID]')
return None
section_id = kwargs['dfw_section_id']
if not (kwargs['dfw_rule_name']):
print ('Mandatory parameters missing: [-rname RULE NAME]')
return None
rule_name = kwargs['dfw_rule_name']
if not (kwargs['dfw_rule_applyto']):
print ('Mandatory parameters missing: [-appto RULE APPLYTO VALUE ( ex: "any","dfw","edgegw" or object-id )]')
return None
if kwargs['dfw_rule_applyto'] == 'any':
rule_applyto = 'ANY'
elif kwargs['dfw_rule_applyto'] == 'dfw':
rule_applyto = 'DISTRIBUTED_FIREWALL'
elif kwargs['dfw_rule_applyto'] == 'edgegw':
rule_applyto = 'ALL_EDGES'
else:
rule_applyto = kwargs['dfw_rule_applyto']
if not (kwargs['dfw_rule_direction']):
print ('Mandatory parameters missing: [-dir RULE DIRECTION ("inout","in","out")]')
return None
if rule_applyto != 'ALL_EDGES' \
and ((kwargs['dfw_rule_direction']) == 'inout' or (kwargs['dfw_rule_direction']) == 'in' or
(kwargs['dfw_rule_direction']) == 'out'):
rule_direction = kwargs['dfw_rule_direction']
elif rule_applyto == 'ALL_EDGES' and (kwargs['dfw_rule_direction']) == 'inout':
rule_direction = kwargs['dfw_rule_direction']
else:
print ('Allowed values for -dir parameter are inout/in/out')
print('If the rule is applied to all edge gateways, then "inout" is the only allowed value for parameter -dir')
return None
if not (kwargs['dfw_rule_pktype']):
print ('Mandatory parameters missing: [-pktype RULE PACKET TYPE ("any","ipv6","ipv4"]')
return None
if rule_applyto != 'ALL_EDGES' \
and ((kwargs['dfw_rule_pktype']) == 'any' or (kwargs['dfw_rule_pktype']) == 'ipv4' or
(kwargs['dfw_rule_pktype']) == 'ipv6'):
rule_pktype = kwargs['dfw_rule_pktype']
elif rule_applyto == 'ALL_EDGES' and (kwargs['dfw_rule_pktype']) == 'any':
rule_pktype = kwargs['dfw_rule_pktype']
else:
print ('Allowed values for -pktype parameter are any/ipv6/ipv4')
print ('For a L3 rules applied to all edge gateways "any" is the only allowed value for parameter -pktype')
print ('For a L2 rule "any" is the only allowed value for parameter -pktype')
return None
if not (kwargs['dfw_rule_disabled']):
print ('Using default value "false" for rule "disabled" attribute')
rule_disabled = 'false'
elif (kwargs['dfw_rule_disabled']) == 'false' or (kwargs['dfw_rule_disabled']) == 'true':
rule_disabled = kwargs['dfw_rule_disabled']
else:
print ('Allowed values for -disabled parameter are true/false')
return None
if not (kwargs['dfw_rule_action']):
print ('Using default value "allow" for rule "action" attribute')
rule_action = 'allow'
elif (kwargs['dfw_rule_action']) == 'allow' or (kwargs['dfw_rule_action']) == 'block' \
or (kwargs['dfw_rule_action']) == 'reject':
rule_action = kwargs['dfw_rule_action']
else:
print ('For a L3 rule allowed values for -action parameter are allow/block/reject')
print ('For a L2 rule allowed values for -action parameter are allow/block')
return None
if not (kwargs['dfw_rule_source_type']):
print ('Using default value "Ipv4Address" for rule source "type" attribute')
rule_source_type = 'Ipv4Address'
else:
rule_source_type = kwargs['dfw_rule_source_type']
if not (kwargs['dfw_rule_source_value']):
rule_source_value = ''
else:
rule_source_value = kwargs['dfw_rule_source_value']
if not (kwargs['dfw_rule_source_name']):
rule_source_name = ''
else:
rule_source_name = kwargs['dfw_rule_source_name']
if rule_source_value == '' and rule_source_name == '':
print ('Either rule source parameter "value" or rule source parameter "name" must be defined')
return
if not (kwargs['dfw_rule_source_excluded']):
print ('Using default value "false" for rule source "excluded" attribute')
rule_source_excluded = 'false'
elif (kwargs['dfw_rule_source_excluded']) != 'true' or (kwargs['dfw_rule_source_excluded']) != 'false':
print ('Allowed values for rule source excluded parameter are "true" and "false"')
return
else:
rule_source_excluded = kwargs['dfw_rule_source_excluded']
if not (kwargs['dfw_rule_destination_type']):
print ('Using default value "Ipv4Address" for rule destination "type" attribute')
rule_destination_type = 'Ipv4Address'
else:
rule_destination_type = kwargs['dfw_rule_destination_type']
if not (kwargs['dfw_rule_destination_name']):
rule_destination_name = ''
else:
rule_destination_name = kwargs['dfw_rule_destination_name']
if not (kwargs['dfw_rule_destination_value']):
rule_destination_value = ''
else:
rule_destination_value = kwargs['dfw_rule_destination_value']
if rule_destination_value == '' and rule_destination_name == '':
print ('Either rule destination parameter "value" or rule destination parameter "name" must be defined')
return
if not (kwargs['dfw_rule_destination_excluded']):
print ('Using default value "false" for rule destination "excluded" attribute')
rule_destination_excluded = 'false'
elif (kwargs['dfw_rule_destination_excluded']) != 'true' and (kwargs['dfw_rule_destination_excluded']) != 'false':
print ('Allowed values for rule destination excluded parameter are "true" and "false"')
return
else:
rule_destination_excluded = kwargs['dfw_rule_destination_excluded']
if not (kwargs['dfw_rule_service_protocolname']):
rule_service_protocolname = ''
else:
rule_service_protocolname = kwargs['dfw_rule_service_protocolname']
if not (kwargs['dfw_rule_service_destport']):
rule_service_destport = ''
else:
rule_service_destport = kwargs['dfw_rule_service_destport']
if not (kwargs['dfw_rule_service_srcport']):
rule_service_srcport = ''
else:
rule_service_srcport = kwargs['dfw_rule_service_srcport']
if not (kwargs['dfw_rule_service_name']):
rule_service_name = ''
else:
rule_service_name = kwargs['dfw_rule_service_name']
if (rule_service_protocolname == '') and (rule_service_destport != ''):
print ('Protocol name must be specified in the rule service parameter if a destination port is specified')
return
if (rule_service_protocolname != '') and (rule_service_destport != '') and (rule_service_name != ''):
print ('Rule service can be specified by either protocol/port or service name, but not both')
return
if rule_applyto != 'ALL_EDGES':
if not (kwargs['dfw_rule_tag']):
rule_tag = ''
else:
rule_tag = kwargs['dfw_rule_tag']
elif rule_applyto == 'ALL_EDGES':
# If appliedTo is 'ALL_EDGES' no tags are allowed
rule_tag = ''
else:
rule_tag = ''
if not (kwargs['dfw_rule_note']):
rule_note = ''
else:
rule_note = kwargs['dfw_rule_note']
if not (kwargs['dfw_rule_logged']):
print ('Using default value "false" for rule "logging" attribute')
rule_logged = 'false'
else:
if kwargs['dfw_rule_logged'] == 'true' or kwargs['dfw_rule_logged'] == 'false':
rule_logged = kwargs['dfw_rule_logged']
else:
print ('Allowed values for rule logging are "true" and "false"')
return
#rule = dfw_rule_create(client_session, vccontent, section_id, rule_name, rule_direction, rule_pktype, rule_disabled,
#rule_action, rule_applyto, | |
<gh_stars>1-10
# epydoc -- Command line interface
#
# Copyright (C) 2005 <NAME>
# Author: <NAME> <<EMAIL>>
# URL: <http://epydoc.sf.net>
#
# $Id: cli.py 1196 2006-04-09 18:15:55Z edloper $
"""
Command-line interface for epydoc. Abbreviated Usage::
epydoc [options] NAMES...
NAMES... The Python modules to document.
--html Generate HTML output (default).
--latex Generate LaTeX output.
--pdf Generate pdf output, via LaTeX.
-o DIR, --output DIR The output directory.
--inheritance STYLE The format for showing inherited objects.
-V, --version Print the version of epydoc.
-h, --help Display a usage message.
Run \"epydoc --help\" for a complete option list. See the epydoc(1)
man page for more information.
Config Files
============
Configuration files can be specified with the C{--config} option.
These files are read using U{ConfigParser
<http://docs.python.org/lib/module-ConfigParser.html>}. Configuration
files may set options or add names of modules to document. Option
names are (usually) identical to the long names of command line
options. To specify names to document, use any of the following
option names::
module modules value values object objects
A simple example of a config file is::
[epydoc]
modules: sys, os, os.path, re
name: Example
graph: classtree
introspect: no
Verbosity Levels
================
The C{-v} and C{-q} options increase and decrease verbosity,
respectively. The default verbosity level is zero. The verbosity
levels are currently defined as follows::
Progress Markup warnings Warnings Errors
-3 none no no no
-2 none no no yes
-1 none no yes yes
0 (default) bar no yes yes
1 bar yes yes yes
2 list yes yes yes
"""
__docformat__ = 'epytext en'
import sys, os, time, re, pstats
from glob import glob
from optparse import OptionParser, OptionGroup
import epydoc
from epydoc import log
from epydoc.util import wordwrap, run_subprocess, RunSubprocessError
from epydoc.apidoc import UNKNOWN
import ConfigParser
# Recognized values for the --inheritance option.
INHERITANCE_STYLES = ('grouped', 'listed', 'included')
# Recognized values for the --graph option (the special value 'all' is also accepted).
GRAPH_TYPES = ('classtree', 'callgraph', 'umlclasstree')
# The set of output actions; exactly one is selected via the action options.
ACTIONS = ('html', 'text', 'latex', 'dvi', 'ps', 'pdf', 'check')
# Markup language assumed for docstrings when --docformat is not given.
DEFAULT_DOCFORMAT = 'epytext'
######################################################################
#{ Argument & Config File Parsing
######################################################################
def parse_arguments():
    """Parse epydoc's command-line options and names.

    Builds the ``OptionParser``, applies any ``--config`` files, validates
    the combined options, and returns an ``(options, names)`` pair, where
    ``names`` is the list of modules/objects to document.

    Invalid input is reported via ``optparser.error``, which exits the
    process.  NOTE: this module uses Python 2 syntax (``except Exception, e``).
    """
    # Construct the option parser.
    usage = '%prog ACTION [options] NAMES...'
    version = "Epydoc, version %s" % epydoc.__version__
    optparser = OptionParser(usage=usage, version=version)
    action_group = OptionGroup(optparser, 'Actions')
    options_group = OptionGroup(optparser, 'Options')
    # Add options -- Actions.  Each action stores a constant into the single
    # `action` dest, so the last action flag given on the command line wins.
    action_group.add_option( # --html
        "--html", action="store_const", dest="action", const="html",
        help="Write HTML output.")
    action_group.add_option( # --text
        "--text", action="store_const", dest="action", const="text",
        help="Write plaintext output. (not implemented yet)")
    action_group.add_option( # --latex
        "--latex", action="store_const", dest="action", const="latex",
        help="Write LaTeX output.")
    action_group.add_option( # --dvi
        "--dvi", action="store_const", dest="action", const="dvi",
        help="Write DVI output.")
    action_group.add_option( # --ps
        "--ps", action="store_const", dest="action", const="ps",
        help="Write Postscript output.")
    action_group.add_option( # --pdf
        "--pdf", action="store_const", dest="action", const="pdf",
        help="Write PDF output.")
    action_group.add_option( # --check
        "--check", action="store_const", dest="action", const="check",
        help="Check completeness of docs.")
    # Add options -- Options
    options_group.add_option( # --output
        "--output", "-o", dest="target", metavar="PATH",
        help="The output directory. If PATH does not exist, then "
        "it will be created.")
    options_group.add_option( # --inheritance
        "--inheritance", dest="inheritance", metavar="STYLE",
        help="The format for showing inheritance objects. STYLE "
        "should be one of: %s." % ', '.join(INHERITANCE_STYLES))
    options_group.add_option( # --docformat
        "--docformat", dest="docformat", metavar="NAME",
        help="The default markup language for docstrings. Defaults "
        "to \"%s\"." % DEFAULT_DOCFORMAT)
    options_group.add_option( # --css
        "--css", dest="css", metavar="STYLESHEET",
        help="The CSS stylesheet. STYLESHEET can be either a "
        "builtin stylesheet or the name of a CSS file.")
    options_group.add_option( # --name
        "--name", dest="prj_name", metavar="NAME",
        help="The documented project's name (for the navigation bar).")
    options_group.add_option( # --url
        "--url", dest="prj_url", metavar="URL",
        help="The documented project's URL (for the navigation bar).")
    options_group.add_option( # --navlink
        "--navlink", dest="prj_link", metavar="HTML",
        help="HTML code for a navigation link to place in the "
        "navigation bar.")
    options_group.add_option( # --top
        "--top", dest="top_page", metavar="PAGE",
        help="The \"top\" page for the HTML documentation. PAGE can "
        "be a URL, the name of a module or class, or one of the "
        "special names \"trees.html\", \"indices.html\", or \"help.html\"")
    options_group.add_option( # --help-file
        "--help-file", dest="help_file", metavar="FILE",
        help="An alternate help file. FILE should contain the body "
        "of an HTML file -- navigation bars will be added to it.")
    options_group.add_option( # --show-frames
        "--show-frames", action="store_true", dest="show_frames",
        help="Include frames in the HTML output. (default)")
    options_group.add_option( # --no-frames
        "--no-frames", action="store_false", dest="show_frames",
        help="Do not include frames in the HTML output.")
    options_group.add_option( # --show-private
        "--show-private", action="store_true", dest="show_private",
        help="Include private variables in the output. (default)")
    options_group.add_option( # --no-private
        "--no-private", action="store_false", dest="show_private",
        help="Do not include private variables in the output.")
    options_group.add_option( # --show-imports
        "--show-imports", action="store_true", dest="show_imports",
        help="List each module's imports.")
    options_group.add_option( # --no-imports
        "--no-imports", action="store_false", dest="show_imports",
        help="Do not list each module's imports. (default)")
    options_group.add_option( # --quiet
        "--quiet", "-q", action="count", dest="quiet",
        help="Decrease the verbosity.")
    options_group.add_option( # --verbose
        "--verbose", "-v", action="count", dest="verbose",
        help="Increase the verbosity.")
    options_group.add_option( # --debug
        "--debug", action="store_true", dest="debug",
        help="Show full tracebacks for internal errors.")
    options_group.add_option( # --parse-only
        "--parse-only", action="store_false", dest="introspect",
        help="Get all information from parsing (don't introspect)")
    options_group.add_option( # --introspect-only
        "--introspect-only", action="store_false", dest="parse",
        help="Get all information from introspecting (don't parse)")
    if epydoc.DEBUG:
        # this option is for developers, not users.
        options_group.add_option(
            "--profile-epydoc", action="store_true", dest="profile",
            help="Run the profiler. Output will be written to profile.out")
    options_group.add_option(
        "--dotpath", dest="dotpath", metavar='PATH',
        help="The path to the Graphviz 'dot' executable.")
    options_group.add_option(
        '--config', action='append', dest="configfiles", metavar='FILE',
        help=("A configuration file, specifying additional OPTIONS "
              "and/or NAMES. This option may be repeated."))
    options_group.add_option(
        '--graph', action='append', dest='graphs', metavar='GRAPHTYPE',
        help=("Include graphs of type GRAPHTYPE in the generated output. "
              "Graphs are generated using the Graphviz dot executable. "
              "If this executable is not on the path, then use --dotpath "
              "to specify its location. This option may be repeated to "
              "include multiple graph types in the output. GRAPHTYPE "
              "should be one of: all, %s." % ', '.join(GRAPH_TYPES)))
    options_group.add_option(
        '--separate-classes', action='store_true',
        dest='list_classes_separately',
        help=("When generating LaTeX or PDF output, list each class in "
              "its own section, instead of listing them under their "
              "containing module."))
    options_group.add_option(
        '--show-sourcecode', action='store_true', dest='include_source_code',
        help=("Include source code with syntax highlighting in the "
              "HTML output."))
    options_group.add_option(
        '--no-sourcecode', action='store_false', dest='include_source_code',
        help=("Do not include source code with syntax highlighting in the "
              "HTML output."))
    options_group.add_option(
        '--pstat', action='append', dest='pstat_files', metavar='FILE',
        help="A pstat output file, to be used in generating call graphs.")
    # Add the option groups.
    optparser.add_option_group(action_group)
    optparser.add_option_group(options_group)
    # Set the option parser's defaults.
    optparser.set_defaults(action="html", show_frames=True,
                           docformat=DEFAULT_DOCFORMAT,
                           show_private=True, show_imports=False,
                           inheritance="listed",
                           verbose=0, quiet=0,
                           parse=True, introspect=True,
                           debug=epydoc.DEBUG, profile=False,
                           graphs=[], list_classes_separately=False,
                           include_source_code=True, pstat_files=[])
    # Parse the arguments.
    options, names = optparser.parse_args()
    # Process any config files.  Config files may both set options and append
    # extra names, so they are applied before validation.
    if options.configfiles:
        try:
            parse_configfiles(options.configfiles, options, names)
        except (KeyboardInterrupt,SystemExit): raise
        except Exception, e:
            optparser.error('Error reading config file:\n %s' % e)
    # Check to make sure all options are valid.
    if len(names) == 0:
        optparser.error("No names specified.")
    # perform shell expansion.
    # NOTE(review): we iterate over a copy while slice-assigning into `names`;
    # if a glob expands to more or fewer than one entry, the indices of later
    # items shift and subsequent replacements land at the wrong position.
    for i, name in enumerate(names[:]):
        if '?' in name or '*' in name:
            names[i:i+1] = glob(name)
    if options.inheritance not in INHERITANCE_STYLES:
        optparser.error("Bad inheritance style. Valid options are " +
                        ",".join(INHERITANCE_STYLES))
    if not options.parse and not options.introspect:
        optparser.error("Invalid option combination: --parse-only "
                        "and --introspect-only.")
    if options.action == 'text' and len(names) > 1:
        optparser.error("--text option takes only one name.")
    # Check the list of requested graph types to make sure they're
    # acceptable.
    options.graphs = [graph_type.lower() for graph_type in options.graphs]
    for graph_type in options.graphs:
        if graph_type == 'callgraph' and not options.pstat_files:
            optparser.error('"callgraph" graph type may only be used if '
                            'one or more pstat files are specified.')
        # If it's 'all', then add everything (but don't add callgraph if
        # we don't have any profiling info to base them on).
        if graph_type == 'all':
            if options.pstat_files:
                # NOTE(review): assigns the GRAPH_TYPES tuple where the other
                # branches produce a list; callers treating graphs as a list
                # (e.g. mutating it) would differ here.
                options.graphs = GRAPH_TYPES
            else:
                options.graphs = [g for g in GRAPH_TYPES if g != 'callgraph']
            break
        elif graph_type not in GRAPH_TYPES:
            optparser.error("Invalid graph type %s." % graph_type)
    # Calculate verbosity: each -v adds one, each -q subtracts one.
    options.verbosity = options.verbose - options.quiet
    # The target default depends on the action (e.g. html output goes to ./html).
    if options.target is None:
        options.target = options.action
    # Return parsed args.
    return options, names
def parse_configfiles(configfiles, options, names):
configparser = ConfigParser.ConfigParser()
# ConfigParser.read() silently ignores errors, so open the files
# manually (since we want to notify the user of any errors).
for configfile in configfiles:
fp = open(configfile, 'r') # may raise IOError.
configparser.readfp(fp, configfile)
fp.close()
for optname in configparser.options('epydoc'):
val = configparser.get('epydoc', optname).strip()
optname = optname.lower().strip()
if optname in ('modules', 'objects', 'values',
'module', 'object', 'value'):
names.extend(val.replace(',', ' ').split())
elif optname == 'output':
if optname not in ACTIONS:
raise ValueError('"%s" expected one of: %s' %
(optname, ', '.join(ACTIONS)))
options.action = action
| |
signature(self._predictor)
model_param, *_ = predictor_sig.parameters.values()
model_param = model_param.replace(name="model_object")
# assume that reader_return_type is a dict with only a single entry
[(_, data_arg_type)] = self._dataset.reader_return_type.items()
data_param = Parameter("features", kind=Parameter.KEYWORD_ONLY, annotation=data_arg_type)
@inner_task(
unionml_obj=self,
input_parameters=OrderedDict([("model_object", model_param), ("features", data_param)]),
return_annotation=predictor_sig.return_annotation,
**self._predict_task_kwargs,
)
def predict_from_features_task(model_object, features):
return self._predictor(model_object, features)
self._predict_from_features_task = predict_from_features_task
return predict_from_features_task
def train(
    self,
    hyperparameters: Optional[Dict[str, Any]] = None,
    trainer_kwargs: Optional[Dict[str, Any]] = None,
    **reader_kwargs,
) -> Tuple[Any, Any]:
    """Train a model object locally.

    :param hyperparameters: mapping of hyperparameter names to values, forwarded to the
        ``init`` callable that creates the model object.
    :param trainer_kwargs: mapping of training-parameter names to values; these correspond
        to the keyword-only arguments of the ``model.trainer`` function.
    :param reader_kwargs: keyword arguments matching the :meth:`unionml.Dataset.reader`
        method signature.
    :returns: a ``(model_object, metrics)`` tuple.
    """
    if hyperparameters is None:
        hyperparameters = {}
    if trainer_kwargs is None:
        trainer_kwargs = {}
    # Merge reader and trainer keyword arguments; trainer kwargs take precedence
    # on a name collision, matching the workflow's call signature.
    workflow_kwargs = dict(reader_kwargs)
    workflow_kwargs.update(trainer_kwargs)
    trained_model, fitted_hyperparameters, metrics = self.train_workflow()(
        hyperparameters=self.hyperparameter_type(**hyperparameters),
        **workflow_kwargs,
    )
    # Cache the training outputs so predict/save can reuse them.
    self.artifact = ModelArtifact(trained_model, fitted_hyperparameters, metrics)
    return trained_model, metrics
def predict(
    self,
    features: Any = None,
    **reader_kwargs,
):
    """Generate predictions locally.

    Either pass raw ``features`` or keyword arguments that are forwarded to the
    :meth:`unionml.Dataset.reader` method as the feature source (exactly one of
    the two must be supplied).

    :param features: Raw features that are pre-processed by the
        :py:class:``unionml.Dataset`` methods in the following order:

        - :meth:`unionml.dataset.Dataset.feature_loader`
        - :meth:`unionml.dataset.Dataset.parser`
        - :meth:`unionml.dataset.Dataset.feature_transformer`
    :param reader_kwargs: keyword arguments that correspond to the
        :meth:`unionml.Dataset.reader` method signature.
    """
    if features is None and not reader_kwargs:
        raise ValueError("At least one of features or **reader_kwargs needs to be provided")
    if self.artifact is None:
        raise RuntimeError(
            "ModelArtifact not found. You must train a model first with the `train` method before generating "
            "predictions."
        )
    model_object = self.artifact.model_object
    if features is not None:
        # Raw features: run them through the dataset's feature pipeline first.
        workflow = self.predict_from_features_workflow()
        return workflow(
            model_object=model_object,
            features=self._dataset.get_features(features),
        )
    # No raw features: the reader kwargs select the feature source.
    workflow = self.predict_workflow()
    return workflow(model_object=model_object, **reader_kwargs)
def save(self, file: Union[str, os.PathLike, IO], *args, **kwargs):
    """Serialize the trained model object to *file* using the registered saver."""
    artifact = self.artifact
    if artifact is None:
        raise AttributeError("`artifact` property is None. Call the `train` method to train a model first")
    return self._saver(artifact.model_object, artifact.hyperparameters, file, *args, **kwargs)
def load(self, file: Union[str, os.PathLike, IO], *args, **kwargs):
    """Deserialize and return a model object from *file* using the registered loader."""
    loader = self._loader
    return loader(file, *args, **kwargs)
def serve(self, app: FastAPI, remote: bool = False, model_version: str = "latest"):
    """Create a FastAPI serving app.

    :param app: A ``FastAPI`` app to use for model serving.
    :param remote: forwarded to ``unionml.fastapi.serving_app``; presumably selects
        serving from a remote backend rather than the local artifact — TODO confirm.
    :param model_version: forwarded to ``unionml.fastapi.serving_app``; presumably
        selects which trained model version to serve — TODO confirm.
    """
    # Imported lazily so FastAPI serving deps are only needed when serving is used.
    from unionml.fastapi import serving_app

    serving_app(self, app, remote=remote, model_version=model_version)
def remote(
    self,
    registry: Optional[str] = None,
    image_name: Optional[str] = None,
    dockerfile: str = "Dockerfile",
    config_file: Optional[str] = None,
    project: Optional[str] = None,
    domain: Optional[str] = None,
):
    """Configure the ``unionml.Model`` for remote backend deployment.

    :param registry: Docker registry used to push UnionML app.
    :param image_name: image name to give to the Docker image associated with the UnionML app.
    :param dockerfile: path to the Dockerfile used to package the UnionML app.
    :param config_file: path to the `flytectl config <https://docs.flyte.org/projects/flytectl/en/latest/>`__ to use for
        deploying your UnionML app to a Flyte backend.
    :param project: deploy your app to this Flyte project name.
    :param domain: deploy your app to this Flyte domain name.
    """
    # Settings are only stored here; they take effect when the `_remote`
    # property lazily constructs the FlyteRemote client.
    self._config_file = config_file
    self._registry = registry
    self._image_name = image_name
    self._dockerfile = dockerfile
    self._project = project
    self._domain = domain
@property
def _remote(self) -> Optional[FlyteRemote]:
    """Lazily build and cache the ``FlyteRemote`` client for this model."""
    cached = self.__remote__
    if cached is not None:
        return cached
    config = Config.auto(config_file=self._config_file)
    if config.platform.endpoint.startswith("localhost"):
        # A localhost endpoint is interpreted as the local Flyte sandbox.
        config = Config.for_sandbox()
    client = FlyteRemote(
        config=config,
        default_project=self._project,
        default_domain=self._domain,
    )
    self.__remote__ = client
    return client
def remote_deploy(self):
    """Deploy model services to a Flyte backend.

    Builds (and, for non-sandbox targets, pushes) the app's Docker image, then
    registers the train/predict workflows under the current app version.
    """
    from unionml import remote

    # The app version is derived by the remote helper (per remote_train's
    # docstring, presumably the current git sha — TODO confirm).
    app_version = remote.get_app_version()
    image = remote.get_image_fqn(self, app_version, self._image_name)
    # Expose the image name via env var; presumably read by flytekit's
    # serialization machinery — TODO confirm.
    os.environ["FLYTE_INTERNAL_IMAGE"] = image or ""
    _remote = self._remote
    remote.create_project(_remote, self._project)
    if _remote.config.platform.endpoint.startswith("localhost"):
        # assume that a localhost flyte_admin_url means that we want to use Flyte sandbox
        remote.sandbox_docker_build(self, image)
    else:
        remote.docker_build_push(self, image)
    args = [_remote._default_project, _remote._default_domain, app_version]
    # Register each user-facing workflow under the same project/domain/version.
    for wf in [
        self.train_workflow(),
        self.predict_workflow(),
        self.predict_from_features_workflow(),
    ]:
        remote.deploy_wf(wf, _remote, image, *args)
def remote_train(
    self,
    app_version: Optional[str] = None,
    wait: bool = True,
    *,
    hyperparameters: Optional[Dict[str, Any]] = None,
    trainer_kwargs: Optional[Dict[str, Any]] = None,
    **reader_kwargs,
) -> Union[ModelArtifact, FlyteWorkflowExecution]:
    """Train a model object on a remote Flyte backend.

    :param app_version: if provided, executes a training job using the specified UnionML app version. By default,
        this uses the current git sha of the repo, which versions your UnionML app.
    :param wait: if True, this is a synchronous operation, returning a ``ModelArtifact``. Otherwise, this
        function returns a ``FlyteWorkflowExecution``.
    :param hyperparameters: a dictionary mapping hyperparameter names to values. This is passed into the
        ``init`` callable to initialize a model object.
    :param trainer_kwargs: a dictionary mapping training parameter names to values. These training parameters
        are determined by the keyword-only arguments of the ``model.trainer`` function.
    :param reader_kwargs: keyword arguments that correspond to the :meth:`unionml.Dataset.reader` method signature.
    """
    if self._remote is None:
        raise RuntimeError("First configure the remote client with the `Model.remote` method")
    from unionml import remote

    app_version = app_version or remote.get_app_version()
    train_wf = self._remote.fetch_workflow(name=self.train_workflow_name, version=app_version)
    execution = self._remote.execute(
        train_wf,
        inputs={
            "hyperparameters": self.hyperparameter_type(**({} if hyperparameters is None else hyperparameters)),
            # Trainer kwargs override reader kwargs on a name collision.
            **{**reader_kwargs, **({} if trainer_kwargs is None else trainer_kwargs)},  # type: ignore
        },
        project=self._remote.default_project,
        domain=self._remote.default_domain,
        wait=wait,
        type_hints={"hyperparameters": self.hyperparameter_type},
    )
    console_url = self._remote.generate_console_url(execution)
    print(
        f"Executing {train_wf.id.name}, execution name: {execution.id.name}."
        f"\nGo to {console_url} to see the execution in the console."
    )
    if not wait:
        return execution
    # Synchronous path: wait for completion and load the resulting artifact.
    self.remote_load(execution)
    return self.artifact
def remote_predict(
    self,
    app_version: Optional[str] = None,
    model_version: Optional[str] = None,
    wait: bool = True,
    *,
    features: Any = None,
    **reader_kwargs,
) -> Union[Any, FlyteWorkflowExecution]:
    """Generate predictions on a remote Flyte backend.

    You can either pass this function raw features via the ``features`` argument or you can pass in keyword
    arguments that will be forwarded to the :meth:`unionml.Dataset.reader` method as the feature source.

    :param app_version: if provided, executes a prediction job using the specified UnionML app version. By default,
        this uses the current git sha of the repo, which versions your UnionML app.
    :param model_version: if provided, executes a prediction job using the specified model version. By default, this
        uses the latest Flyte execution id as the model version.
    :param wait: if True, this is a synchronous operation, returning the predictions. Otherwise, this
        function returns a ``FlyteWorkflowExecution``.
    :param features: Raw features that are pre-processed by the :py:class:``unionml.Dataset`` methods in the
        following order:

        - :meth:`unionml.dataset.Dataset.feature_loader`
        - :meth:`unionml.dataset.Dataset.parser`
        - :meth:`unionml.dataset.Dataset.feature_transformer`
    :param reader_kwargs: keyword arguments that correspond to the :meth:`unionml.Dataset.reader` method signature.
    """
    if self._remote is None:
        raise RuntimeError("First configure the remote client with the `Model.remote` method")
    from unionml import remote

    app_version = app_version or remote.get_app_version()
    # Fetch the model object produced by the requested (or latest) training run.
    model_artifact = remote.get_model_artifact(self, app_version, model_version)
    # Exactly one feature source is allowed: raw features XOR reader kwargs.
    if (features is not None and len(reader_kwargs) > 0) or (features is None and len(reader_kwargs) == 0):
        raise ValueError("You must provide only one of `features` or `reader_kwargs`")
    inputs = {"model_object": model_artifact.model_object}
    if features is None:
        workflow_name = self.predict_workflow_name
        inputs.update(reader_kwargs)
        type_hints = {}
    else:
        workflow_name = self.predict_from_features_workflow_name
        inputs.update({"features": self._dataset.get_features(features)})
        # The dataset reader's single return type annotates the features input.
        type_hints = {"features": [*self._dataset.reader_return_type.values()][0]}
    predict_wf = self._remote.fetch_workflow(
        self._remote._default_project,
        self._remote._default_domain,
        workflow_name,
        app_version,
    )
    execution = self._remote.execute(
        predict_wf,
        inputs=inputs,
        project=self._remote.default_project,
        domain=self._remote.default_domain,
        wait=wait,
        type_hints=type_hints,
    )
    console_url = self._remote.generate_console_url(execution)
    print(
        f"Executing {predict_wf.id.name}, execution name: {execution.id.name}."
        f"\nGo to {console_url} to see the execution in the console."
    )
    if not wait:
        return execution
    # Synchronous path: return the first (and only expected) output value.
    predictions, *_ = execution.outputs.values()
    return predictions
def remote_wait(self, execution: FlyteWorkflowExecution, **kwargs) -> Any:
    """Block until *execution* completes and return the finished execution."""
    remote_client = self._remote
    if remote_client is None:
        raise ValueError("You must call `model.remote` to attach a remote backend to this model.")
    return remote_client.wait(execution, **kwargs)
def remote_load(self, execution: FlyteWorkflowExecution):
    """Load a ``ModelArtifact`` from the outputs of a Flyte execution.

    :param execution: a Flyte workflow execution, which is the output of
        ``remote_train(..., wait=False)`` .
    """
    if self._remote is None:
        raise ValueError("You must call `model.remote` to attach a remote backend to this model.")
    if not execution.is_done:
        print(f"Waiting for execution {execution.id.name} to complete...")
        execution = self.remote_wait(execution)
        print("Done.")
    # Outputs are materialized inside the remote context so that flytekit can
    # resolve them against the attached backend.
    with self._remote.remote_context():
        outputs = execution.outputs
        self.artifact = ModelArtifact(
            outputs["model_object"],
            outputs["hyperparameters"],
            outputs["metrics"],
        )
def remote_list_model_versions(self, app_version: str = None, limit: int = 10) -> List[str]:
"""Lists all the model versions of this UnionML app, in reverse chronological order.
:param app_version: if provided, lists the model versions associated with this app version. By default,
| |
<filename>plot.py<gh_stars>0
import itertools
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import pyplot as plt
from constants import (LV, Darzacq2007, L, Tantale2016, colors_additivity,
figures_folder, gene_long, l, markers_additivity)
from support import (J_over_k_HD, J_over_k_LD, alpha_over_k_abortive,
alpha_over_k_MC, rho_HD, rho_LD, set_figure_size)
# NOTE(review): use('Agg') is called after pyplot has already been imported
# above; with recent matplotlib this switches the backend, but the convention
# is to select the backend before the first pyplot import — confirm intended.
matplotlib.use('Agg')
# Constants
alpha_over_k_min = 0
alpha_over_k_max = 1 / (1 + np.sqrt(l))  # theoretical maximum
# Line colors for the theoretical rho and j curves, in that order.
colors_theor = [colors_additivity['sum'], colors_additivity['brown']]
def plot_theory(ax):
    """Draw the theoretical rho(alpha/k) and j(alpha/k) curves on the given axes."""
    n_points = 100
    mesh = np.linspace(alpha_over_k_min, alpha_over_k_max, num=n_points)
    curves = (
        (rho_func, '$\\rho$', colors_theor[0]),
        (j_func, '$j$', colors_theor[1]),
    )
    for func, curve_label, curve_color in curves:
        ax.plot(mesh, func(mesh), label=curve_label, c=curve_color)
    return
def rho_func(aoK):
    """Site occupation density rho as a function of alpha/k (uses the global particle length ``l``)."""
    numerator = aoK * l
    denominator = 1 + aoK * (l - 1)
    return numerator / denominator
def j_func(aoK):
    """Normalized flux j as a function of alpha/k (uses the global particle length ``l``)."""
    numerator = aoK * (1 - aoK) * (1 + np.sqrt(l)) ** 2
    denominator = 1 + aoK * (l - 1)
    return numerator / denominator
def adjust_plot():
    """Apply the shared axis labels, y-range and legend style for rho/j plots."""
    plt.xlabel('Effective initiation rate $\\alpha/k$')
    plt.ylabel('$\\rho$, $j$')
    y_range = [-0.0, 1.02]
    plt.ylim(y_range)
    plt.legend(labelspacing=0.2, frameon=False, borderaxespad=0)
def plot_j_alpha_curve(analyses=None, pdf=False):
    """Plot the relation between the effective initiation rate alpha/k and the
    normalized density rho and polymerase flux j.

    Always produces a figure with theory curves plus literature data
    (saved as ``j_alpha_no_data``); if *analyses* is given, additionally
    produces one figure per gene with this study's data overlaid.

    :param analyses: a dataframe-like object with ``gene``/``construct`` columns
        and the rho/j/alpha columns accessed below; skipped when None.
    :param pdf: if True, also save each figure as a PDF next to the PNG.
    """
    # Constants
    ms1 = 8
    ms2 = 4  # size of the marker for the errorbar plot superimposed on the scatter data plot
    alpha = 0.25  # transparency of the error bars
    height_factor = 0.65
    elinewidth = 1  # errorbar line width
    markers = ('x', 'o', '^', 'd', '*')
    colors = [colors_additivity[key] for key in colors_additivity]
    long_labels = ['bac', 'no pr', 'no sh']
    # NOTE(review): this local dict shadows the `gene_long` imported from constants.
    gene_long = {'hb': 'hunchback', 'kn': 'knirps', 'sn': 'snail'}
    genes = ['hb', 'sn', 'kn']
    constructs = ['bac', 'no_pr', 'no_sh']
    figname = 'j_alpha'
    marker = itertools.cycle(markers)
    fig = plt.figure(num=21)
    set_figure_size(21, rows=1, page_width_frac=0.5, height_factor=height_factor)
    ax = fig.add_subplot(111)
    plot_theory(ax)
    # Add literature data
    for article in [Darzacq2007, Tantale2016]:
        # Plot density rho
        rho, rhoV, aoK = [article[key] for key in ['rho', 'rhoV', 'alpha_over_k']]
        c = 'k'
        m = next(marker)
        ax.scatter(aoK, rho, s=ms1, c=c, marker=m, zorder=4, label=article['label'])
        ax.errorbar(aoK, rho, yerr=np.sqrt(rhoV), fmt='.', markersize=ms2, c=c,
                    elinewidth=elinewidth, alpha=alpha, zorder=3)
        # Plot flux j
        j, jV, aoK = [article[key] for key in ['j', 'jV', 'alpha_over_k']]
        ax.scatter(aoK, j, s=ms1, c=c, marker=m, zorder=4)
        ax.errorbar(aoK, j, yerr=np.sqrt(jV), fmt='.', markersize=ms2, c=c,
                    elinewidth=elinewidth, alpha=alpha, zorder=3)
    plt.xlim([alpha_over_k_min - 1e-3, alpha_over_k_max])
    adjust_plot()
    plt.show()
    plt.tight_layout()
    # Save the literature-only figure.
    figname1 = figname + '_no_data'
    figpath = os.path.join(figures_folder, figname1)
    plt.savefig(figpath + '.png', pad_inches=0, bbox_inches='tight')
    if pdf:
        plt.savefig(figpath + '.pdf', pad_inches=0, bbox_inches='tight')
    # Add and plot our data separately for each gene
    if analyses is not None:
        num = 22
        for gene in genes:
            # Reinitialize marker and color generators so every gene figure
            # uses the same marker/color sequence per construct.
            marker = itertools.cycle(markers)
            color = itertools.cycle(colors)
            set_figure_size(num=num, rows=1, page_width_frac=0.5, height_factor=height_factor)
            fig, ax = plt.subplots(1, 1, num=num, clear=True)
            plot_theory(ax)
            # Plot different constructs separately on the same plot
            for construct_id, construct in enumerate(constructs):
                c = next(color)
                m = next(marker)
                analyses_filtered = analyses[(analyses.gene == gene)
                                             & (analyses.construct == construct)]
                # %% Load data for j
                x = aoK = analyses_filtered.loc[:, 'alpha_over_k_J']
                aoKV = analyses_filtered.loc[:, 'alpha_over_k_JV'].values
                y = j = analyses_filtered.loc[:, 'j'].values
                jV = analyses_filtered.loc[:, 'jV'].values
                xstd = np.sqrt(aoKV)
                ystd = np.sqrt(jV)
                # Plot
                ax.scatter(x, y,
                           label=f'{long_labels[construct_id]}', s=ms1, c=c, marker=m, zorder=4)
                ax.errorbar(x, y, xerr=xstd, yerr=ystd, fmt='.', markersize=ms2, c=c,
                            elinewidth=elinewidth, alpha=alpha, zorder=3)
                # %% Load data for rho
                x = aoK = analyses_filtered.loc[:, 'alpha_over_k_rho'].values
                aoKV = analyses_filtered.loc[:, 'alpha_over_k_rhoV'].values
                y = rho = analyses_filtered.loc[:, 'rho'].values
                rhoV = analyses_filtered.loc[:, 'rhoV'].values
                xstd = np.sqrt(aoKV)
                ystd = np.sqrt(rhoV)
                # Plot
                ax.scatter(x, y,
                           s=ms1, c=c, marker=m, zorder=4)
                ax.errorbar(x, y, xerr=xstd, yerr=ystd, fmt='.', markersize=ms2, c=c,
                            elinewidth=elinewidth, alpha=alpha, zorder=3)
            adjust_plot()
            plt.xlim([alpha_over_k_min - 1e-3, alpha_over_k_max])
            plt.title(gene_long[gene])
            plt.show()
            plt.tight_layout()
            # Save the per-gene figure.
            figname1 = figname + f'_{gene}'
            figpath = os.path.join(figures_folder, figname1)
            plt.savefig(figpath + '.png', pad_inches=0, bbox_inches='tight')
            if pdf:
                plt.savefig(figpath + '.pdf', pad_inches=0, bbox_inches='tight')
    return
def plot_normalized_current_density_diagram(analyses_in, num, pdf=True):
    """
    Plot the normalized current-density diagram with and without earlier literature data.

    :param analyses_in: dataframe-like object with ``gene``/``construct`` columns and
        the rho/j/T/L columns accessed below; copied, never modified in place.
    :param num: matplotlib figure number for the first (literature) figure; the
        per-gene figures reuse ``num + 1``.
    :param pdf: if True, also save each figure as a PDF next to the PNG.
    """
    # Constants
    height_factor = 0.65
    ylims = [0, 1.01]
    xlims = [0, 1.02]
    # NOTE(review): this local dict shadows the `gene_long` imported from constants.
    gene_long = {'hb': 'hunchback', 'kn': 'knirps', 'sn': 'snail'}
    markers = ('x', 'o', '^', 'd', '*')
    colors = [colors_additivity[key] for key in colors_additivity]
    ms1 = 8  # marker size
    ms2 = 3  # smaller markers for the error bars
    alpha = 0.25  # transparency of the error bars
    elinewidth = 1  # line width for the errorbars
    long_labels = ['bac', 'no pr', 'no sh']
    analyses = analyses_in.copy()
    genes = set(analyses.gene)
    constructs = set(analyses.construct)
    # %% Plot a diagram with literature data
    fig = set_figure_size(num=num, rows=1, page_width_frac=0.5,
                          height_factor=height_factor, clear=True)
    ax = fig.subplots(1, 1)
    marker = itertools.cycle(markers)
    color = itertools.cycle(colors)
    # Plot
    for data in [Darzacq2007, Tantale2016]:
        c = 'k'  # color
        m = next(marker)
        rho, rhoV, j, jV = [data[key] for key in ['rho', 'rhoV', 'j', 'jV']]
        ax.scatter(rho, j, s=ms1, c=c, marker=m, zorder=4, label=data['label'])
        # Error bars are standard deviations: variances taken to the power 1/2.
        ax.errorbar(rho, j, xerr=rhoV**(1 / 2), yerr=jV**(1 / 2), fmt='.', markersize=ms2, c=c,
                    elinewidth=elinewidth, alpha=alpha, zorder=3)
    # Add the current-density curve
    plot_theoretical_curve(ax)
    plot_adjust()
    plt.xlim(xlims)
    plt.ylim(ylims)
    # NOTE(review): `ymin=` is the legacy keyword (newer matplotlib uses
    # `bottom=`); this second call overrides the lower limit set just above.
    plt.ylim(ymin=-0.02)
    # Save figure
    figname = os.path.join(figures_folder, 'current-density_literature')
    fig.savefig(figname + '.png', pad_inches=0, bbox_inches='tight')
    if pdf:
        fig.savefig(figname + '.pdf', pad_inches=0, bbox_inches='tight')
    # %% Plot a diagram with our data
    num += 1
    for gene in genes:
        marker = itertools.cycle(markers)
        color = itertools.cycle(colors)
        set_figure_size(num=num, rows=1, page_width_frac=0.5, height_factor=height_factor)
        fig, ax = plt.subplots(1, 1, num=num, clear=True)
        # Plot data grouped by gene and construct
        for construct_id, construct in enumerate(constructs):
            figname = '_with_data'
            c = next(color)
            m = next(marker)
            analyses_filtered = analyses[
                (analyses.gene == gene) & (analyses.construct == construct)]
            x = analyses_filtered.loc[:, 'rho'].values
            rV = analyses_filtered.loc[:, 'rhoV'].values
            y = analyses_filtered.loc[:, 'j'].values
            jV = analyses_filtered.loc[:, 'jV'].values
            xstd = np.sqrt(rV)
            ystd = np.sqrt(jV)
            ax.scatter(x, y,
                       label=f'{long_labels[construct_id]}', s=ms1, c=c, marker=m, zorder=4)
            ax.errorbar(x, y, xerr=xstd, yerr=ystd, fmt='.', markersize=ms2, c=c,
                        elinewidth=elinewidth, alpha=alpha, zorder=3)
        # Add the current-density diagram
        plot_theoretical_curve(ax)
        # Adjust the plot
        plot_adjust()
        plt.xlim(xlims)
        plt.ylim(ylims)
        plt.title(f'{gene_long[gene]}')
        # Add fits of the current-density diagram: transit time and the effective gene length.
        # NOTE(review): `analyses_filtered` here is whatever the LAST construct
        # iterated produced, and `L` shadows the `L` imported from constants;
        # both also assume the construct loop ran at least once (as does
        # `figname` below) — confirm this is intended.
        T, TV, L, LV = analyses_filtered[['T', 'TV', 'L', 'LV']].values[0]
        L_rnd, Lstd_rnd = np.round(np.array([L, np.sqrt(LV)]) / 1e3, decimals=2)
        ax.text(
            0.57, 0.05, f'$T = {T:.2f} \pm {np.sqrt(TV):.2f}$ min\n$L = {L_rnd:.1f} \pm {Lstd_rnd:.1f}$ kb', transform=ax.transAxes, va='bottom', ha='left', fontsize=8, weight='normal', style='normal', family='sans-serif')
        # Save figure
        figname = os.path.join(figures_folder, 'current-density_' +
                               gene_long[gene] + figname)
        fig.savefig(figname + '.png', pad_inches=0, bbox_inches='tight')
        if pdf:
            fig.savefig(figname + '.pdf', pad_inches=0, bbox_inches='tight')
    return
def plot_adjust():
    """Label the axes, place the legend and tighten the layout for current-density plots."""
    plt.xlabel('Site occupation density $\\rho$')
    plt.ylabel('Normalized flux $j$')
    legend_style = dict(loc='upper left', labelspacing=0.2, frameon=False, borderaxespad=0)
    plt.legend(**legend_style)
    plt.tight_layout()
    return
def plot_theoretical_curve(ax):
    """
    Draw the theoretical current-density relation over the full density range,
    covering the LD, MC and HD phases (formulas based on Shaw2003 and the
    accompanying manuscript).

    Notation:
    rho         # site occupation density
    rho/l       # polymerase density
    J           # polymerase current
    alphaT      # dimensionless injection attempt rate \tilde{alpha} = alpha/k
    betaT       # dimensionless exit attempt rate \tilde{beta} = beta/k
    J = sT      # particle current s/k
    alphaTMC    # dimensionless rate for the max. current regime (= betaTMC)
    """
    n_points = 100
    color_ld = colors_additivity['sum']    # '#0072BD'
    color_hd = colors_additivity['brown']  # '#FF7F0E'
    alphas = np.linspace(0, alpha_over_k_MC, num=n_points, endpoint=True)
    betas = alphas
    # Normalize both branches by the maximal-current flux
    norm = J_over_k_LD(alpha_over_k_MC)
    ax.plot(rho_LD(alphas), J_over_k_LD(alphas) / norm, c=color_ld)
    ax.plot(rho_HD(betas), J_over_k_HD(betas) / norm, c=color_hd)
def plot_parameter_evolution(analyses, pdf=False):
"""
Plot changes in j, rho, alpha and tau across ncs for different genes and constructs.
"""
ncs = np.arange(11, 15)
genes = set(analyses.gene)
constructs = set(analyses.construct)
long_labels = {'bac': 'bac', 'no_pr': 'no pr', 'no_sh': 'no sh'}
gene_long = {'hb': 'hunchback', 'kn': 'knirps', 'sn': 'snail'}
y_label = {'j': 'Normalized flux $j$',
'rho': 'Site occupation density $\\rho$', 'tau': 'Residence time $\\tau$ (s)', 'alpha_comb': 'Initiation rate $\\alpha$ (pol/min)'}
# Add extra jiggle to be able to distinguish overlapping data points
x_jiggle = 0.04
x_shifts = np.array([-1, 0, 1]) * x_jiggle
# Plot parameters
capsize = 0
markersize = 4
lw = 1 # line width
for gene in genes:
grouped_data = analyses.groupby(by=['gene', 'construct', 'nc'])
all_means = grouped_data.mean()
all_stds = grouped_data.std(ddof=1)
all_ps = analyses.groupby(by=['gene', 'nc']).first()
for quantity in ['j', 'rho', 'tau', 'alpha_comb']:
ymaxs = {'j': 0.36, 'rho': 0.27, 'tau': 103, 'alpha_comb': 12}
num = 12
set_figure_size(num=num, rows=1, page_width_frac=0.5,
clear=True, height_factor=0.7)
fig, ax = plt.subplots(1, 1, num=num, clear=True)
avg_data, std_data = {}, {}
for construct in constructs:
if quantity in ['rho', 'j']:
avg_data[construct] = all_means.loc[(
gene, construct, slice(None)), quantity].values
std_data[construct] = all_stds.loc[(
gene, construct, slice(None)), quantity].values
elif quantity in | |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
""""""
__docformat__ = 'restructuredtext'
from math import floor
import numpy as np
from mvpa2.base.dataset import AttrDataset
from mvpa2.base.state import ClassWithCollections, ConditionalAttribute
if __debug__:
from mvpa2.base import debug
#
# Functors to be used for FeatureSelection
#
class BestDetector(object):
    """Decide whether the most recent value of a sequence is the best one.

    "Best" is defined by `func` (min by default); with `lastminimum` the
    latest occurrence of the optimum is used instead of the earliest.
    """

    def __init__(self, func=min, lastminimum=False):
        """Initialize the detector.

        Parameters
        ----------
        func : functor
          Functor to select the best results. Defaults to min
        lastminimum : bool
          Toggle whether the latest or the earliest minimum is used as
          optimal value to determine the stopping criterion.
        """
        self.__func = func
        self.__lastminimum = lastminimum
        # Index of the best value found during the most recent call
        self.__bestindex = None

    def __call__(self, errors):
        """Return True iff the last entry of `errors` is the best one."""
        # Guard against an empty sequence (would raise in func/min)
        if len(errors) == 0:
            return False
        best = self.__func(errors)
        if self.__lastminimum:
            # Locate the minimum starting from the end of the sequence
            arr = np.array(errors)
            best_at = np.array((arr == best).nonzero()).max()
        else:
            best_at = errors.index(best)
        self.__bestindex = best_at
        # The last value is best iff the optimum sits at the final position
        return bool(best_at == len(errors) - 1)

    bestindex = property(fget=lambda self: self.__bestindex)
class StoppingCriterion(object):
    """Abstract functor that decides when to stop RFE (or maybe general
    optimization... so it probably will be moved out into some other module).
    """

    def __call__(self, errors):
        """Instruct when to stop.

        Every implementation should return `False` when an empty list is
        passed as argument, and a `stop` boolean otherwise.
        """
        raise NotImplementedError
class MultiStopCrit(StoppingCriterion):
    """Combine several stopping criteria into one via a logical 'and'/'or'.

    (The original docstring was copied from the threshold criterion and did
    not describe this class.)
    """

    def __init__(self, crits, mode='or'):
        """
        Parameters
        ----------
        crits : list of StoppingCriterion instances
          For each call to MultiStopCrit all of these criterions will
          be evaluated.
        mode : {'and', 'or'}
          Logical function to determine the multi criterion from the set
          of base criteria.
        """
        if mode not in ('and', 'or'):
            # Call-style raise: valid on both Python 2 and 3 (the original
            # `raise ValueError, ...` comma form is a SyntaxError on Python 3).
            raise ValueError("A mode %r is not supported." % (mode, ))
        self.__mode = mode
        self.__crits = crits

    def __call__(self, errors):
        """Evaluate all criteria to determine the value of the multi criterion."""
        votes = [c(errors) for c in self.__crits]
        if self.__mode == 'and':
            return np.all(votes)
        else:
            return np.any(votes)
class FixedErrorThresholdStopCrit(StoppingCriterion):
    """Stop computation if the latest error drops below a certain threshold.
    """
    def __init__(self, threshold):
        """Initialize with threshold.

        Parameters
        ----------
        threshold : float [0,1]
          Error threshold.
        """
        StoppingCriterion.__init__(self)
        if threshold > 1.0 or threshold < 0.0:
            # Call-style raise: valid on both Python 2 and 3 (the original
            # comma form is a SyntaxError on Python 3).
            raise ValueError(
                "Threshold %f is out of a reasonable range [0,1]." % threshold)
        self.__threshold = threshold

    def __call__(self, errors):
        """Return True once the most recent error is below the threshold."""
        if len(errors) == 0:
            return False
        return bool(errors[-1] < self.__threshold)

    threshold = property(fget=lambda x: x.__threshold)
class NStepsStopCrit(StoppingCriterion):
    """Stop computation after a certain number of steps.
    """
    def __init__(self, steps):
        """Initialize with number of steps.

        Parameters
        ----------
        steps : int
          Number of steps after which to stop.
        """
        StoppingCriterion.__init__(self)
        if steps < 0:
            # Call-style raise: valid on both Python 2 and 3 (the original
            # comma form is a SyntaxError on Python 3).
            raise ValueError(
                "Number of steps %i is out of a reasonable range." % steps)
        self.__steps = steps

    def __call__(self, errors):
        """Return True once at least `steps` error values have accumulated."""
        return bool(len(errors) >= self.__steps)

    steps = property(fget=lambda x: x.__steps)
class NBackHistoryStopCrit(StoppingCriterion):
    """Stop computation if for a number of steps the error was increasing.
    """
    def __init__(self, bestdetector=None, steps=10):
        """Initialize with number of steps.

        Parameters
        ----------
        bestdetector : BestDetector or None
          Used to determine where the best error is located. If None, a
          fresh BestDetector() is created per instance (the original used a
          single mutable default instance shared across all instances).
        steps : int
          How many steps to check after optimal value.
        """
        StoppingCriterion.__init__(self)
        if steps < 0:
            # Call-style raise: valid on both Python 2 and 3 (the original
            # comma form is a SyntaxError on Python 3).
            raise ValueError(
                "Number of steps (got %d) should be non-negative" % steps)
        self.__bestdetector = bestdetector if bestdetector is not None else BestDetector()
        self.__steps = steps

    def __call__(self, errors):
        """Return True once more than `steps` values follow the best error."""
        # Guard against an empty sequence
        if len(errors) == 0:
            return False
        # Charge best detector so bestindex reflects the current sequence
        self.__bestdetector(errors)
        # If the number of elements after the optimum exceeds `steps` -- stop
        return bool(len(errors) - self.__bestdetector.bestindex > self.__steps)

    steps = property(fget=lambda x: x.__steps)
class ElementSelector(ClassWithCollections):
    """Base class to implement functors to select some elements based on a
    sequence of values.
    """

    ndiscarded = ConditionalAttribute(enabled=True,
        doc="Store number of discarded elements.")

    def __init__(self, mode='discard', **kwargs):
        """
        Parameters
        ----------
        mode : {'discard', 'select'}
          Decides whether to `select` or to `discard` features.
        """
        ClassWithCollections.__init__(self, **kwargs)
        # Flag whether to select or to discard elements (validated setter).
        self._set_mode(mode)

    ##REF: Name was automagically refactored
    def _set_mode(self, mode):
        """Choose `select` or `discard` mode."""
        if mode not in ['discard', 'select']:
            # Call-style raise: valid on both Python 2 and 3 (the original
            # `raise ValueError, ...` comma form is a SyntaxError on Python 3).
            raise ValueError("Unkown selection mode [%s]. Can only be one "
                             "of 'select' or 'discard'." % mode)
        self.__mode = mode

    def __call__(self, seq):
        """
        Parameters
        ----------
        seq
          Sequence based on values of which to perform the selection.
          If `Dataset`, then only 1st sample is taken.
        """
        if isinstance(seq, AttrDataset):
            if len(seq)>1:
                raise ValueError(
                    "Feature selectors cannot handle multiple "
                    "sequences in a Dataset at once. We got dataset %s "
                    "as input."
                    % (seq,))
            seq = seq.samples[0]
        elif hasattr(seq, 'shape'):
            # Reject multidimensional arrays -- selection is 1-D only.
            shape = seq.shape
            if len(shape) > 1:
                raise ValueError(
                    "Feature selectors cannot handle multidimensional "
                    "inputs (such as ndarrays with more than a single "
                    "dimension. We got %s with shape %s "
                    "as input." % (seq.__class__, shape))
        return self._call(seq)

    def _call(self, seq):
        """Implementations in derived classed have to return a list of selected
        element IDs based on the given sequence.
        """
        raise NotImplementedError

    mode = property(fget=lambda self:self.__mode, fset=_set_mode)
class RangeElementSelector(ElementSelector):
    """Select elements based on specified range of values"""

    def __init__(self, lower=None, upper=None, inclusive=False,
                 mode='select', **kwargs):
        """Initialization `RangeElementSelector`

        Parameters
        ----------
        lower
          If not None -- select elements which are above of
          specified value
        upper
          If not None -- select elements which are lower of
          specified value
        inclusive
          Either to include end points
        mode
          overrides parent's default to be 'select' since it is more
          native for RangeElementSelector
          XXX TODO -- unify??

        `upper` could be lower than `lower` -- then selection is done
        on values <= lower or >=upper (ie tails). This would produce
        the same result if called with flipped values for mode and
        inclusive.

        If no upper no lower is set, assuming upper,lower=0, thus
        outputing non-0 elements
        """
        if lower is None and upper is None:
            # Better return non-0 values if neither bound is set
            lower, upper = 0, 0
        # init State before registering anything
        ElementSelector.__init__(self, mode=mode, **kwargs)
        # Values on which to base selection
        self.__range = (lower, upper)
        self.__inclusive = inclusive

    def _call(self, seq):
        """Returns selected IDs.
        """
        lower, upper = self.__range
        len_seq = len(seq)
        if lower is not None:
            if self.__inclusive:
                selected = seq >= lower
            else:
                selected = seq > lower
        else:
            # `np.bool` alias was removed in numpy >= 1.24; the builtin
            # `bool` is the equivalent dtype.
            selected = np.ones((len_seq,), dtype=bool)
        if upper is not None:
            if self.__inclusive:
                selected_upper = seq <= upper
            else:
                selected_upper = seq < upper
            if lower is not None:
                if lower < upper:
                    # regular range
                    selected = np.logical_and(selected, selected_upper)
                else:
                    # outside, though that would be similar to exclude
                    selected = np.logical_or(selected, selected_upper)
            else:
                selected = selected_upper
        if self.mode == 'discard':
            selected = np.logical_not(selected)
        result = np.where(selected)[0]
        if __debug__:
            debug("ES", "Selected %d out of %d elements" %
                  (len(result), len_seq))
        return result
class TailSelector(ElementSelector):
"""Select elements from a tail of a distribution.
The default behaviour is to discard the lower tail of a given distribution.
"""
# TODO: 'both' to select from both tails
def __init__(self, tail='lower', sort=True, **kwargs):
"""Initialize TailSelector
Parameters
----------
tail : ['lower', 'upper']
Choose the tail to be processed.
sort : bool
Flag whether selected IDs will be sorted. Disable | |
from ...torch_core import *
from ...layers import *
from .awd_lstm import RNNDropout, LinearDecoder, SequentialRNN
# Public API of this module
__all__ = ['Activation', 'PositionalEncoding', 'GeLU', 'Swish', 'feed_forward', 'MultiHeadAttention', 'MultiHeadRelativeAttention',
           'DecoderLayer', 'Transformer', 'TransformerXL', 'tfmer_lm_config', 'tfmer_clas_config', 'tfmer_lm_split', 'tfmer_clas_split',
           'tfmerXL_lm_config', 'tfmerXL_clas_config', 'tfmerXL_lm_split', 'tfmerXL_clas_split']
# Activation functions supported by the feed-forward blocks
Activation = Enum('Activation', 'ReLU Swish GeLU')
class PositionalEncoding(nn.Module):
    "Encode positions with a sinusoid (sin/cos at geometrically spaced frequencies)."
    def __init__(self, d:int):
        super().__init__()
        # Inverse frequencies 1/10000^(2i/d); buffer so it moves with the module
        inv_freq = 1 / (10000 ** (torch.arange(0., d, 2.)/d))
        self.register_buffer('freq', inv_freq)

    def forward(self, pos:Tensor, bs:int=None):
        # `bs` is accepted for interface compatibility but unused here
        angles = torch.ger(pos, self.freq)
        # Concatenate sin and cos halves -> (len(pos), d)
        return torch.cat([angles.sin(), angles.cos()], dim=-1)
class GeLU(nn.Module):
    "Gaussian Error Linear Unit, tanh approximation."
    def forward(self, x):
        inner = math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))
        return 0.5 * x * (1 + torch.tanh(inner))
class Swish(nn.Module):
    "Swish activation: x * sigmoid(x)."
    def forward(self, x):
        return torch.sigmoid(x).mul(x)
_activ_func = {Activation.ReLU:nn.ReLU(inplace=True), Activation.GeLU:GeLU(), Activation.Swish: Swish}
def feed_forward(d_model:int, d_ff:int, ff_p:float=0., act:Activation=Activation.ReLU, double_drop:bool=True):
    "Position-wise feed-forward block: Linear -> activation [-> Dropout] -> Linear -> Dropout, with residual merge and LayerNorm."
    head = [nn.Linear(d_model, d_ff), _activ_func[act]]
    # `double_drop` adds an extra dropout right after the activation
    if double_drop:
        head.append(nn.Dropout(ff_p))
    return SequentialEx(*head, nn.Linear(d_ff, d_model), nn.Dropout(ff_p), MergeLayer(), nn.LayerNorm(d_model))
class MultiHeadAttention(nn.Module):
    "Multi-head attention with residual connection and LayerNorm."
    def __init__(self, n_heads:int, d_model:int, d_head:int=None, resid_p:float=0., attn_p:float=0., bias:bool=True,
                 scale:bool=True):
        super().__init__()
        d_head = ifnone(d_head, d_model//n_heads)
        self.n_heads,self.d_head,self.scale = n_heads,d_head,scale
        # Single projection producing Q, K and V stacked along the last dim
        self.attention = nn.Linear(d_model, 3 * n_heads * d_head, bias=bias)
        self.out = nn.Linear(n_heads * d_head, d_model, bias=bias)
        self.drop_att,self.drop_res = nn.Dropout(attn_p),nn.Dropout(resid_p)
        self.ln = nn.LayerNorm(d_model)

    def forward(self, x:Tensor, mask:Tensor=None, **kwargs):
        "Residual attention followed by LayerNorm."
        return self.ln(x + self.drop_res(self.out(self._apply_attention(x, mask=mask, **kwargs))))

    def _apply_attention(self, x:Tensor, mask:Tensor=None):
        "Scaled dot-product attention; returns (bs, x_len, n_heads * d_head)."
        bs,x_len = x.size(0),x.size(1)
        wq,wk,wv = torch.chunk(self.attention(x), 3, dim=-1)
        wq,wk,wv = map(lambda x:x.view(bs, x.size(1), self.n_heads, self.d_head), (wq,wk,wv))
        # -> (bs, heads, len, d_head), (bs, heads, d_head, len), (bs, heads, len, d_head)
        wq,wk,wv = wq.permute(0, 2, 1, 3),wk.permute(0, 2, 3, 1),wv.permute(0, 2, 1, 3)
        attn_score = torch.matmul(wq, wk)
        if self.scale: attn_score = attn_score.div_(self.d_head ** 0.5)
        if mask is not None:
            # Cast through float so -inf fill is well-defined, then back
            attn_score = attn_score.float().masked_fill(mask, -float('inf')).type_as(attn_score)
        attn_prob = self.drop_att(F.softmax(attn_score, dim=-1))
        attn_vec = torch.matmul(attn_prob, wv)
        # One .contiguous() suffices (the original called it twice in a row)
        return attn_vec.permute(0, 2, 1, 3).contiguous().view(bs, x_len, -1)

    def _attention_einsum(self, x, mask=None):
        # Permute and matmul is a little bit faster but this implementation is more readable
        bs,x_len = x.size(0),x.size(1)
        wq,wk,wv = torch.chunk(self.attention(x), 3, dim=-1)
        wq,wk,wv = map(lambda x:x.view(bs, x.size(1), self.n_heads, self.d_head), (wq,wk,wv))
        attn_score = torch.einsum('bind,bjnd->bijn', (wq, wk))
        if self.scale: attn_score = attn_score.mul_(1/(self.d_head ** 0.5))
        if mask is not None:
            attn_score = attn_score.float().masked_fill(mask, -float('inf')).type_as(attn_score)
        attn_prob = self.drop_att(F.softmax(attn_score, dim=2))
        attn_vec = torch.einsum('bijn,bjnd->bind', (attn_prob, wv))
        return attn_vec.contiguous().view(bs, x_len, -1)
#def _line_shift1(x:Tensor, mask:bool=False):
# "Shift the line i of `x` by p-i elements to the left, is `mask` puts 0s on the diagonal."
# bs,n,p,nh = x.size()
# x_pad = torch.cat([x.new_zeros(bs,n,1,nh), x], dim=2)
# x_shift = x_pad.view(bs,p + 1,n,nh)[:,1:].view_as(x)
# if mask: x_shift.mul_(torch.tril(x.new_ones(n,p), p-n)[None,:,:,None])
# return x_shift
def _line_shift(x:Tensor, mask:bool=False):
"Shift the line i of `x` by p-i elements to the left, is `mask` puts 0s on the diagonal."
bs,nh,n,p = x.size()
x_pad = torch.cat([x.new_zeros(bs,nh,n,1), x], dim=3)
x_shift = x_pad.view(bs,nh,p + 1,n)[:,:,1:].view_as(x)
if mask: x_shift.mul_(torch.tril(x.new_ones(n,p), p-n)[None,None,])
return x_shift
class MultiHeadRelativeAttention(MultiHeadAttention):
    "MultiHeadAttention with relative positional encoding (Transformer-XL)."
    def __init__(self, n_heads:int, d_model:int, d_head:int, resid_p:float=0., attn_p:float=0., bias:bool=True,
                 scale:bool=True):
        super().__init__(n_heads, d_model, d_head, resid_p=resid_p, attn_p=attn_p, bias=bias, scale=scale)
        # Separate projection for the relative-position encodings
        self.r_attn = nn.Linear(d_model, n_heads * d_head, bias=bias)

    def _apply_attention(self, x:Tensor, r:Tensor=None, u:Tensor=None, v:Tensor=None, mask:Tensor=None, mem:Tensor=None):
        #Notations from the paper: x input, r vector of relative distance between two elements, u et v learnable
        #parameters of the model common between all layers, mask to avoid cheating and mem the previous hidden states.
        bs,x_len,seq_len = x.size(0),x.size(1),r.size(0)
        context = x if mem is None else torch.cat([mem, x], dim=1)
        wq,wk,wv = torch.chunk(self.attention(context), 3, dim=-1)
        # Queries attend only from the current segment; keys/values span memory too
        wq = wq[:,-x_len:]
        wq,wk,wv = map(lambda x:x.view(bs, x.size(1), self.n_heads, self.d_head), (wq,wk,wv))
        wq,wk,wv = wq.permute(0, 2, 1, 3),wk.permute(0, 2, 3, 1),wv.permute(0, 2, 1, 3)
        wkr = self.r_attn(r)
        wkr = wkr.view(seq_len, self.n_heads, self.d_head)
        wkr = wkr.permute(1,2,0)
        #### compute attention score (AC is (a) + (c) and BD is (b) + (d) in the paper)
        AC = torch.matmul(wq+u,wk)
        BD = _line_shift(torch.matmul(wq+v, wkr))
        # Bug fix: the original assigned attn_score only inside `if self.scale:`,
        # raising UnboundLocalError whenever scale=False.
        attn_score = AC + BD
        if self.scale: attn_score = attn_score.mul_(1/(self.d_head ** 0.5))
        if mask is not None:
            attn_score = attn_score.float().masked_fill(mask, -float('inf')).type_as(attn_score)
        attn_prob = self.drop_att(F.softmax(attn_score, dim=-1))
        attn_vec = torch.matmul(attn_prob, wv)
        return attn_vec.permute(0, 2, 1, 3).contiguous().view(bs, x_len, -1)

    def _attention_einsum(self, x:Tensor, r:Tensor=None, u:Tensor=None, v:Tensor=None, mask:Tensor=None, mem:Tensor=None):
        # Permute and matmul is a little bit faster but this implementation is more readable
        # NOTE(review): this path calls _line_shift1, which is commented out at module
        # level -- calling this method would raise NameError; restore it before use.
        bs,x_len,seq_len = x.size(0),x.size(1),r.size(0)
        context = x if mem is None else torch.cat([mem, x], dim=1)
        wq,wk,wv = torch.chunk(self.attention(context), 3, dim=-1)
        wq = wq[:,-x_len:]
        wkr = self.r_attn(r)
        wq,wk,wv = map(lambda x:x.view(bs, x.size(1), self.n_heads, self.d_head), (wq,wk,wv))
        wkr = wkr.view(seq_len, self.n_heads, self.d_head)
        #### compute attention score (AC is (a) + (c) and BS is (b) + (d) in the paper)
        AC = torch.einsum('bind,bjnd->bijn', (wq+u, wk))
        BD = _line_shift1(torch.einsum('bind,jnd->bijn', (wq+v, wkr)))
        attn_score = (AC + BD).mul_(1/(self.d_head ** 0.5))
        if mask is not None:
            attn_score = attn_score.float().masked_fill(mask, -float('inf')).type_as(attn_score)
        attn_prob = self.drop_att(F.softmax(attn_score, dim=2))
        attn_vec = torch.einsum('bijn,bjnd->bind', (attn_prob, wv))
        return attn_vec.contiguous().view(bs, x_len, -1)
class DecoderLayer(nn.Module):
    "Basic block of a Transformer model: attention followed by a feed-forward block."
    #Can't use Sequential directly cause more than one input...
    def __init__(self, n_heads:int, d_model:int, d_head:int, d_inner:int, resid_p:float=0., attn_p:float=0., ff_p:float=0.,
                 bias:bool=True, scale:bool=True, act:Activation=Activation.ReLU, double_drop:bool=True,
                 attn_cls:Callable=MultiHeadAttention):
        super().__init__()
        self.mhra = attn_cls(n_heads, d_model, d_head, resid_p=resid_p, attn_p=attn_p, bias=bias, scale=scale)
        self.ff = feed_forward(d_model, d_inner, ff_p=ff_p, act=act, double_drop=double_drop)

    def forward(self, x:Tensor, mask:Tensor=None, **kwargs):
        attended = self.mhra(x, mask=mask, **kwargs)
        return self.ff(attended)
class Transformer(nn.Module):
    "Transformer model: https://arxiv.org/abs/1706.03762."
    def __init__(self, vocab_sz:int, ctx_len:int, n_layers:int, n_heads:int, d_model:int, d_head:int, d_inner:int,
                 resid_p:float=0., attn_p:float=0., ff_p:float=0., embed_p:float=0., bias:bool=True, scale:bool=True,
                 act:Activation=Activation.ReLU, double_drop:bool=True, attn_cls:Callable=MultiHeadAttention,
                 learned_pos_enc:bool=True, mask:bool=True):
        super().__init__()
        self.mask = mask
        self.encoder = nn.Embedding(vocab_sz, d_model)
        # Either a learned positional embedding or a fixed sinusoidal encoding
        self.pos_enc = nn.Embedding(ctx_len, d_model) if learned_pos_enc else PositionalEncoding(d_model)
        self.drop_emb = nn.Dropout(embed_p)
        self.layers = nn.ModuleList([DecoderLayer(n_heads, d_model, d_head, d_inner, resid_p=resid_p, attn_p=attn_p,
                      ff_p=ff_p, bias=bias, scale=scale, act=act, double_drop=double_drop,
                      attn_cls=attn_cls) for k in range(n_layers)])

    def reset(self):
        # Stateless model; method kept for API parity with TransformerXL
        pass

    def forward(self, x):
        bs,x_len = x.size()
        positions = torch.arange(0, x_len, device=x.device, dtype=x.dtype)
        hidden = self.drop_emb(self.encoder(x) + self.pos_enc(positions)[None]) #.mul_(self.d_model ** 0.5)
        # Upper-triangular causal mask when requested
        causal = torch.triu(x.new_ones(x_len, x_len), diagonal=1).byte()[None,None] if self.mask else None
        #[None,:,:None] for einsum implementation of attention
        for block in self.layers:
            hidden = block(hidden, mask=causal)
        return ([hidden],[hidden]) #For the LinearDecoder
class TransformerXL(nn.Module):
    "TransformerXL model: https://arxiv.org/abs/1901.02860."
    def __init__(self, vocab_sz:int, ctx_len:int, n_layers:int, n_heads:int, d_model:int, d_head:int, d_inner:int,
                 resid_p:float=0., attn_p:float=0., ff_p:float=0., embed_p:float=0., bias:bool=False, scale:bool=True,
                 act:Activation=Activation.ReLU, double_drop:bool=True, attn_cls:Callable=MultiHeadRelativeAttention,
                 learned_pos_enc:bool=False, mask:bool=True, mem_len:int=0):
        super().__init__()
        self.encoder = nn.Embedding(vocab_sz, d_model)
        self.pos_enc = nn.Embedding(ctx_len, d_model) if learned_pos_enc else PositionalEncoding(d_model)
        self.drop_emb = nn.Dropout(embed_p)
        # Layer-shared bias parameters of relative attention (u, v in the paper).
        # NOTE(review): allocated uninitialized here; init_transformer below
        # normal-inits them -- confirm init is always applied.
        self.u = nn.Parameter(torch.Tensor(n_heads, 1, d_head)) #Remove 1 for einsum implementation of attention
        self.v = nn.Parameter(torch.Tensor(n_heads, 1, d_head)) #Remove 1 for einsum implementation of attention
        self.mem_len,self.n_layers,self.d_model,self.mask = mem_len,n_layers,d_model,mask
        # Pre-create empty memory slots when memory is enabled
        if self.mem_len > 0: self.reset()
        self.layers = nn.ModuleList([DecoderLayer(n_heads, d_model, d_head, d_inner, resid_p=resid_p, attn_p=attn_p,
                      ff_p=ff_p, bias=bias, scale=scale, act=act, double_drop=double_drop,
                      attn_cls=attn_cls) for k in range(n_layers)])
    def reset(self):
        "Reset the internal memory."
        # One empty tensor per layer plus one for the embedding output
        self.hidden = [next(self.parameters()).data.new(0) for i in range(self.n_layers+1)]
    def _update_mems(self, hids):
        # Append this step's hidden states to the cache, keeping only the most
        # recent `mem_len` positions; detached so no gradients flow into memory.
        if not getattr(self, 'hidden', False): return None
        assert len(hids) == len(self.hidden), 'len(hids) != len(self.hidden)'
        with torch.no_grad():
            for i in range(len(hids)):
                cat = torch.cat([self.hidden[i], hids[i]], dim=1)
                self.hidden[i] = cat[:,-self.mem_len:].detach()
    # Keep cached memory only for the given batch indices
    def select_hidden(self, idxs): self.hidden = [h[idxs] for h in self.hidden]
    def forward(self, x):
        # x: (bs, x_len) token ids
        bs,x_len = x.size()
        inp = self.drop_emb(self.encoder(x)) #.mul_(self.d_model ** 0.5)
        # m_len: number of positions cached from previous forward passes
        m_len = self.hidden[0].size(1) if hasattr(self, 'hidden') and len(self.hidden[0].size()) > 1 else 0
        seq_len = m_len + x_len
        # Causal mask offset by m_len so current tokens may attend to all memory
        mask = torch.triu(x.new_ones(x_len, seq_len), diagonal=1+m_len).byte()[None,None] if self.mask else None
        #[None,:,:None] for einsum implementation of attention
        hids = []
        # Relative distances, from farthest (seq_len-1) down to 0
        pos = torch.arange(seq_len-1, -1, -1, device=inp.device, dtype=inp.dtype)
        pos_enc = self.pos_enc(pos)
        hids.append(inp)
        for i, layer in enumerate(self.layers):
            mem = self.hidden[i] if self.mem_len > 0 else None
            inp = layer(inp, r=pos_enc, u=self.u, v=self.v, mask=mask, mem=mem)
            hids.append(inp)
        # Drop the memory part from the output of the last layer
        core_out = inp[:,-x_len:]
        if self.mem_len > 0 : self._update_mems(hids)
        # Return format expected by LinearDecoder: (raw outputs, decoder input)
        return (self.hidden if self.mem_len > 0 else [core_out]),[core_out]
def init_transformer(m):
    "Initialize module `m`: normal(0, 0.02) Linear weights, normal(1, 0.02) LayerNorm weights, zero biases, normal(0, 0.02) TransformerXL u/v."
    cls_name = m.__class__.__name__
    if 'Linear' in cls_name:
        if getattr(m, 'weight', None) is not None: nn.init.normal_(m.weight, 0., 0.02)
        if getattr(m, 'bias', None) is not None: nn.init.constant_(m.bias, 0.)
    elif 'LayerNorm' in cls_name:
        if getattr(m, 'weight', None) is not None: nn.init.normal_(m.weight, 1., 0.02)
        if getattr(m, 'bias', None) is not None: nn.init.constant_(m.bias, 0.)
    elif 'TransformerXL' in cls_name:
        if hasattr(m, 'u'): nn.init.normal_(m.u, 0., 0.02)
        if hasattr(m, 'v'): nn.init.normal_(m.v, 0., 0.02)
# Default hyper-parameters for the Transformer language model (causal mask on,
# weights tied with the decoder).
tfmer_lm_config = dict(ctx_len=512, n_layers=12, n_heads=12, d_model=768, d_head=64, d_inner=3072, resid_p=0.1, attn_p=0.1,
                       ff_p=0.1, embed_p=0.1, output_p=0., bias=True, scale=True, act=Activation.GeLU, double_drop=False,
                       tie_weights=True, out_bias=False, init=init_transformer, mask=True)
# Same sizing for the Transformer classifier; no causal mask.
tfmer_clas_config = dict(ctx_len=512, n_layers=12, n_heads=12, d_model=768, d_head=64, d_inner=3072, resid_p=0.1, attn_p=0.1,
                         ff_p=0.1, embed_p=0.1, output_p=0., bias=True, scale=True, act=Activation.GeLU, double_drop=False,
                         init=init_transformer, mask=False)
def tfmer_lm_split(model:nn.Module) -> List[nn.Module]:
    "Split a RNN `model` in groups for differential learning rates."
    encoder = model[0]
    third = len(encoder.layers)//3
    # Three equal layer groups (remainder falls into the last), then embeddings + head
    first = list(encoder.layers[:third])
    second = list(encoder.layers[third:2*third])
    rest = list(encoder.layers[2*third:])
    return [first, second, rest, [encoder.encoder, model[1]]]
def tfmer_clas_split(model:nn.Module) -> List[nn.Module]:
"Split a RNN `model` in groups for differential learning rates."
| |
import logging
from collections import defaultdict, namedtuple
from datetime import datetime
from threading import RLock
from typing import TYPE_CHECKING
from networkx import DiGraph, single_source_shortest_path
from sqlalchemy import or_
from sqlalchemy.orm import aliased
from sqlalchemy.sql import label, literal
from grouper.models.counter import Counter
from grouper.models.group import Group
from grouper.models.group_edge import GROUP_EDGE_ROLES, GroupEdge
from grouper.models.group_service_accounts import GroupServiceAccount
from grouper.models.permission import MappedPermission, Permission
from grouper.models.permission_map import PermissionMap
from grouper.models.public_key import PublicKey
from grouper.models.service_account import ServiceAccount
from grouper.models.user import User
from grouper.models.user_metadata import UserMetadata
from grouper.models.user_password import UserPassword
from grouper.plugin import get_plugin_proxy
from grouper.public_key import get_all_public_key_tags
from grouper.role_user import is_role_user
from grouper.service_account import all_service_account_permissions
from grouper.util import singleton
if TYPE_CHECKING:
from grouper.service_account import ServiceAccountPermission
from typing import Any, Dict, List, Optional, Set
MEMBER_TYPE_MAP = {"User": "users", "Group": "subgroups"}
EPOCH = datetime(1970, 1, 1)
@singleton
def Graph():
    # type: () -> GroupGraph
    """Return the process-wide GroupGraph instance (memoized by @singleton)."""
    return GroupGraph()
# A GroupGraph caches users, permissions, and groups as objects which are intended
# to behave like the corresponding models but without any connection to SQL
# backend.
# Lightweight immutable stand-in for the Permission model.
PermissionTuple = namedtuple(
    "PermissionTuple", ["id", "name", "description", "created_on", "audited"]
)
# Lightweight immutable stand-in for the Group model.
GroupTuple = namedtuple(
    "GroupTuple",
    ["id", "groupname", "name", "description", "canjoin", "enabled", "service_account", "type"],
)
# Raise these exceptions when asking about users or groups that are not cached.
class NoSuchUser(Exception):
    """Raised when asking about a user that is not present in the cached graph."""
    pass
class NoSuchGroup(Exception):
    """Raised when asking about a group that is not present in the cached graph."""
    pass
class GroupGraph(object):
"""The cached permission graph.
Attributes:
lock: Read lock on the data
update_lock: Write lock on the data
users: Names of all enabled users
groups: Names of all enabled groups
permissions: Names of all enabled permissions
checkpoint: Revision of Grouper data
checkpoint_time: Last update time of Grouper data
user_metadata: Full information about each user
group_metadata: Full information about each group
group_service_accounts: Service accounts owned by groups
permission_metadata: Permission grant information for users
service_account_permissions: Permission grant information for service accounts
permission_tuples: Metadata for all enabled permissions
group_tuples: Metadata for all enabled groups
disabled_group_tuples: Metadata for all disabled groups
"""
    def __init__(self):
        # type: () -> None
        """Create an empty graph; call update_from_db() (or from_db) to populate it."""
        self.logger = logging.getLogger(__name__)
        # Cached membership graph and its edge-reversed counterpart; both stay
        # None until the first update_from_db() completes.
        self._graph = None  # type: Optional[DiGraph]
        self._rgraph = None  # type: Optional[DiGraph]
        # lock guards reads of the cached data; update_lock serializes writers.
        self.lock = RLock()
        self.update_lock = RLock()
        self.users = set()  # type: Set[str]
        self.groups = set()  # type: Set[str]
        self.permissions = set()  # type: Set[str]
        # Revision counter and timestamp of the currently cached data.
        self.checkpoint = 0
        self.checkpoint_time = 0
        self.user_metadata = {}  # type: Dict[str, Dict[str, Any]]
        self.group_metadata = {}  # type: Dict[str, Dict[str, Any]]
        self.group_service_accounts = {}  # type: Dict[str, List[str]]
        self.permission_metadata = {}  # type: Dict[str, List[MappedPermission]]
        self.service_account_permissions = {}  # type: Dict[str, List[ServiceAccountPermission]]
        self.permission_tuples = set()  # type: Set[PermissionTuple]
        self.group_tuples = {}  # type: Dict[str, GroupTuple]
        self.disabled_group_tuples = {}  # type: Dict[str, GroupTuple]
    @property
    def nodes(self):
        """All cached (type, name) graph nodes.

        NOTE(review): _graph is None until the first update runs, so calling
        this earlier raises AttributeError -- confirm callers update first.
        """
        with self.lock:
            return self._graph.nodes()
    @property
    def edges(self):
        """All cached graph edges (requires a prior update; see `nodes`)."""
        with self.lock:
            return self._graph.edges()
@classmethod
def from_db(cls, session):
inst = cls()
inst.update_from_db(session)
return inst
    def update_from_db(self, session):
        """Rebuild the cached graph from the database, if its checkpoint changed."""
        # Only allow one thread at a time to construct a fresh graph.
        with self.update_lock:
            checkpoint, checkpoint_time = self._get_checkpoint(session)
            # Cheap no-op when the DB revision counter has not moved.
            if checkpoint == self.checkpoint:
                self.logger.debug("Checkpoint hasn't changed. Not Updating.")
                return
            self.logger.debug("Checkpoint changed; updating!")
            new_graph = DiGraph()
            new_graph.add_nodes_from(self._get_nodes_from_db(session))
            new_graph.add_edges_from(self._get_edges_from_db(session))
            # Reverse graph supports lookups in the opposite edge direction.
            rgraph = new_graph.reverse()
            users = set()
            groups = set()
            # Nodes are (type, name) pairs; bucket them by type.
            for (node_type, node_name) in new_graph.nodes():
                if node_type == "User":
                    users.add(node_name)
                elif node_type == "Group":
                    groups.add(node_name)
            # Build all metadata snapshots before taking the read lock, so the
            # lock below is held only for the attribute swap.
            user_metadata = self._get_user_metadata(session)
            permission_metadata = self._get_permission_metadata(session)
            service_account_permissions = all_service_account_permissions(session)
            group_metadata = self._get_group_metadata(session, permission_metadata)
            group_service_accounts = self._get_group_service_accounts(session)
            permission_tuples = self._get_permission_tuples(session)
            group_tuples = self._get_group_tuples(session)
            disabled_group_tuples = self._get_group_tuples(session, enabled=False)
            # Swap everything in atomically with respect to readers.
            with self.lock:
                self._graph = new_graph
                self._rgraph = rgraph
                self.checkpoint = checkpoint
                self.checkpoint_time = checkpoint_time
                self.users = users
                self.groups = groups
                self.permissions = {
                    perm.permission
                    for perm_list in permission_metadata.values()
                    for perm in perm_list
                }
                self.user_metadata = user_metadata
                self.group_metadata = group_metadata
                self.group_service_accounts = group_service_accounts
                self.permission_metadata = permission_metadata
                self.service_account_permissions = service_account_permissions
                self.permission_tuples = permission_tuples
                self.group_tuples = group_tuples
                self.disabled_group_tuples = disabled_group_tuples
@staticmethod
def _get_checkpoint(session):
counter = session.query(Counter).filter_by(name="updates").scalar()
if counter is None:
return 0, 0
return counter.count, int(counter.last_modified.strftime("%s"))
    @staticmethod
    def _get_user_metadata(session):
        """
        Returns a dict of username: { dict of metadata }.

        Includes enabled flag, role-user flag, passwords, public keys (with
        tags), arbitrary metadata rows, and service-account details if any.
        """
        def user_indexify(data):
            # Bucket rows by user_id for O(1) per-user lookup below.
            ret = defaultdict(list)
            for item in data:
                ret[item.user_id].append(item)
            return ret
        users = session.query(User)
        passwords = user_indexify(session.query(UserPassword).all())
        public_keys = user_indexify(session.query(PublicKey).all())
        user_metadata = user_indexify(session.query(UserMetadata).all())
        public_key_tags = get_all_public_key_tags(session)
        out = {}
        for user in users:
            out[user.username] = {
                "enabled": user.enabled,
                "role_user": user.role_user,
                "passwords": [
                    {
                        "name": password.name,
                        "hash": password.password_hash,
                        "salt": password.salt,
                        "func": "crypt(3)-$6$",
                    }
                    for password in passwords.get(user.id, [])
                ],
                "public_keys": [
                    {
                        "public_key": key.public_key,
                        "fingerprint": key.fingerprint,
                        "fingerprint_sha256": key.fingerprint_sha256,
                        "created_on": str(key.created_on),
                        "tags": [tag.name for tag in public_key_tags.get(key.id, [])],
                        "id": key.id,
                    }
                    for key in public_keys.get(user.id, [])
                ],
                "metadata": [
                    {
                        "data_key": row.data_key,
                        "data_value": row.data_value,
                        "last_modified": str(row.last_modified),
                    }
                    for row in user_metadata.get(user.id, [])
                ],
            }
            # Service accounts carry extra descriptive fields (and owner if set).
            if user.is_service_account:
                account = user.service_account
                out[user.username]["service_account"] = {
                    "description": account.description,
                    "machine_set": account.machine_set,
                }
                if account.owner:
                    out[user.username]["service_account"]["owner"] = account.owner.group.name
        return out
# This describes how permissions are assigned to groups, NOT the intrinsic
# metadata for a permission.
@staticmethod
def _get_permission_metadata(session):
    """
    Returns a dict of groupname: { list of permissions }. Note
    that disabled permissions are not included.

    Plugin-provided aliases of each mapped permission are appended
    alongside the original grant, marked with alias=True.
    """
    out = defaultdict(list)  # group name -> [MappedPermission, ...]
    # Join permissions to groups through PermissionMap, keeping only
    # enabled groups and enabled permissions.
    permissions = session.query(Permission, PermissionMap).filter(
        Permission.id == PermissionMap.permission_id,
        PermissionMap.group_id == Group.id,
        Group.enabled == True,
        Permission.enabled == True,
    )
    for (permission, permission_map) in permissions:
        out[permission_map.group.name].append(
            MappedPermission(
                permission=permission.name,
                audited=permission.audited,
                argument=permission_map.argument,
                groupname=permission_map.group.name,
                granted_on=permission_map.granted_on,
                alias=False,
            )
        )
        # Expand plugin-defined aliases for this (permission, argument)
        # pair; aliases inherit audited/granted_on from the original.
        aliases = get_plugin_proxy().get_aliases_for_mapped_permission(
            session, permission.name, permission_map.argument
        )
        for (name, arg) in aliases:
            out[permission_map.group.name].append(
                MappedPermission(
                    permission=name,
                    audited=permission.audited,
                    argument=arg,
                    groupname=permission_map.group.name,
                    granted_on=permission_map.granted_on,
                    alias=True,
                )
            )
    return out
@staticmethod
def _get_permission_tuples(session):
    """
    Returns a set of PermissionTuple instances, one per known permission.
    """
    # TODO: imported here to avoid a circular dependency at module load.
    from grouper.permissions import get_all_permissions
    return {
        PermissionTuple(
            id=perm.id,
            name=perm.name,
            description=perm.description,
            created_on=perm.created_on,
            audited=perm._audited,
        )
        for perm in get_all_permissions(session)
    }
@staticmethod
def _get_group_metadata(session, permission_metadata):
    """
    Returns a dict of groupname: { dict of metadata } for enabled groups.

    Each entry lists the group's granted permissions (from
    permission_metadata) and its contact email address.
    """
    # NOTE(review): permission_metadata is indexed by group.id here, while
    # _get_permission_metadata keys its output by group *name* — confirm
    # which structure callers actually pass in.
    enabled_groups = session.query(Group).filter(Group.enabled == True)
    return {
        group.groupname: {
            "permissions": [
                {"permission": perm.permission, "argument": perm.argument}
                for perm in permission_metadata[group.id]
            ],
            "contacts": {"email": group.email_address},
        }
        for group in enabled_groups
    }
@staticmethod
def _get_group_service_accounts(session):
    """
    Returns a dict of groupname: { list of service account names }.
    """
    out = defaultdict(list)
    # Join groups to their service accounts through the GroupServiceAccount
    # association table.
    # NOTE(review): there is no filter on Group.enabled or account state
    # here — confirm disabled groups/accounts are intended to appear.
    tuples = session.query(Group, ServiceAccount).filter(
        GroupServiceAccount.group_id == Group.id,
        GroupServiceAccount.service_account_id == ServiceAccount.id,
    )
    for group, account in tuples:
        out[group.groupname].append(account.user.username)
    return out
@staticmethod
def _get_group_tuples(session, enabled=True):
    """
    Returns a dict of groupname: GroupTuple for groups whose enabled flag
    matches `enabled` (defaults to enabled groups only).
    """
    matching = (
        session.query(Group).order_by(Group.groupname).filter(Group.enabled == enabled)
    )
    return {
        group.groupname: GroupTuple(
            id=group.id,
            groupname=group.groupname,
            # "name" mirrors groupname for display purposes.
            name=group.groupname,
            description=group.description,
            canjoin=group.canjoin,
            enabled=group.enabled,
            service_account=is_role_user(session, group=group),
            type="Group",
        )
        for group in matching
    }
@staticmethod
def _get_nodes_from_db(session):
    """Return the graph's node rows: (type, name) for every enabled user
    and every enabled group, via a single UNION query."""
    return (
        session.query(label("type", literal("User")), label("name", User.username))
        .filter(User.enabled == True)
        .union(
            session.query(
                label("type", literal("Group")), label("name", Group.groupname)
            ).filter(Group.enabled == True)
        )
        .all()
    )
@staticmethod
def _get_edges_from_db(session):
    """Return membership edges as a list of
    (("Group", parent_name), (member_type, member_name), {"role": role})
    triples, restricted to active, unexpired edges between enabled rows.
    """
    parent = aliased(Group)
    group_member = aliased(Group)
    user_member = aliased(User)
    edges = []
    now = datetime.utcnow()
    # Two symmetric sub-queries are unioned: member_type == 1 rows are
    # group->group edges, member_type == 0 rows are group->user edges.
    query = (
        session.query(
            label("groupname", parent.groupname),
            label("type", literal("Group")),
            label("name", group_member.groupname),
            label("role", GroupEdge._role),
        )
        .filter(
            parent.id == GroupEdge.group_id,
            group_member.id == GroupEdge.member_pk,
            GroupEdge.active == True,
            parent.enabled == True,
            group_member.enabled == True,
            # An edge is current if it never expires or expires in the future.
            or_(GroupEdge.expiration > now, GroupEdge.expiration == None),
            GroupEdge.member_type == 1,
        )
        .union(
            session.query(
                label("groupname", parent.groupname),
                label("type", literal("User")),
                label("name", user_member.username),
                label("role", GroupEdge._role),
            ).filter(
                parent.id == GroupEdge.group_id,
                user_member.id == GroupEdge.member_pk,
                GroupEdge.active == True,
                parent.enabled == True,
                user_member.enabled == True,
                or_(GroupEdge.expiration > now, GroupEdge.expiration == None),
                GroupEdge.member_type == 0,
            )
        )
    )
    for record in query.all():
        edges.append(
            (("Group", record.groupname), (record.type, record.name), {"role": record.role})
        )
    return edges
def get_permissions(self, audited=False):
    # type: (bool) -> List[PermissionTuple]
    """Get the list of permissions as PermissionTuple instances.

    When `audited` is True, only permissions flagged as audited are
    returned. The cached tuples are read under the graph lock.
    """
    with self.lock:
        if audited:
            return [perm for perm in self.permission_tuples if perm.audited]
        return list(self.permission_tuples)
def get_permission_details(self, name, expose_aliases=True):
""" Get a permission and what groups and service accounts it's assigned to. """
with self.lock:
data = {"groups": {}, "service_accounts": {}}
# Get all mapped versions of the permission. This is only direct relationships.
direct_groups = set()
for groupname, permissions in self.permission_metadata.iteritems():
for permission in permissions:
if permission.permission == name:
data["groups"][groupname] = self.get_group_details(
groupname, show_permission=name, expose_aliases=expose_aliases
)
direct_groups.add(groupname)
# Now find all members of these groups going down the tree.
checked_groups = set()
for groupname in direct_groups:
group = ("Group", groupname)
paths = single_source_shortest_path(self._graph, group, None)
for member, path in paths.iteritems():
if member == group:
continue
member_type, member_name = member
if member_type != "Group":
continue
if member_name in checked_groups:
continue
checked_groups.add(member_name)
data["groups"][member_name] = self.get_group_details(
member_name, show_permission=name, expose_aliases=expose_aliases
)
# Finally, add all service accounts.
for account, permissions in self.service_account_permissions.iteritems():
for permission | |
rhs
def test_setitem_listlike_indexer_duplicate_columns(self):
# GH#38604
df = DataFrame([[1, 2, 3]], columns=["a", "b", "b"])
rhs = DataFrame([[10, 11, 12]], columns=["a", "b", "b"])
df[["a", "b"]] = rhs
expected = DataFrame([[10, 11, 12]], columns=["a", "b", "b"])
tm.assert_frame_equal(df, expected)
df[["c", "b"]] = rhs
expected = DataFrame([[10, 11, 12, 10]], columns=["a", "b", "b", "c"])
tm.assert_frame_equal(df, expected)
def test_setitem_listlike_indexer_duplicate_columns_not_equal_length(self):
    # GH#39403: with duplicate target labels the expanded key length must
    # still match the RHS column count, otherwise a ValueError is raised.
    df = DataFrame([[1, 2, 3]], columns=["a", "b", "b"])
    rhs = DataFrame([[10, 11]], columns=["a", "b"])
    err = "Columns must be same length as key"
    with pytest.raises(ValueError, match=err):
        df[["a", "b"]] = rhs
def test_setitem_intervals(self):
    """Check dtype inference when assigning interval data in various forms."""
    df = DataFrame({"A": range(10)})
    ser = cut(df["A"], 5)
    assert isinstance(ser.cat.categories, IntervalIndex)
    # B & D end up as Categoricals
    # the remainder are converted to in-line objects
    # containing an IntervalIndex.values
    df["B"] = ser
    df["C"] = np.array(ser)
    df["D"] = ser.values
    df["E"] = np.array(ser.values)
    df["F"] = ser.astype(object)
    assert is_categorical_dtype(df["B"].dtype)
    assert is_interval_dtype(df["B"].cat.categories)
    assert is_categorical_dtype(df["D"].dtype)
    assert is_interval_dtype(df["D"].cat.categories)
    # These go through the Series constructor and so get inferred back
    # to IntervalDtype
    assert is_interval_dtype(df["C"])
    assert is_interval_dtype(df["E"])
    # But the Series constructor doesn't do inference on Series objects,
    # so setting df["F"] doesn't get cast back to IntervalDtype
    assert is_object_dtype(df["F"])
    # they compare equal as Index
    # when converted to numpy objects
    c = lambda x: Index(np.array(x))
    tm.assert_index_equal(c(df.B), c(df.B))
    tm.assert_index_equal(c(df.B), c(df.C), check_names=False)
    tm.assert_index_equal(c(df.B), c(df.D), check_names=False)
    tm.assert_index_equal(c(df.C), c(df.D), check_names=False)
    # B & D are the same Series
    tm.assert_series_equal(df["B"], df["B"])
    tm.assert_series_equal(df["B"], df["D"], check_names=False)
    # C & E are the same Series
    tm.assert_series_equal(df["C"], df["C"])
    tm.assert_series_equal(df["C"], df["E"], check_names=False)
def test_setitem_categorical(self):
# GH#35369
df = DataFrame({"h": Series(list("mn")).astype("category")})
df.h = df.h.cat.reorder_categories(["n", "m"])
expected = DataFrame(
{"h": Categorical(["m", "n"]).reorder_categories(["n", "m"])}
)
tm.assert_frame_equal(df, expected)
def test_setitem_with_empty_listlike(self):
# GH#17101
index = Index([], name="idx")
result = DataFrame(columns=["A"], index=index)
result["A"] = []
expected = DataFrame(columns=["A"], index=index)
tm.assert_index_equal(result.index, expected.index)
@pytest.mark.parametrize(
    "cols, values, expected",
    [
        (["C", "D", "D", "a"], [1, 2, 3, 4], 4),  # with duplicates
        (["D", "C", "D", "a"], [1, 2, 3, 4], 4),  # mixed order
        (["C", "B", "B", "a"], [1, 2, 3, 4], 4),  # other duplicate cols
        (["C", "B", "a"], [1, 2, 3], 3),  # no duplicates
        (["B", "C", "a"], [3, 2, 1], 1),  # alphabetical order
        (["C", "a", "B"], [3, 2, 1], 2),  # in the middle
    ],
)
def test_setitem_same_column(self, cols, values, expected):
    # GH#23239: self-assignment of a column must be a no-op regardless of
    # duplicate labels or column ordering; `expected` is the value that was
    # originally stored under label "a".
    df = DataFrame([values], columns=cols)
    df["a"] = df["a"]
    result = df["a"].values[0]
    assert result == expected
def test_setitem_multi_index(self):
    # GH#7655, test that assigning to a sub-frame of a frame
    # with multi-index columns aligns both rows and columns: shuffled
    # row/column order on the RHS must round-trip to an equal frame.
    it = ["jim", "joe", "jolie"], ["first", "last"], ["left", "center", "right"]
    cols = MultiIndex.from_product(it)
    index = date_range("20141006", periods=20)
    vals = np.random.randint(1, 1000, (len(index), len(cols)))
    df = DataFrame(vals, columns=cols, index=index)
    # i: shuffled row labels; j: (copy of) innermost column labels.
    i, j = df.index.values.copy(), it[-1][:]
    np.random.shuffle(i)
    # Reversed columns and shuffled rows still align back into place.
    df["jim"] = df["jolie"].loc[i, ::-1]
    tm.assert_frame_equal(df["jim"], df["jolie"])
    np.random.shuffle(j)
    df[("joe", "first")] = df[("jolie", "last")].loc[i, j]
    tm.assert_frame_equal(df[("joe", "first")], df[("jolie", "last")])
    np.random.shuffle(j)
    df[("joe", "last")] = df[("jolie", "first")].loc[i, j]
    tm.assert_frame_equal(df[("joe", "last")], df[("jolie", "first")])
@pytest.mark.parametrize(
    "columns,box,expected",
    [
        # scalar broadcast to all four (new) columns
        (
            ["A", "B", "C", "D"],
            7,
            DataFrame(
                [[7, 7, 7, 7], [7, 7, 7, 7], [7, 7, 7, 7]],
                columns=["A", "B", "C", "D"],
            ),
        ),
        # row-list broadcast down two new columns
        (
            ["C", "D"],
            [7, 8],
            DataFrame(
                [[1, 2, 7, 8], [3, 4, 7, 8], [5, 6, 7, 8]],
                columns=["A", "B", "C", "D"],
            ),
        ),
        # 1D ndarray broadcast across existing + new columns
        (
            ["A", "B", "C"],
            np.array([7, 8, 9], dtype=np.int64),
            DataFrame([[7, 8, 9], [7, 8, 9], [7, 8, 9]], columns=["A", "B", "C"]),
        ),
        # 2D nested list assigned positionally to the key order
        (
            ["B", "C", "D"],
            [[7, 8, 9], [10, 11, 12], [13, 14, 15]],
            DataFrame(
                [[1, 7, 8, 9], [3, 10, 11, 12], [5, 13, 14, 15]],
                columns=["A", "B", "C", "D"],
            ),
        ),
        # 2D ndarray with the key listing columns out of order
        (
            ["C", "A", "D"],
            np.array([[7, 8, 9], [10, 11, 12], [13, 14, 15]], dtype=np.int64),
            DataFrame(
                [[8, 2, 7, 9], [11, 4, 10, 12], [14, 6, 13, 15]],
                columns=["A", "B", "C", "D"],
            ),
        ),
        # DataFrame RHS aligns on its own column labels
        (
            ["A", "C"],
            DataFrame([[7, 8], [9, 10], [11, 12]], columns=["A", "C"]),
            DataFrame(
                [[7, 2, 8], [9, 4, 10], [11, 6, 12]], columns=["A", "B", "C"]
            ),
        ),
    ],
)
def test_setitem_list_missing_columns(self, columns, box, expected):
    # GH#29334: a list indexer may name columns that don't exist yet;
    # they are created (expansion) while existing ones are overwritten.
    df = DataFrame([[1, 2], [3, 4], [5, 6]], columns=["A", "B"])
    df[columns] = box
    tm.assert_frame_equal(df, expected)
def test_setitem_list_of_tuples(self, float_frame):
tuples = list(zip(float_frame["A"], float_frame["B"]))
float_frame["tuples"] = tuples
result = float_frame["tuples"]
expected = Series(tuples, index=float_frame.index, name="tuples")
tm.assert_series_equal(result, expected)
def test_setitem_iloc_generator(self):
# GH#39614
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
indexer = (x for x in [1, 2])
df.iloc[indexer] = 1
expected = DataFrame({"a": [1, 1, 1], "b": [4, 1, 1]})
tm.assert_frame_equal(df, expected)
def test_setitem_iloc_two_dimensional_generator(self):
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
indexer = (x for x in [1, 2])
df.iloc[indexer, 1] = 1
expected = DataFrame({"a": [1, 2, 3], "b": [4, 1, 1]})
tm.assert_frame_equal(df, expected)
class TestSetitemTZAwareValues:
    """Setting timezone-aware datetime values into a DataFrame column."""

    @pytest.fixture
    def idx(self):
        # tz-aware DatetimeIndex used as the assigned value in each test.
        naive = DatetimeIndex(["2013-1-1 13:00", "2013-1-2 14:00"], name="B")
        idx = naive.tz_localize("US/Pacific")
        return idx

    @pytest.fixture
    def expected(self, idx):
        # Series built from the index's values; the dtype is inferred back
        # to the tz-aware datetime dtype (asserted below).
        expected = Series(np.array(idx.tolist(), dtype="object"), name="B")
        assert expected.dtype == idx.dtype
        return expected

    def test_setitem_dt64series(self, idx, expected):
        # convert to utc
        df = DataFrame(np.random.randn(2, 1), columns=["A"])
        df["B"] = idx
        # keep_tz=False is deprecated; the warning must point users at
        # tz_convert(None) as the replacement.
        with tm.assert_produces_warning(FutureWarning) as m:
            df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
        msg = "do 'idx.tz_convert(None)' before calling"
        assert msg in str(m[0].message)
        result = df["B"]
        comp = Series(idx.tz_convert("UTC").tz_localize(None), name="B")
        tm.assert_series_equal(result, comp)

    def test_setitem_datetimeindex(self, idx, expected):
        # setting a DataFrame column with a tzaware DTI retains the dtype
        df = DataFrame(np.random.randn(2, 1), columns=["A"])
        # assign to frame
        df["B"] = idx
        result = df["B"]
        tm.assert_series_equal(result, expected)

    def test_setitem_object_array_of_tzaware_datetimes(self, idx, expected):
        # setting a DataFrame column with a tzaware DTI retains the dtype
        df = DataFrame(np.random.randn(2, 1), columns=["A"])
        # object array of datetimes with a tz
        df["B"] = idx.to_pydatetime()
        result = df["B"]
        tm.assert_series_equal(result, expected)
class TestDataFrameSetItemWithExpansion:
    """__setitem__ operations that add new column(s) to the frame."""

    # TODO(ArrayManager) update parent (_maybe_update_cacher)
    @td.skip_array_manager_not_yet_implemented
    def test_setitem_listlike_views(self):
        # GH#38148
        df = DataFrame({"a": [1, 2, 3], "b": [4, 4, 6]})
        # get one column as a view of df
        ser = df["a"]
        # add columns with list-like indexer
        df[["c", "d"]] = np.array([[0.1, 0.2], [0.3, 0.4], [0.4, 0.5]])
        # edit in place the first column to check view semantics
        df.iloc[0, 0] = 100
        expected = Series([100, 2, 3], name="a")
        tm.assert_series_equal(ser, expected)

    def test_setitem_string_column_numpy_dtype_raising(self):
        # GH#39010: a string key that starts with a digit must still create
        # a new column rather than raising.
        df = DataFrame([[1, 2], [3, 4]])
        df["0 - Name"] = [5, 6]
        expected = DataFrame([[1, 2, 5], [3, 4, 6]], columns=[0, 1, "0 - Name"])
        tm.assert_frame_equal(df, expected)

    def test_setitem_empty_df_duplicate_columns(self):
        # GH#38521: loc-assignment to one of several duplicate-labeled
        # columns of an empty frame fills the others with NaN.
        df = DataFrame(columns=["a", "b", "b"], dtype="float64")
        df.loc[:, "a"] = list(range(2))
        expected = DataFrame(
            [[0, np.nan, np.nan], [1, np.nan, np.nan]], columns=["a", "b", "b"]
        )
        tm.assert_frame_equal(df, expected)

    def test_setitem_with_expansion_categorical_dtype(self):
        # assignment
        df = DataFrame(
            {"value": np.array(np.random.randint(0, 10000, 100), dtype="int32")}
        )
        labels = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
        df = df.sort_values(by=["value"], ascending=True)
        ser = cut(df.value, range(0, 10500, 500), right=False, labels=labels)
        cat = ser.values
        # setting with a Categorical
        df["D"] = cat
        str(df)
        result = df.dtypes
        expected = Series(
            [np.dtype("int32"), CategoricalDtype(categories=labels, ordered=False)],
            index=["value", "D"],
        )
        tm.assert_series_equal(result, expected)
        # setting with a Series
        df["E"] = ser
        str(df)
        result = df.dtypes
        expected = Series(
            [
                np.dtype("int32"),
                CategoricalDtype(categories=labels, ordered=False),
                CategoricalDtype(categories=labels, ordered=False),
            ],
            index=["value", "D", "E"],
        )
        tm.assert_series_equal(result, expected)
        result1 = df["D"]
        result2 = df["E"]
        tm.assert_categorical_equal(result1._mgr.array, cat)
        # sorting
        ser.name = "E"
        tm.assert_series_equal(result2.sort_index(), ser.sort_index())

    def test_setitem_scalars_no_index(self):
        # GH#16823 / GH#17894: scalar assignment to an empty frame creates
        # an empty column typed after the scalar.
        df = DataFrame()
        df["foo"] = 1
        expected = DataFrame(columns=["foo"]).astype(np.int64)
        tm.assert_frame_equal(df, expected)

    def test_setitem_newcol_tuple_key(self, float_frame):
        # On a flat-columned frame a tuple key creates one new column
        # labeled by the tuple (not interpreted as a MultiIndex key).
        assert (
            "A",
            "B",
        ) not in float_frame.columns
        float_frame["A", "B"] = float_frame["A"]
        assert ("A", "B") in float_frame.columns
        result = float_frame["A", "B"]
        expected = float_frame["A"]
        tm.assert_series_equal(result, expected, check_names=False)

    def test_frame_setitem_newcol_timestamp(self):
        # GH#2155
        columns = date_range(start="1/1/2012", end="2/1/2012", freq=BDay())
        data = DataFrame(columns=columns, index=range(10))
        t = datetime(2012, 11, 1)
        ts = Timestamp(t)
        data[ts] = np.nan  # works, mostly a smoke-test
        assert np.isnan(data[ts]).all()
class TestDataFrameSetItemSlicing:
def test_setitem_slice_position(self):
# GH#31469
df = DataFrame(np.zeros((100, 1)))
df[-4:] = 1
arr = np.zeros((100, 1))
arr[-4:] = 1
expected = DataFrame(arr)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("indexer", [tm.setitem, tm.iloc])
@pytest.mark.parametrize("box", [Series, np.array, list])
@pytest.mark.parametrize("n", [1, 2, 3])
def test_setitem_broadcasting_rhs(self, n, box, indexer):
# GH#40440
# TODO: Add pandas array as box after GH#40933 is fixed
df = DataFrame([[1, 3, 5]] + [[2, 4, 6]] * n, columns=["a", "b", "c"])
indexer(df)[1:] = box([10, 11, 12])
expected = DataFrame([[1, 3, 5]] + | |
<filename>src/covid19sim/plotting/extract_tracker_metrics.py
"""
Extracts metrics from tracker
"""
import numpy as np
import datetime
from covid19sim.utils.constants import POSITIVE_TEST_RESULT, NEGATIVE_TEST_RESULT
def SEIR_Map(state):
    """
    Encodes the literal SEIR state to an integer: S=0, E=1, I=2, R=3.

    Args:
        state (str): State of the human i.e. S, E, I, or R

    Returns:
        (int): integer code for the state; None for any other input.
    """
    return {"S": 0, "E": 1, "I": 2, "R": 3}.get(state)
def get_quarantined_states(data):
    """
    Extracts per-human quarantine flags from tracker data.

    Args:
        data (dict): tracker data loaded from pkl file.

    Returns:
        (np.array): 2D binary array of (n_people, n_days); element is 1 if
            that human was quarantining at that time.
    """
    # Human names end in ":<int>"; sort numerically so rows are stable.
    humans = sorted(data['humans_state'], key=lambda name: int(name.split(":")[-1]))
    assert len(humans) == data['n_humans']
    n_days = len(data['humans_quarantined_state'][humans[0]])
    quarantined = np.zeros((data['n_humans'], n_days))
    for row, name in enumerate(humans):
        quarantined[row] = data['humans_quarantined_state'][name]
    return quarantined
def get_SEIR_states(data):
    """
    Extracts per-human SEIR states from tracker data.

    Args:
        data (dict): tracker data loaded from pkl file.

    Returns:
        (np.array): 2D array of (n_people, n_days) with integer-encoded
            SEIR states (see SEIR_Map).
    """
    humans = sorted(data['humans_state'], key=lambda name: int(name.split(":")[-1]))
    assert len(humans) == data['n_humans']
    # The quarantine series defines the number of observed days.
    n_days = len(data['humans_quarantined_state'][humans[0]])
    states = np.zeros((data['n_humans'], n_days))
    for row, name in enumerate(humans):
        states[row] = [SEIR_Map(letter) for letter in data['humans_state'][name]]
    return states
def get_SEIR_quarantined_states(data):
    """
    Convenience wrapper returning both state matrices at once.

    Args:
        data (dict): tracker data loaded from pkl file.

    Returns:
        (np.array): 2D (n_people, n_days) integer-encoded SEIR states.
        (np.array): 2D (n_people, n_days) binary quarantine flags.
    """
    seir = get_SEIR_states(data)
    quarantined = get_quarantined_states(data)
    return seir, quarantined
##################################################################
############## DAILY SERIES ##############
##################################################################
def _daily_fraction_of_population_infected(data):
"""
Returns a time series of number of daily infections as a fraction of population
Args:
(dict): tracker data loaded from pkl file.
Returns:
(np.array): 1D array where each element is fraction representing the proportion of population infected on that simulation day
"""
n_people = data['n_humans']
cases_per_day = data['cases_per_day']
cases_per_day[0] = 0 # on day 0 there are no infections but this array contains initially infected people
return np.array(cases_per_day) / n_people
def _daily_fraction_quarantine(data):
    """
    Returns a time series of the fraction of the population quarantining
    on each simulation day.

    Args:
        data (dict): tracker data loaded from pkl file.

    Returns:
        (np.array): 1D array of per-day fractions (quarantined / n_people).
    """
    _, quarantined = get_SEIR_quarantined_states(data)
    return quarantined.sum(axis=0) / data['n_humans']
def _daily_fraction_ill(data):
    """
    Returns a time series of the fraction of the population quarantining
    on each simulation day.

    NOTE(review): despite its name this body is an exact duplicate of
    `_daily_fraction_quarantine` and never inspects illness/symptom data —
    it looks like an unfinished placeholder. Confirm the intended metric.

    Args:
        data (dict): tracker data loaded from pkl file.

    Returns:
        (np.array): 1D array of per-day fractions (quarantined / n_people).
    """
    n_people = data['n_humans']
    states, quarantined_states = get_SEIR_quarantined_states(data)
    daily_quarantine = quarantined_states.sum(axis=0)
    return daily_quarantine / n_people
def _daily_false_quarantine(data):
    """
    Returns a time series of the fraction of the population *falsely*
    quarantining (in quarantine while susceptible or recovered), counted
    from the intervention day onwards.

    Args:
        data (dict): tracker data loaded from pkl file.

    Returns:
        (np.array): 1D array of per-day fractions.
    """
    intervention_day = data['intervention_day']
    n_people = data['n_humans']
    states, quarantined = get_SEIR_quarantined_states(data)
    # Only days from the intervention onwards are relevant.
    states = states[:, intervention_day:]
    quarantined = quarantined[:, intervention_day:]
    # Quarantined while S (0) or R (3) counts as a false quarantine.
    not_risky = (states == 0) | (states == 3)
    return ((quarantined == 1) & not_risky).sum(axis=0) / n_people
def _daily_false_susceptible_recovered(data):
    """
    Returns a time series of the fraction of the population falsely treated
    as non-risky: humans who are E or I but not quarantining, counted from
    the intervention day onwards.

    Args:
        data (dict): tracker data loaded from pkl file.

    Returns:
        (np.array): 1D array of per-day fractions.
    """
    intervention_day = data['intervention_day']
    n_people = data['n_humans']
    states, quarantined = get_SEIR_quarantined_states(data)
    states = states[:, intervention_day:]
    quarantined = quarantined[:, intervention_day:]
    # Not quarantined while E (1) or I (2) counts as a false negative.
    risky = (states == 1) | (states == 2)
    missed = ((quarantined == 0) & risky).sum(axis=0)
    return missed / n_people
def _daily_susceptible_recovered(data):
    """
    Returns a time series of the fraction of the population that is either
    S (susceptible) or R (recovered).

    Args:
        data (dict): tracker data loaded from pkl file.

    Returns:
        (np.array): 1D array where each element is a fraction for that
            simulation day.
    """
    n_people = data['n_humans']
    states, _ = get_SEIR_quarantined_states(data)
    # S is encoded as 0 and R as 3 (see SEIR_Map). The previous check used
    # (states == 1), which counted E (exposed) instead of R (recovered),
    # contradicting this function's documented contract.
    return ((states == 0) | (states == 3)).sum(axis=0) / n_people
def _daily_infected(data):
    """
    Returns a time series of the fraction of the population that is either
    E (exposed) or I (infectious).

    Args:
        data (dict): tracker data loaded from pkl file.

    Returns:
        (np.array): 1D array where each element is a fraction for that
            simulation day.
    """
    # The previous version read data['intervention_day'] into an unused
    # local; that dead lookup has been removed.
    n_people = data['n_humans']
    states, _ = get_SEIR_quarantined_states(data)
    return ((states == 1) | (states == 2)).sum(axis=0) / n_people
def _daily_fraction_risky_classified_as_non_risky(data):
    """
    Returns a time series of the ** fraction of infected people ** that are
    not in quarantine.

    Args:
        data (dict): tracker data loaded from pkl file.

    Returns:
        (np.array): 1D array; one fraction per simulation day (0 on days
            with no infected people).
    """
    risky = _daily_infected(data)
    classified_non_risky = _daily_false_susceptible_recovered(data)
    out = np.zeros_like(risky)
    # `where` guards against division by zero; such days stay 0.
    np.divide(classified_non_risky, risky, out=out, where=risky != 0)
    return out
def _daily_fraction_non_risky_classified_as_risky(data):
    """
    Returns a time series of the ** fraction of not-infected people (S or
    R) ** that are in quarantine.

    Args:
        data (dict): tracker data loaded from pkl file.

    Returns:
        (np.array): 1D array; one fraction per simulation day (0 on days
            with no non-risky people).
    """
    non_risky = _daily_susceptible_recovered(data)
    classified_risky = _daily_false_quarantine(data)
    out = np.zeros_like(non_risky)
    # `where` guards against division by zero; such days stay 0.
    np.divide(classified_risky, non_risky, out=out, where=non_risky != 0)
    return out
def _daily_number_of_tests(data):
"""
Returns a time series of number of tests per day
Args:
(dict): tracker data loaded from pkl file.
Returns:
(np.array): 1D array where each value is the number of tests on that simulation day
"""
min_date = datetime.datetime.strptime(data['SIMULATION_START_TIME'], "%Y-%m-%d %H:%M:%S").date()
# n_days = data['simulation_days']
max_date = max(x['test_time'].date() for x in data['test_monitor'])
n_days = (max_date - min_date).days + 1
n_tests_per_day = np.zeros(n_days)
for test in data['test_monitor']:
day_idx = (test['test_time'].date() - min_date).days
n_tests_per_day[day_idx] += 1
return n_tests_per_day
def _daily_positive_test_results(data):
    """
    Returns a time series of the number of positive test results received
    per simulation day.

    Args:
        data (dict): tracker data loaded from pkl file.

    Returns:
        (np.array): 1D array; element i is the number of positive results
            received on simulation day i.
    """
    start = datetime.datetime.strptime(
        data['SIMULATION_START_TIME'], "%Y-%m-%d %H:%M:%S"
    ).date()
    # The series is sized by the latest *test* date; results that arrive
    # after that date are dropped below to stay within bounds.
    last = max(entry['test_time'].date() for entry in data['test_monitor'])
    positives = np.zeros((last - start).days + 1)
    for entry in data['test_monitor']:
        received_on = entry['result_time'].date()
        if received_on <= last and entry['test_result'] == POSITIVE_TEST_RESULT:
            positives[(received_on - start).days] += 1
    return positives
def X_daily_fraction_ill_not_working(data):
    """
    Returns a time series of fraction of population that is ill and not working.

    Not implemented yet; the leading "X_" keeps it out of the metric
    collection until it is.

    Args:
        data (dict): tracker data loaded from pkl file.

    Returns:
        (np.array): 1D array where each value is the fraction of population
            that cancelled work due to illness.

    Raises:
        NotImplementedError: always.
    """
    raise NotImplementedError()
def _daily_fraction_cumulative_cases(data):
"""
Returns a series where each value is a true fraction of population that is infected upto some simulation day
Args:
(dict): tracker data loaded from pkl file.
Returns:
(np.array): 1D array where each value is the above described fraction
"""
x = data['cases_per_day']
return np.cumsum(x) / data['n_humans']
def _daily_incidence(data):
"""
Returns a series where each value is disease incidence i.e. infected / susceptible per 1000 people
Args:
(dict): tracker data loaded from pkl file.
Returns:
(np.array): 1D array where each value is the above described fraction
"""
daily_n_susceptible = data['s']
daily_cases = data['cases_per_day']
incidence = []
for s, n in zip(daily_n_susceptible, daily_cases[1:]):
incidence.append(n / s)
return np.array(incidence) * 1000
def _daily_prevalence(data):
"""
Returns a series where each value is a true fraction of currently infected population.
Args:
(dict): tracker data loaded from pkl file.
Returns:
(np.array): 1D array where each value is the above described fraction
"""
n_infected_per_day = data['ei_per_day']
n_people = data['n_humans']
prevalence = np.array(n_infected_per_day) / n_people
return prevalence
##################################################################
############## SCALARS ##############
##################################################################
def _mean_effective_contacts(data):
"""
Returns mean effective contacts across the population i.e. actual interactions that took place.
Args:
(dict): tracker | |
result) -> bool:
return isinstance(result, DataFrame) and result.columns.equals(
self._obj_with_exclusions.columns
)
def _define_paths(self, func, *args, **kwargs):
    """Build the (fast_path, slow_path) pair of callables used by transform.

    fast_path applies `func` to a whole group at once; slow_path applies it
    column-by-column via DataFrame.apply along self.axis. A string `func`
    is resolved to the method of that name on the group.
    """
    if isinstance(func, str):
        fast_path = lambda group: getattr(group, func)(*args, **kwargs)
        slow_path = lambda group: group.apply(
            lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis
        )
    else:
        fast_path = lambda group: func(group, *args, **kwargs)
        slow_path = lambda group: group.apply(
            lambda x: func(x, *args, **kwargs), axis=self.axis
        )
    return fast_path, slow_path
def _choose_path(self, fast_path: Callable, slow_path: Callable, group: DataFrame):
    """Evaluate `group` via the slow path, then promote to the fast path
    only if it runs without error and yields an identical, column-
    compatible DataFrame.

    Returns (chosen_path, slow_path_result).
    """
    path = slow_path
    res = slow_path(group)
    # if we make it here, test if we can use the fast path
    try:
        res_fast = fast_path(group)
    except AssertionError:
        raise  # pragma: no cover
    except Exception:
        # GH#29631 For user-defined function, we can't predict what may be
        # raised; see test_transform.test_transform_fastpath_raises
        return path, res
    # verify fast path does not change columns (and names), otherwise
    # its results cannot be joined with those of the slow path
    if not isinstance(res_fast, DataFrame):
        return path, res
    if not res_fast.columns.equals(group.columns):
        return path, res
    if res_fast.equals(res):
        path = fast_path
    return path, res
def _transform_item_by_item(self, obj: DataFrame, wrapper) -> DataFrame:
    """Apply `wrapper` column-by-column, silently dropping (with a
    deprecation warning) columns it cannot handle.

    Raises TypeError if no column could be transformed.
    """
    # iterate through columns, see test_transform_exclude_nuisance
    # gets here with non-unique columns
    output = {}
    inds = []
    # Columns are addressed by position (i) because labels may repeat.
    for i, (colname, sgb) in enumerate(self._iterate_column_groupbys(obj)):
        try:
            output[i] = sgb.transform(wrapper)
        except TypeError:
            # e.g. trying to call nanmean with string values
            warn_dropping_nuisance_columns_deprecated(type(self), "transform")
        else:
            inds.append(i)
    if not output:
        raise TypeError("Transform function invalid for data types")
    # Restore the original labels of the surviving columns.
    columns = obj.columns.take(inds)
    result = self.obj._constructor(output, index=obj.index)
    result.columns = columns
    return result
def filter(self, func, dropna=True, *args, **kwargs):
    """
    Return a copy of a DataFrame excluding filtered elements.

    Elements from groups are filtered if they do not satisfy the
    boolean criterion specified by func.

    Parameters
    ----------
    func : function
        Function to apply to each subframe. Should return True or False.
    dropna : Drop groups that do not pass the filter. True by default;
        If False, groups that evaluate False are filled with NaNs.

    Returns
    -------
    filtered : DataFrame

    Raises
    ------
    TypeError
        If `func` returns something other than a scalar boolean (or NA).

    Notes
    -----
    Each subframe is endowed the attribute 'name' in case you need to know
    which group you are working on.

    Functions that mutate the passed object can produce unexpected
    behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
    for more details.

    Examples
    --------
    >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
    ...                           'foo', 'bar'],
    ...                    'B' : [1, 2, 3, 4, 5, 6],
    ...                    'C' : [2.0, 5., 8., 1., 2., 9.]})
    >>> grouped = df.groupby('A')
    >>> grouped.filter(lambda x: x['B'].mean() > 3.)
         A  B    C
    1  bar  2  5.0
    3  bar  4  1.0
    5  bar  6  9.0
    """
    indices = []
    obj = self._selected_obj
    gen = self.grouper.get_iterator(obj, axis=self.axis)
    for name, group in gen:
        # Attach the group label without triggering pandas' __setattr__
        # machinery (which would warn/copy on a potential view).
        object.__setattr__(group, "name", name)
        res = func(group, *args, **kwargs)
        try:
            # Collapse 1-element results (e.g. a 1x1 frame) to a scalar.
            res = res.squeeze()
        except AttributeError:  # allow e.g., scalars and frames to pass
            pass
        # interpret the result of the filter
        if is_bool(res) or (is_scalar(res) and isna(res)):
            # NA results are treated as False (group excluded).
            if res and notna(res):
                indices.append(self._get_index(name))
        else:
            # non scalars aren't allowed
            raise TypeError(
                f"filter function returned a {type(res).__name__}, "
                "but expected a scalar bool"
            )
    return self._apply_filter(indices, dropna)
def __getitem__(self, key) -> DataFrameGroupBy | SeriesGroupBy:
    """Select column(s) from the grouped frame.

    A scalar key yields a SeriesGroupBy; a list key yields a
    DataFrameGroupBy. Tuple keys of length > 1 are deprecated.
    """
    if self.axis == 1:
        # GH 37725
        raise ValueError("Cannot subset columns when using axis=1")
    # per GH 23566
    if isinstance(key, tuple) and len(key) > 1:
        # if len == 1, then it becomes a SeriesGroupBy and this is actually
        # valid syntax, so don't raise warning
        warnings.warn(
            "Indexing with multiple keys (implicitly converted to a tuple "
            "of keys) will be deprecated, use a list instead.",
            FutureWarning,
            stacklevel=2,
        )
    return super().__getitem__(key)
    def _gotitem(self, key, ndim: int, subset=None):
        """
        Return a sliced GroupBy object of the requested dimensionality.

        Parameters
        ----------
        key : string / list of selections
        ndim : {1, 2}
            requested ndim of result
        subset : object, default None
            subset to act on; defaults to the grouped object (for ndim == 2)
            or to the ``key`` column of it (for ndim == 1).
        """
        if ndim == 2:
            if subset is None:
                subset = self.obj
            # Re-wrap as a DataFrameGroupBy over the (possibly reduced) subset,
            # propagating all grouping metadata so it behaves like ``self``.
            return DataFrameGroupBy(
                subset,
                self.grouper,
                axis=self.axis,
                level=self.level,
                grouper=self.grouper,
                exclusions=self.exclusions,
                selection=key,
                as_index=self.as_index,
                sort=self.sort,
                group_keys=self.group_keys,
                squeeze=self.squeeze,
                observed=self.observed,
                mutated=self.mutated,
                dropna=self.dropna,
            )
        elif ndim == 1:
            if subset is None:
                subset = self.obj[key]
            # A single-column selection yields a SeriesGroupBy on that column.
            return SeriesGroupBy(
                subset,
                level=self.level,
                grouper=self.grouper,
                selection=key,
                sort=self.sort,
                group_keys=self.group_keys,
                squeeze=self.squeeze,
                observed=self.observed,
                dropna=self.dropna,
            )
        raise AssertionError("invalid ndim for _gotitem")
def _get_data_to_aggregate(self) -> Manager2D:
obj = self._obj_with_exclusions
if self.axis == 1:
return obj.T._mgr
else:
return obj._mgr
def _insert_inaxis_grouper_inplace(self, result: DataFrame) -> None:
# zip in reverse so we can always insert at loc 0
columns = result.columns
for name, lev, in_axis in zip(
reversed(self.grouper.names),
reversed(self.grouper.get_group_levels()),
reversed([grp.in_axis for grp in self.grouper.groupings]),
):
# GH #28549
# When using .apply(-), name will be in columns already
if in_axis and name not in columns:
result.insert(0, name, lev)
def _indexed_output_to_ndframe(
self, output: Mapping[base.OutputKey, ArrayLike]
) -> DataFrame:
"""
Wrap the dict result of a GroupBy aggregation into a DataFrame.
"""
indexed_output = {key.position: val for key, val in output.items()}
columns = Index([key.label for key in output])
columns._set_names(self._obj_with_exclusions._get_axis(1 - self.axis).names)
result = self.obj._constructor(indexed_output)
result.columns = columns
return result
    def _wrap_agged_manager(self, mgr: Manager2D) -> DataFrame:
        """Wrap an aggregated internal manager in a DataFrame.

        With ``as_index=False`` the group keys are inserted as regular columns
        and the result gets a default integer index; otherwise the grouper's
        result index is attached.
        """
        if not self.as_index:
            # GH 41998 - empty mgr always gets index of length 0
            rows = mgr.shape[1] if mgr.shape[0] > 0 else 0
            index = Index(range(rows))
            mgr.set_axis(1, index)
            result = self.obj._constructor(mgr)
            self._insert_inaxis_grouper_inplace(result)
            result = result._consolidate()
        else:
            index = self.grouper.result_index
            mgr.set_axis(1, index)
            result = self.obj._constructor(mgr)
        if self.axis == 1:
            # Grouping was along columns: transpose back to the caller's view.
            result = result.T
        # Note: we only need to pass datetime=True in order to get numeric
        # values converted
        return self._reindex_output(result)._convert(datetime=True)
def _iterate_column_groupbys(self, obj: DataFrame | Series):
for i, colname in enumerate(obj.columns):
yield colname, SeriesGroupBy(
obj.iloc[:, i],
selection=colname,
grouper=self.grouper,
exclusions=self.exclusions,
)
def _apply_to_column_groupbys(self, func, obj: DataFrame | Series) -> DataFrame:
from pandas.core.reshape.concat import concat
columns = obj.columns
results = [
func(col_groupby) for _, col_groupby in self._iterate_column_groupbys(obj)
]
if not len(results):
# concat would raise
return DataFrame([], columns=columns, index=self.grouper.result_index)
else:
return concat(results, keys=columns, axis=1)
def nunique(self, dropna: bool = True) -> DataFrame:
"""
Return DataFrame with counts of unique elements in each position.
Parameters
----------
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
nunique: DataFrame
Examples
--------
>>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
... 'ham', 'ham'],
... 'value1': [1, 5, 5, 2, 5, 5],
... 'value2': list('abbaxy')})
>>> df
id value1 value2
0 spam 1 a
1 egg 5 b
2 egg 5 b
3 spam 2 a
4 ham 5 x
5 ham 5 y
>>> df.groupby('id').nunique()
value1 value2
id
egg 1 1
ham 1 2
spam 2 1
Check for rows with the same id but conflicting values:
>>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())
id value1 value2
0 spam 1 a
3 spam 2 a
4 ham 5 x
5 ham 5 y
"""
if self.axis != 0:
# see test_groupby_crash_on_nunique
return self._python_agg_general(lambda sgb: sgb.nunique(dropna))
obj = self._obj_with_exclusions
results = self._apply_to_column_groupbys(
lambda sgb: sgb.nunique(dropna), obj=obj
)
if not self.as_index:
results.index = Index(range(len(results)))
self._insert_inaxis_grouper_inplace(results)
return results
@Appender(DataFrame.idxmax.__doc__)
def idxmax(self, axis=0, skipna: bool = True):
axis = DataFrame._get_axis_number(axis)
numeric_only = None if axis == 0 else False
def func(df):
# NB: here we use numeric_only=None, in DataFrame it is False GH#38217
res = df._reduce(
nanops.nanargmax,
"argmax",
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
)
indices = res._values
index = df._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return df._constructor_sliced(result, index=res.index)
func.__name__ = "idxmax"
return self._python_apply_general(func, self._obj_with_exclusions)
@Appender(DataFrame.idxmin.__doc__)
def idxmin(self, axis=0, skipna: bool = True):
axis = DataFrame._get_axis_number(axis)
numeric_only = None if axis == 0 else False
def func(df):
# NB: here we use numeric_only=None, in DataFrame it is False GH#38217
res = df._reduce(
nanops.nanargmin,
"argmin",
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
)
indices = res._values
index = df._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return df._constructor_sliced(result, index=res.index)
func.__name__ = "idxmin"
return self._python_apply_general(func, self._obj_with_exclusions)
boxplot = boxplot_frame_groupby
def _wrap_transform_general_frame(
obj: DataFrame, group: DataFrame, res: DataFrame | Series
) -> DataFrame:
from pandas import concat
if isinstance(res, Series):
# we need to broadcast across the
# other dimension; this will preserve dtypes
# GH14457
if res.index.is_(obj.index):
res_frame = concat([res] | |
<filename>scripts/tf_cnn_benchmarks/benchmark_cnn_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for benchmark_cnn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import re
import mock
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.core.framework import step_stats_pb2
from tensorflow.core.profiler import tfprof_log_pb2
from tensorflow.python.platform import test
import benchmark_cnn
import flags
import preprocessing
import test_util
import variable_mgr_util
from platforms import util as platforms_util
def _check_has_gpu():
  """Raise ValueError unless a CUDA-capable GPU is visible to TensorFlow."""
  cuda_gpu_available = test.is_gpu_available(cuda_only=True)
  if cuda_gpu_available:
    return
  raise ValueError(
      """You have asked to run part or all of this on GPU, but it appears
        that no GPU is available. If your machine has GPUs it is possible you
        do not have a version of TensorFlow with GPU support. To build with GPU
        support, add --config=cuda to the build flags.\n """)
class TfCnnBenchmarksModelTest(tf.test.TestCase):
  """Tests which are run with multiple models."""
  def setUp(self):
    super(TfCnnBenchmarksModelTest, self).setUp()
    benchmark_cnn.setup(benchmark_cnn.make_params())
  def get_model_name(self):
    # Subclasses override to return a model name; None disables all tests here.
    return None
  # Return true to run tests that don't need to be run on every model.
  # This should be done for one or two cheap models.
  def extended_tests(self):
    return False
  # Return false to suppress actually running the model; this is useful
  # for tests that are large.
  def model_execution_test(self):
    return False
  # Return false to suppress actually saving and loading the model.
  def model_save_load_test(self):
    return False
  def testSaveLoadModel(self):
    """Trains one batch, checkpoints, then restores and continues training."""
    _check_has_gpu()
    if not self.get_model_name() or not self.model_save_load_test():
      return
    params = benchmark_cnn.make_params(
        model=self.get_model_name(),
        num_batches=1,
        num_intra_threads=0,
        num_inter_threads=0,
        distortions=False,
        batch_size=2,
        variable_update='replicated',
        num_warmup_batches=0,
        num_gpus=2,
        train_dir=test_util.get_temp_dir('testSaveLoadModel_' +
                                         self.get_model_name()))
    # Run one batch and save the model.
    # Note that this uses a non-test session.
    bench = benchmark_cnn.BenchmarkCNN(params)
    bench.run()
    # NOTE(review): assertEquals/assertNotEquals are deprecated unittest
    # aliases of assertEqual/assertNotEqual.
    self.assertEquals(bench.init_global_step, 0)
    # Clear the default graph.
    tf.reset_default_graph()
    # Test if checkpoint had been saved.
    ckpt = tf.train.get_checkpoint_state(params.train_dir)
    match = re.match(os.path.join(params.train_dir, r'model.ckpt-(\d+).index'),
                     ckpt.model_checkpoint_path + '.index')
    self.assertTrue(match)
    self.assertGreaterEqual(int(match.group(1)), params.num_batches)
    params = params._replace(num_batches=2)
    # Reload the model
    bench = benchmark_cnn.BenchmarkCNN(params)
    bench.run()
    # Check if global step has been restored.
    self.assertNotEquals(bench.init_global_step, 0)
    ckpt = tf.train.get_checkpoint_state(params.train_dir)
    match = re.match(os.path.join(params.train_dir, r'model.ckpt-(\d+).index'),
                     ckpt.model_checkpoint_path + '.index')
    self.assertTrue(match)
    self.assertGreaterEqual(int(match.group(1)), params.num_batches)
    # Check that the batch norm moving averages are restored from checkpoints
    with tf.Graph().as_default():
      bench = benchmark_cnn.BenchmarkCNN(params)
      bench._build_model()
      saver = tf.train.Saver(bench.variable_mgr.savable_variables())
      with tf.Session(config=benchmark_cnn.create_config_proto(params)) as sess:
        benchmark_cnn.load_checkpoint(saver, sess, params.train_dir)
        sess.run(bench.variable_mgr.get_post_init_ops())
        bn_moving_vars = [
            v for v in tf.global_variables()
            if '/batchnorm' in v.name and '/moving' in v.name
        ]
        self.assertGreater(len(bn_moving_vars), 0)
        for moving_var in bn_moving_vars:
          moving_var_value = sess.run(moving_var)
          # Check that the moving means and moving variances have been restored
          # by asserting they are not their default values of 0 and 1,
          # respectively
          if '/moving_mean' in moving_var.name:
            self.assertFalse(np.array_equal(moving_var_value,
                                            np.zeros(moving_var_value.shape,
                                                     moving_var_value.dtype)))
          else:
            self.assertIn('/moving_variance', moving_var.name)
            self.assertFalse(np.array_equal(moving_var_value,
                                            np.ones(moving_var_value.shape,
                                                    moving_var_value.dtype)))
  def testModel(self):
    """Builds and runs the model for a single small batch."""
    _check_has_gpu()
    if not self.get_model_name() or not self.model_execution_test():
      return
    params = benchmark_cnn.make_params(
        model=self.get_model_name(),
        num_batches=1,
        num_intra_threads=1,
        num_inter_threads=12,
        batch_size=2,
        distortions=False)
    # Run this one; note that this uses a non-test session.
    bench = benchmark_cnn.BenchmarkCNN(params)
    bench.run()
  def testSendRecvVariables(self):
    self._testVariables('parameter_server')
    if self.extended_tests():
      self._testVariables('parameter_server', local_parameter_device='CPU')
      self._testVariables('parameter_server', optimizer='sgd')
  def testReplicatedVariables(self):
    self._testVariables('replicated')
    if self.extended_tests():
      self._testVariables('replicated', all_reduce_spec=None)
      self._testVariables('replicated', use_fp16=True, fp16_vars=False)
      self._testVariables(
          'replicated',
          all_reduce_spec=None,
          use_fp16=True,
          fp16_vars=False,
          fp16_enable_auto_loss_scale=True,
          fp16_inc_loss_scale_every_n=4)
  def testIndependentVariables(self):
    self._testVariables('independent')
    self._testVariables(
        'independent',
        all_reduce_spec=None,
        use_fp16=True,
        fp16_vars=False,
        fp16_enable_auto_loss_scale=True,
        fp16_inc_loss_scale_every_n=4)
  def testSummaryVerbosity(self):
    self._testVariables('parameter_server', summary_verbosity=1)
    if self.extended_tests():
      self._testVariables('parameter_server', summary_verbosity=2)
      self._testVariables('parameter_server', summary_verbosity=3)
  def testStagedVariables(self):
    self._testVariables('parameter_server', staged_vars=True)
    if self.extended_tests():
      self._testVariables('parameter_server', staged_vars=True,
                          local_parameter_device='CPU')
      self._testVariables('parameter_server', staged_vars=True, use_fp16=True,
                          fp16_vars=True)
  def _assert_correct_var_type(self, var, params):
    # Non-input variables should be fp16 only when fp16 vars were requested;
    # batchnorm variables stay fp32 even then.
    if 'gpu_cached_inputs' not in var.name:
      if params.use_fp16 and params.fp16_vars and 'batchnorm' not in var.name:
        expected_type = tf.float16
      else:
        expected_type = tf.float32
      self.assertEqual(var.dtype.base_dtype, expected_type)
  def _testVariables(self,
                     variable_update,
                     summary_verbosity=0,
                     local_parameter_device='GPU',
                     staged_vars=False,
                     optimizer='momentum',
                     # TODO(b/80125832): Enable nccl in tests
                     # all_reduce_spec='nccl',
                     all_reduce_spec='',
                     use_fp16=False,
                     fp16_vars=False,
                     fp16_enable_auto_loss_scale=False,
                     fp16_inc_loss_scale_every_n=10):
    """Builds the model and validates variable placement, dtype and summaries."""
    if not self.get_model_name():
      return
    _check_has_gpu()
    params = benchmark_cnn.make_params(
        model=self.get_model_name(),
        num_batches=1,
        num_intra_threads=1,
        num_inter_threads=12,
        distortions=False,
        variable_update=variable_update,
        local_parameter_device=local_parameter_device,
        num_gpus=2,
        summary_verbosity=summary_verbosity,
        staged_vars=staged_vars,
        optimizer=optimizer,
        all_reduce_spec=all_reduce_spec,
        compact_gradient_transfer=False if all_reduce_spec == 'nccl' else True,
        use_fp16=use_fp16,
        fp16_loss_scale=2.,
        fp16_vars=fp16_vars,
        fp16_enable_auto_loss_scale=fp16_enable_auto_loss_scale,
        fp16_inc_loss_scale_every_n=fp16_inc_loss_scale_every_n,
    )
    # Test building models using multiple GPUs, but don't
    # run them.
    with self.test_session(graph=tf.Graph()):
      bench = benchmark_cnn.BenchmarkCNN(params)
      bench._build_model()
      # Rough validation of variable type and placement, depending on mode.
      all_vars = tf.global_variables() + tf.local_variables()
      if params.variable_update == 'parameter_server':
        for v in all_vars:
          tf.logging.debug('var: %s' % v.name)
          match = re.match(r'tower_(\d+)/v/gpu_cached_inputs:0', v.name)
          if match:
            self.assertEquals(v.device, '/device:GPU:%s' % match.group(1))
          elif v.name.startswith('v/'):
            self.assertEquals(v.device,
                              '/device:%s:0' % local_parameter_device)
            self._assert_correct_var_type(v, params)
          elif v.name in ('input_processing/images:0',
                          'input_processing/labels:0', 'init_learning_rate:0',
                          'global_step:0', 'loss_scale:0',
                          'loss_scale_normal_steps:0'):
            self.assertEquals(v.device, '/device:CPU:0')
          else:
            raise ValueError('Unexpected variable %s' % v.name)
      else:
        v0_count = 0
        v1_count = 0
        for v in all_vars:
          if v.name.startswith('tower_0/v0/'):
            self.assertEquals(v.name, 'tower_0/v0/gpu_cached_inputs:0')
            self.assertEquals(v.device, '/device:GPU:0')
          elif v.name.startswith('tower_1/v1/'):
            self.assertEquals(v.name, 'tower_1/v1/gpu_cached_inputs:0')
            self.assertEquals(v.device, '/device:GPU:1')
          elif v.name.startswith('v0/'):
            v0_count += 1
            self.assertEquals(v.device, '/device:GPU:0')
            self._assert_correct_var_type(v, params)
          elif v.name.startswith('v1/'):
            v1_count += 1
            self.assertEquals(v.device, '/device:GPU:1')
            self._assert_correct_var_type(v, params)
          elif v.name in ('input_processing/images:0',
                          'input_processing/labels:0', 'init_learning_rate:0',
                          'global_step:0', 'loss_scale:0',
                          'loss_scale_normal_steps:0'):
            self.assertEquals(v.device, '/device:CPU:0')
          else:
            raise ValueError('Unexpected variable %s' % v.name)
        self.assertEquals(v0_count, v1_count)
      # Validate summary ops in the model depending on verbosity level
      summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
      num_summary_ops = len(summary_ops)
      self.assertEquals(num_summary_ops > 0, summary_verbosity > 0)
      if summary_verbosity > 0:
        has_affine_histogram = False
        has_gradient_histogram = False
        has_log_gradients_histogram = False
        for op in summary_ops:
          if '/gradients' in op.name:
            has_gradient_histogram = True
          elif '/affine' in op.name:
            has_affine_histogram = True
          elif 'log_gradients' in op.name:
            has_log_gradients_histogram = True
        self.assertEqual(summary_verbosity >= 3, has_affine_histogram)
        self.assertEqual(summary_verbosity >= 3, has_gradient_histogram)
        self.assertEqual(summary_verbosity >= 2, has_log_gradients_histogram)
        if summary_verbosity == 1:
          self.assertLess(num_summary_ops, 10)
class TrivialModelTest(TfCnnBenchmarksModelTest):
  """Runs the shared model tests against the 'trivial' model."""
  def get_model_name(self):
    return 'trivial'
class TestVgg1Model(TfCnnBenchmarksModelTest):
  """Runs the shared model tests against the 'vgg11' model."""
  def get_model_name(self):
    return 'vgg11'
class TestVgg19Model(TfCnnBenchmarksModelTest):
  """Runs the shared model tests against the 'vgg19' model."""
  def get_model_name(self):
    return 'vgg19'
class TestLenet5Model(TfCnnBenchmarksModelTest):
  """Runs the shared model tests against the 'lenet' model."""
  def get_model_name(self):
    return 'lenet'
class TestGooglenetModel(TfCnnBenchmarksModelTest):
  """Runs the shared model tests against the 'googlenet' model."""
  def get_model_name(self):
    return 'googlenet'
class TestOverfeatModel(TfCnnBenchmarksModelTest):
  """Runs the shared model tests against the 'overfeat' model."""
  def get_model_name(self):
    return 'overfeat'
class TestAlexnetModel(TfCnnBenchmarksModelTest):
  """Runs the shared model tests (including extended ones) against 'alexnet'."""
  def get_model_name(self):
    return 'alexnet'
  def extended_tests(self):
    # alexnet is cheap enough to carry the extra test variants.
    return True
class TestTrivialModel(TfCnnBenchmarksModelTest):
  """Runs the shared model tests against the 'trivial' model.

  NOTE(review): duplicates TrivialModelTest above — presumably one of the
  two is redundant; confirm before removing either.
  """
  def get_model_name(self):
    return 'trivial'
class TestInceptionv3Model(TfCnnBenchmarksModelTest):
  """Runs the shared model tests (including extended ones) against 'inception3'."""
  def get_model_name(self):
    return 'inception3'
  def extended_tests(self):
    return True
class TestInceptionv4Model(TfCnnBenchmarksModelTest):
  """Runs the shared model tests against the 'inception4' model."""
  def get_model_name(self):
    return 'inception4'
class TestResnet50Model(TfCnnBenchmarksModelTest):
  """Runs the shared model tests against 'resnet50', incl. save/load."""
  def get_model_name(self):
    return 'resnet50'
  def model_save_load_test(self):
    # Only this model exercises the checkpoint save/restore path.
    return True
class TestResnet101Model(TfCnnBenchmarksModelTest):
  """Runs the shared model tests against the 'resnet101' model."""
  def get_model_name(self):
    return 'resnet101'
class TestResnet152Model(TfCnnBenchmarksModelTest):
  """Runs the shared model tests against the 'resnet152' model."""
  def get_model_name(self):
    return 'resnet152'
class TestResnet50V2Model(TfCnnBenchmarksModelTest):
  """Runs the shared model tests against the 'resnet50_v2' model."""
  def get_model_name(self):
    return 'resnet50_v2'
class TestResnet101V2Model(TfCnnBenchmarksModelTest):
  """Runs the shared model tests against the 'resnet101_v2' model."""
  def get_model_name(self):
    return 'resnet101_v2'
class TestResnet152V2Model(TfCnnBenchmarksModelTest):
  """Runs the shared model tests against the 'resnet152_v2' model."""
  def get_model_name(self):
    return 'resnet152_v2'
class TfCnnBenchmarksTest(tf.test.TestCase):
"""Tests that benchmark_cnn runs correctly."""
  def setUp(self):
    # Require a GPU and reset benchmark_cnn global state before every test.
    super(TfCnnBenchmarksTest, self).setUp()
    _check_has_gpu()
    benchmark_cnn.setup(benchmark_cnn.make_params())
def _run_benchmark_cnn(self, params):
logs = []
benchmark_cnn.log_fn = test_util.print_and_add_to_list(logs)
benchmark_cnn.BenchmarkCNN(params).run()
return logs
  def _run_benchmark_cnn_with_fake_images(self, params, images, labels):
    """Run BenchmarkCNN on in-memory images/labels; return captured log lines.

    Args:
      params: Params for BenchmarkCNN.
      images: ndarray of input images, fed through the test preprocessor.
      labels: ndarray of integer labels, parallel to `images`.
    Returns:
      A list of output lines captured from benchmark_cnn's log function.
    """
    logs = []
    benchmark_cnn.log_fn = test_util.print_and_add_to_list(logs)
    bench = benchmark_cnn.BenchmarkCNN(params)
    # Swap the real input pipeline for a deterministic test preprocessor.
    bench.input_preprocessor = preprocessing.TestImagePreprocessor(
        params.batch_size * params.num_gpus,
        [[params.batch_size, 227, 227, 3], [params.batch_size]],
        params.num_gpus,
        bench.model.data_type)
    bench.dataset._queue_runner_required = True
    bench.input_preprocessor.set_fake_data(images, labels)
    bench.input_preprocessor.expected_subset = ('validation'
                                                if params.eval else 'train')
    bench.run()
    return logs
def _run_benchmark_cnn_with_black_and_white_images(self, params):
"""Runs BenchmarkCNN with black and white images.
A BenchmarkCNN is created and run with black and white images as input. Half
the images are black (i.e., filled with 0s) and half are white (i.e., filled
with 255s).
Args:
params: Params for BenchmarkCNN.
Returns:
A list of lines from the output of BenchmarkCNN.
"""
effective_batch_size = params.batch_size * params.num_gpus
half_batch_size = effective_batch_size // 2
images = np.zeros((effective_batch_size, 227, 227, 3), dtype=np.float32)
images[half_batch_size:, :, :, :] = 255
labels = np.array([0] * half_batch_size + [1] * half_batch_size,
dtype=np.int32)
return self._run_benchmark_cnn_with_fake_images(params, images, labels)
def _train_and_eval_local(self,
params,
check_output_values=False,
max_final_loss=10.,
skip=None,
use_test_preprocessor=True):
# TODO(reedwm): check_output_values should default to True and be enabled
# on every test. Currently, if check_output_values=True and the calls to
# tf.set_random_seed(...) and np.seed(...) are passed certain seed values in
# benchmark_cnn.py, then most tests will fail. This indicates the tests
# are brittle and could fail with small changes when
# check_output_values=True, so check_output_values defaults to False for
# now.
def run_fn(run_type, inner_params):
del run_type
if use_test_preprocessor:
return [
self._run_benchmark_cnn_with_black_and_white_images(inner_params)
]
else:
return [self._run_benchmark_cnn(inner_params)]
return test_util.train_and_eval(self, run_fn, params,
check_output_values=check_output_values,
max_final_loss=max_final_loss,
skip=skip)
  def testAlexnet(self):
    """Runs local train+eval of alexnet for a small number of batches."""
    params = test_util.get_params('testAlexnet')._replace(
        num_batches=30, init_learning_rate=0.01, model='alexnet')
    self._train_and_eval_local(params)
  def testNoPrintAccuracy(self):
    """Runs train+eval with training-accuracy printing disabled."""
    params = test_util.get_params('testNoPrintAccuracy')._replace(
        print_training_accuracy=False)
    self._train_and_eval_local(params)
def testLowAccuracy(self):
params = test_util.get_params('testLowAccuracy')._replace(
print_training_accuracy=True, batch_size=5, num_batches=10)
# We force low accuracy by having each batch containing 10 identical images,
# each with a different label. This guarantees a top-1 accuracy of exactly
# 0.1 and a top-5 accuracy of exactly 0.5.
images = np.zeros((10, 227, 227, 3), dtype=np.float32)
labels = np.arange(10, dtype=np.int32)
| |
import configparser
import os
import random
import json
import aiohttp
import wikipedia as wiki
import urbandictionary as ud
import guilded
import pymongo
from pymongo import MongoClient
import wikipediahelper
import lowerutils
config = configparser.ConfigParser()
config.read('config.ini')
# Command prefix and owner id come from the [GUILDED] section of config.ini.
pre = config['GUILDED']['prefix']
own = config['GUILDED']['owner']
# MongoDB handles: cluster -> database -> collection, configured in [MONGO].
cluster = MongoClient(config['MONGO']['link'])
db = cluster[config['MONGO']['db']]
collection = db[config['MONGO']['collection']]
#pubipdo = config['HOST']['pubipdo']
#port = config['HOST']['port']
# Directory of waifu images; awaifus is the image names with extensions removed.
waifus = config['REQPATHS']['waifus']
awaifus = lowerutils.remove_extension(os.listdir(waifus))
bot = guilded.Bot(command_prefix= pre, owner_id=own)
# NOTE(review): dead code — the bare string literal below is the old
# JSON-file persistence layer, superseded by the MongoDB bootstrap that
# follows. It has no runtime effect and can safely be deleted.
""" if os.path.exists('ledgyboi.json'):
    with open('ledgyboi.json', 'r') as f:
        reggie = f.read()
    if reggie != '':
        ledgyboi = json.loads(reggie)
    else:
        ledgyboi = {}
        with open('ledgyboi.json', 'w') as f:
            reggie = json.dumps(ledgyboi)
            f.write(reggie)
else:
    ledgyboi = {}
    with open('ledgyboi.json', 'w') as f:
        reggie = json.dumps(ledgyboi)
        f.write(reggie)
if os.path.exists('marketplace.json'):
    with open('marketplace.json', 'r') as f:
        reggie = f.read()
    if reggie != '':
        chutiya = json.loads(reggie)
    else:
        chutiya = {}
        with open('marketplace.json', 'w') as f:
            reggie = json.dumps(chutiya)
            f.write(reggie)
else:
    chutiya = {}
    with open('marketplace.json', 'w') as f:
        reggie = json.dumps(chutiya)
        f.write(reggie)
"""
# Load the points/waifu ledger from Mongo document _id=1, creating it on
# first run.
if collection.count_documents({"_id": 1}) > 0:
    ledgyboi = collection.find_one({"_id": 1})['ledgyboi']
else:
    ledgyboi = {}
    collection.insert_one({"_id": 1, "ledgyboi": ledgyboi})
# Document _id=2 holds the marketplace listings.
if collection.count_documents({"_id": 2}) > 0:
    chutiya = collection.find_one({"_id": 2})['chutiya']
else:
    chutiya = {}
    collection.insert_one({"_id": 2, "chutiya": chutiya})
# Ledger helpers: points credit/debit, user bootstrap, marketplace listings.
def credit(name, amt):
    """Add ``amt`` points to ``name``'s balance and persist the ledger."""
    account = ledgyboi[name]
    account['points'] += int(amt)
    updateledgyboi()
def debit(name, amt):
    """Subtract ``amt`` points from ``name``'s balance and persist the ledger."""
    account = ledgyboi[name]
    account['points'] -= int(amt)
    updateledgyboi()
def instancenewuserorupdateolduser(name):
    """Create a ledger entry for a new user, or award 1 point to a known one.

    Messages from "Soham's Bot" itself are ignored so the bot cannot earn
    points from its own replies.
    """
    if name == "Soham's Bot":
        return
    if name in ledgyboi:
        ledgyboi[name]['points'] += 1
    else:
        ledgyboi[name] = {'points': 1, 'waifus': {}}
    updateledgyboi()
def marketlister(waifu, owner, price, visibility, name=None):
    """Create (or overwrite) a marketplace listing for ``waifu`` and persist it."""
    listing = {'Code': waifu}
    if name:
        # NOTE(review): truncation keeps 27 chars + '...' = 30 chars, which is
        # longer than the 28-char threshold — presumably a display-width
        # quirk; confirm before tightening.
        listing['Name'] = name[0:27] + '...' if len(name) > 28 else name
    listing['Owner'] = owner
    listing['Price'] = price
    listing['Visibility'] = visibility
    chutiya[waifu] = listing
    updatechutiya()
def updateledgyboi():
    # Persist the in-memory points/waifu ledger to Mongo document _id=1.
    collection.update_one({"_id": 1}, {"$set":{"ledgyboi": ledgyboi}})
def updatechutiya():
    # Persist the in-memory marketplace listings to Mongo document _id=2.
    collection.update_one({"_id": 2}, {"$set":{"chutiya": chutiya}})
async def err(ctx):
    """Send a generic error embed to the invoking channel."""
    embed = guilded.Embed(title='Oofus', description='Some error occured... \n Please notify bot creator! \n Your help will be appreciated!')
    await ctx.send(embed = embed)
async def upload_file(path_to_file, media_type):
    """Upload a local file to Guilded's media endpoint and return its URL.

    Args:
        path_to_file: Path of the file to upload.
        media_type: Value for the ``dynamicMediaTypeId`` query parameter
            (e.g. 'ContentMedia').

    Returns:
        The hosted media URL reported by the upload endpoint.
    """
    # Use context managers so the file handle is always closed (the previous
    # version leaked it) and the response body is read before the session
    # closes.
    with open(path_to_file, 'rb') as file:
        async with aiohttp.ClientSession() as session:
            response = await session.post(
                'https://media.guilded.gg/media/upload',
                data={'file': file},
                params={'dynamicMediaTypeId': media_type}
            )
            data = await response.json()
    return data['url']
@bot.event()
async def on_ready():
    # Startup hook: log the bot account name once connected.
    print('Comrade I have logged in as:' + bot.user.name)
@bot.event()
async def on_message(ctx):
    # Every message earns its author one point (and creates their ledger
    # entry on first sight); the bot's own name is filtered inside the helper.
    instancenewuserorupdateolduser(ctx.author.name)
@bot.command()
async def urban(ctx):
    """Look up a word on Urban Dictionary and post its top definition.

    Fixes: the argument is now stripped (previously a bare "{prefix}urban "
    left a whitespace-only word that passed the truthiness check), and the
    Urban Dictionary logo thumbnail — which was immediately overwritten by
    the author's avatar — is no longer set.
    """
    word = ctx.content.replace(f'{pre}urban','').strip()
    if word:
        try:
            urband = ud.define(word)[0]
            # Definitions can exceed the embed description limit, so clamp.
            embed = guilded.Embed(title = urband.word, description = urband.definition[0:2045]+'...')
            embed.add_field(name = 'Usage', value= str(urband.example))
            embed.set_thumbnail(ctx.author.avatar_url.replace('https://img.guildedcdn.com/','https://s3-us-west-2.amazonaws.com/www.guilded.gg/'))
            await ctx.send(embed=embed)
        except IndexError:
            # ud.define returned no results for this word.
            embed = guilded.Embed(title='Oofus', description='Could not find the meaning...')
            embed.set_thumbnail(ctx.author.avatar_url.replace('https://img.guildedcdn.com/','https://s3-us-west-2.amazonaws.com/www.guilded.gg/'))
            await ctx.send(embed=embed)
    else:
        embed = guilded.Embed(title='wtf?', description='meaning of what?')
        embed.set_thumbnail(ctx.author.avatar_url.replace('https://img.guildedcdn.com/','https://s3-us-west-2.amazonaws.com/www.guilded.gg/'))
        await ctx.send(embed=embed)
@bot.command()
async def wiki(ctx):
    """Search Wikipedia via wikipediahelper and post a summary with a link."""
    # NOTE(review): this handler shadows the module-level
    # ``import wikipedia as wiki`` alias. The command name presumably derives
    # from the function name, so the safe fix is renaming the import, not
    # this function — confirm before changing either.
    searchquery = ctx.content.replace(f'{pre}wiki','')
    search = wikipediahelper.wikipediahelp(searchquery)
    embed = guilded.Embed(title = search[0], description = search[1][0:2045]+'...')
    embed.add_field(name = 'Click this link for more:', value = search[2])
    embed.set_thumbnail(ctx.author.avatar_url.replace('https://img.guildedcdn.com/','https://s3-us-west-2.amazonaws.com/www.guilded.gg/'))
    await ctx.send(embed = embed)
@bot.command()
async def showowner(ctx):
    """Post an embed naming the owner of the current team."""
    owner_id = ctx.team.owner_id
    team_owner = await bot.fetch_user(owner_id)
    embed = guilded.Embed(title = "Owner is...", description = str(team_owner))
    await ctx.send(embed = embed)
@bot.command()
async def ulostwaves(ctx):
    # NOTE(review): byte-for-byte duplicate of ``showowner`` above —
    # presumably an in-joke alias; confirm intent before deduplicating.
    team = ctx.team.owner_id
    owner = await bot.fetch_user(team)
    embed = guilded.Embed(title = "Owner is...", description = str(owner))
    await ctx.send(embed = embed)
@bot.command()
async def choose(ctx):
    """Pick one option at random from the space-separated arguments.

    Fix: with no arguments, random.choice([]) previously raised IndexError
    and the command silently failed; now a usage hint is sent instead.
    """
    choices = ctx.content.replace(f'{pre}choose','')
    rchoices = choices.split()
    if not rchoices:
        embed = guilded.Embed(title='wtf?', description='choose from what?')
        embed.set_thumbnail(ctx.author.avatar_url.replace('https://img.guildedcdn.com/','https://s3-us-west-2.amazonaws.com/www.guilded.gg/'))
        await ctx.send(embed=embed)
        return
    embed = guilded.Embed(title="I choose...", description=f"{random.choice(rchoices)}!")
    embed.set_thumbnail(ctx.author.avatar_url.replace('https://img.guildedcdn.com/','https://s3-us-west-2.amazonaws.com/www.guilded.gg/'))
    await ctx.send(embed=embed)
@bot.command()
async def points(ctx):
    """Report the invoking user's current point balance."""
    balance = ledgyboi[ctx.author.name]['points']
    embed = guilded.Embed(title=f"{ctx.author.name}'s points!", description=f"You currently have... \n {balance} points!")
    embed.set_thumbnail(ctx.author.avatar_url.replace('https://img.guildedcdn.com/','https://s3-us-west-2.amazonaws.com/www.guilded.gg/'))
    await ctx.send(embed=embed)
@bot.command()
async def ping(ctx):
    """Liveness check: reply with a Pong embed."""
    embed = guilded.Embed(title='Pong!')
    embed.set_thumbnail(ctx.author.avatar_url.replace('https://img.guildedcdn.com/','https://s3-us-west-2.amazonaws.com/www.guilded.gg/'))
    await ctx.send(embed=embed)
@bot.command()
async def avatar(ctx, member: guilded.User=None):
    """Show the invoking user's avatar, or ``member``'s if one is given."""
    def cdn_to_s3(url):
        return url.replace('https://img.guildedcdn.com/','https://s3-us-west-2.amazonaws.com/www.guilded.gg/')
    async def _send_avatar(user):
        embed = guilded.Embed(title=f"{user.name}'s avatar")
        embed.set_image(cdn_to_s3(user.avatar_url))
        embed.set_thumbnail(cdn_to_s3(ctx.author.avatar_url))
        await ctx.send(embed=embed)
    if not member:
        await _send_avatar(ctx.author)
    else:
        # Keep the fetch AND the send inside the try, matching the original
        # error handling scope.
        try:
            await _send_avatar(await bot.fetch_user(member))
        except Exception as e:
            await err(ctx)
            print(e)
@bot.command()
async def userinfo(ctx, member: guilded.User=None):
    """Post a profile card for ``member`` (defaults to the invoking user)."""
    target = await bot.fetch_user(member) if member else ctx.author
    about = target.info
    created = target.created_at.strftime("%d/%m/%Y")
    last_seen = target.last_online.strftime("%d/%m/%Y")
    embed = guilded.Embed(title=f"{target.name} info",
                          description=f'Name: {target.name}\n ID: {target.id}\n About: {about}\n Avatar url: [click here]({target.avatar_url})\nAccount Created: {created}\n Last online: {last_seen}')
    embed.set_thumbnail(target.avatar_url.replace('https://img.guildedcdn.com/','https://s3-us-west-2.amazonaws.com/www.guilded.gg/'))
    await ctx.send(embed=embed)
@bot.command()
async def transfer(ctx, member: guilded.User=None):
    """Transfer points from the invoking user to ``member``.

    Usage: <prefix>transfer <mention> <amount>. The amount must be a whole
    number and the sender must hold at least that many points.

    SECURITY FIX: the amount was previously type-checked via ``eval()`` on
    raw chat input, which allows arbitrary code execution; it is now parsed
    with ``int()``.
    """
    if not member:
        embed = guilded.Embed(title = 'Oofus', description=f"You need to enter the member you want to transfer to.")
        embed.set_thumbnail(ctx.author.avatar_url.replace('https://img.guildedcdn.com/','https://s3-us-west-2.amazonaws.com/www.guilded.gg/'))
        await ctx.send(embed=embed)
    else:
        try:
            rawargs = ctx.content.replace(f'{pre}transfer', '')
            args = rawargs.split()
            member = await bot.fetch_user(member)
            # int() raises ValueError on non-numeric input, IndexError covers
            # a missing amount argument; both map to the usage message below.
            amount = int(args[1])
            if ledgyboi[ctx.author.name]['points'] >= amount:
                # Lazily create the recipient's ledger entry before crediting.
                if member.name not in ledgyboi.keys():
                    instancenewuserorupdateolduser(member.name)
                debit(ctx.author.name, amount)
                credit(member.name, amount)
                embed = guilded.Embed(title = 'Transfer Succeeded!', description=f"Debited {amount} from {ctx.author.name} and credited {amount} to {member.name}")
                embed.set_thumbnail(ctx.author.avatar_url.replace('https://img.guildedcdn.com/','https://s3-us-west-2.amazonaws.com/www.guilded.gg/'))
                await ctx.send(embed=embed)
            else:
                embed = guilded.Embed(title = 'Oofus', description=f"You don't have the necessary funds! Transfer cannot be performed!!")
                embed.set_thumbnail(ctx.author.avatar_url.replace('https://img.guildedcdn.com/','https://s3-us-west-2.amazonaws.com/www.guilded.gg/'))
                await ctx.send(embed=embed)
        except KeyError:
            embed = guilded.Embed(title = 'Oofus', description=f"You need to mention the user first! Then amount!!")
            embed.set_thumbnail(ctx.author.avatar_url.replace('https://img.guildedcdn.com/','https://s3-us-west-2.amazonaws.com/www.guilded.gg/'))
            await ctx.send(embed=embed)
        except (ValueError, IndexError):
            embed = guilded.Embed(title = 'Oofus', description=f"You need to enter the amount to transfer in numbers!! format: {pre}transfer <mention> <amount>")
            embed.set_thumbnail(ctx.author.avatar_url.replace('https://img.guildedcdn.com/','https://s3-us-west-2.amazonaws.com/www.guilded.gg/'))
            await ctx.send(embed=embed)
# for debugging bot plz dont use.
@bot.command()
async def uuddlrlrbastart(ctx):
    """Debug cheat code: credit the invoking user an arbitrary point amount.

    SECURITY FIX: the amount was previously type-checked via ``eval()`` on
    raw chat input (arbitrary code execution); it is now parsed with
    ``int()``, and a missing argument no longer crashes the handler.
    """
    rawargs = ctx.content.replace(f'{pre}uuddlrlrbastart', '')
    args = rawargs.split()
    try:
        amount = int(args[0])
    except (ValueError, IndexError):
        amount = None
    if amount is not None:
        credit(ctx.author.name, amount)
        embed = guilded.Embed(title = 'Applied', description=f"Your points are now: {ledgyboi[ctx.author.name]['points']}")
    else:
        embed = guilded.Embed(title = 'Oofus', description='Cheatcode can only take in integers!')
    embed.set_thumbnail(ctx.author.avatar_url.replace('https://img.guildedcdn.com/','https://s3-us-west-2.amazonaws.com/www.guilded.gg/'))
    await ctx.send(embed=embed)
# HELL BEGINS HERE
# JOKES OVER
# ACTUAL WAIFU BOT CODE
@bot.command()
async def waifu(ctx, member: guilded.User=None):
rawargs = ctx.content.replace(f'{pre}waifu', '')
args = rawargs.split()
owner = None
ownedwaifus = {}
for key in ledgyboi.keys():
ownedwaifus[key] = ledgyboi[key]['waifus'].keys()
if args != []:
if args[0] == 'help':
embed = guilded.Embed(title='Help', description=f"Waifus are this bot's flagship feature! Points are required to use waifu commands, they can be gathered by chatting!!")
embed.add_field(name='#1 browsing', value=f"You can use '{pre}waifu random' (without quotes) to get an instant shot of dopamine (hopefully xD) It costs 100 points though")
embed.add_field(name='#2 buying', value=f"You can use '{pre}waifu buy <Waifu code>' to buy a waifu image! After that, whenever you view the image, it will be free of cost! Other players will be able to appreciate your prod ownership if they ever encounter it!!")
embed.add_field(name='#3 waifu codes', value=f"Each and every waifu image has a code which is unque to it! If you like a waifu image, this code is the way to encounter it again. This feature can be handy if you want to check on the ownership of an image when saving up to buy it!")
embed.add_field(name='#4 owned', value=f"Lists all of your collected waifus!")
embed.set_thumbnail(ctx.author.avatar_url.replace('https://img.guildedcdn.com/','https://s3-us-west-2.amazonaws.com/www.guilded.gg/'))
await ctx.send(embed=embed)
elif args[0] == 'random':
price = 100
if ledgyboi[ctx.author.name]['points'] >= price:
rwaifu = random.choice(os.listdir(waifus))
embed = guilded.Embed(title=f'And your waifu is gonna be...', description=f'Waifu code:{lowerutils.remove_extension(rwaifu)}!')
waifurl = await upload_file(f'{waifus}{rwaifu}', 'ContentMedia')
embed.set_image(waifurl)
if lowerutils.remove_extension(rwaifu) in lowerutils.listmerger(ownedwaifus.values()):
for key in ownedwaifus.keys():
if lowerutils.remove_extension(rwaifu) in ownedwaifus[key]:
owner = key
embed.add_field(name = 'Waifu Image Status:', value=f"Belongs to: {key}")
else:
embed.add_field(name = 'Waifu Image Status:', value=f"Unowned")
if owner:
if ctx.author.name == owner:
embed.add_field(name = 'Points:', value=f"No points lost! The waifu already belonged to you!!")
owner = None
else:
debit(ctx.author.name, price)
embed.add_field(name = 'Points:', value=f"You are left with {ledgyboi[ctx.author.name]['points']} points now.")
owner = None
embed.set_thumbnail(ctx.author.avatar_url.replace('https://img.guildedcdn.com/','https://s3-us-west-2.amazonaws.com/www.guilded.gg/'))
await ctx.send(embed=embed)
else:
embed = guilded.Embed(title='Oofus', description=f'You need to have atleast {str(price)} points to use this command!! Chat more!')
embed.set_thumbnail(ctx.author.avatar_url.replace('https://img.guildedcdn.com/','https://s3-us-west-2.amazonaws.com/www.guilded.gg/'))
await ctx.send(embed=embed)
elif args[0] == "owned":
owned = ''
if f"{ownedwaifus[ctx.author.name]}" == "dict_keys([])":
embed = guilded.Embed(title='Oofus', description='It seems like you own no waifus...')
else:
embed = guilded.Embed(title=f"{ctx.author.name}'s harem:", description=f'You have these waifus:')
for waifux in ownedwaifus[ctx.author.name]:
if waifux in chutiya.keys():
embed.add_field(name = waifux, value=f"Selling: True, Listed: {chutiya[waifux]['Visibility']}")
else:
embed.add_field(name = waifux, value=f"Selling: False")
embed.set_thumbnail(ctx.author.avatar_url.replace('https://img.guildedcdn.com/','https://s3-us-west-2.amazonaws.com/www.guilded.gg/'))
await ctx.send(embed=embed)
elif args[0] == 'buy':
if args[1] in awaifus:
if args[1] in lowerutils.listmerger(ownedwaifus.values()) and args[1] in ledgyboi[ctx.author.name]['waifus'].keys():
embed = guilded.Embed(title = 'Uhh', description='It already belongs to you!!')
waifurl = await upload_file(f"{lowerutils.filename_helper(waifus, args[1], ['.png', '.jpeg', '.jpg'])}", 'ContentMedia')
embed.set_image(waifurl)
for key in ownedwaifus.keys():
if args[1] in ownedwaifus[key]:
embed.add_field(name = 'Owned by:', value=f"{key}")
embed.add_field(name = 'Points:', value=f"You are left with {ledgyboi[ctx.author.name]['points']} points now, NO POINTS DEDUCTED.")
embed.set_thumbnail(ctx.author.avatar_url.replace('https://img.guildedcdn.com/','https://s3-us-west-2.amazonaws.com/www.guilded.gg/'))
await ctx.send(embed=embed)
elif args[1] in chutiya.keys():
try:
if args[2]:
if args[2] == "accept":
price = chutiya[args[1]]['Price']
code = chutiya[args[1]]['Code']
owner = chutiya[args[1]]['Owner']
if ledgyboi[ctx.author.name]["points"] > price:
ledgyboi[ctx.author.name]['waifus'][args[1]] = {}
ledgyboi[ctx.author.name]['waifus'][args[1]]['selling'] = False
ledgyboi[ctx.author.name]['waifus'][args[1]]['listed'] = False
prevown = chutiya[args[1]]["Owner"]
if "Name" in chutiya[args[1]].keys():
name = chutiya[args[1]]['Name']
embed = guilded.Embed(title = 'Yay!', description=f"You bought {code} aka {name} from {owner} for {price} successfully!!")
else:
embed = guilded.Embed(title = | |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import argparse
import logging
import multiprocessing
import os
import sys
import tempfile
from copy import deepcopy
from functools import cmp_to_key
from functools import partial
import sh
from six.moves import configparser
from dlrn.build import build_worker
from dlrn.config import ConfigOptions
from dlrn.config import getConfigOptions
from dlrn.config import setup_logging
from dlrn.db import CIVote
from dlrn.db import closeSession
from dlrn.db import Commit
from dlrn.db import getCommits
from dlrn.db import getLastBuiltCommit
from dlrn.db import getLastProcessedCommit
from dlrn.db import getSession
from dlrn.db import Project
from dlrn.notifications import sendnotifymail
from dlrn.notifications import submit_review
from dlrn.reporting import genreports
from dlrn.repositories import getsourcebranch
from dlrn.rpmspecfile import RpmSpecCollection
from dlrn.rpmspecfile import RpmSpecFile
from dlrn.rsync import sync_repo
from dlrn.rsync import sync_symlinks
from dlrn.utils import dumpshas2file
from dlrn.utils import import_object
from dlrn.utils import isknownerror
from dlrn.utils import lock_file
from dlrn.utils import saveYAML_commit
from dlrn.utils import timesretried
from dlrn import version
logger = logging.getLogger("dlrn")
def deprecation():
    """Entry point kept for the legacy 'delorean' console script.

    Emits a deprecation notice on stdout, then delegates to main() so
    existing invocations of the old command keep working.
    """
    notice = ("Using the 'delorean' command has been deprecated. "
              "Please use 'dlrn' instead.")
    print(notice)
    main()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--config-file',
default='projects.ini',
help="Config file. Default: projects.ini")
parser.add_argument('--info-repo',
help="use a local rdoinfo repo instead of"
" fetching the default one using rdopkg. Only"
" applies when pkginfo_driver is rdoinfo in"
" projects.ini")
parser.add_argument('--build-env', action='append',
help="Variables for the build environment.")
parser.add_argument('--local', action="store_true",
help="Use local git repos if possible. Only commited"
" changes in the local repo will be used in the"
" build.")
parser.add_argument('--head-only', action="store_true",
help="Build from the most recent Git commit only.")
group = parser.add_mutually_exclusive_group()
group.add_argument('--project-name', action='append',
help="Build a specific project name only."
" Use multiple times to build more than one "
"project in a run.")
group.add_argument('--package-name', action='append',
help="Build a specific package name only."
" Use multiple times to build more than one "
"package in a run.")
parser.add_argument('--dev', action="store_true",
help="Don't reset packaging git repo, force build "
"and add public master repo for dependencies "
"(dev mode).")
parser.add_argument('--log-commands', action="store_true",
help="Log the commands run by dlrn.")
parser.add_argument('--use-public', action="store_true",
help="Use the public master repo for dependencies "
"when doing install verification.")
parser.add_argument('--order', action="store_true",
help="Compute the build order according to the spec "
"files instead of the dates of the commits. "
"Implies --sequential.")
parser.add_argument('--sequential', action="store_true",
help="Run all actions sequentially, regardless of the"
" number of workers specified in projects.ini.")
parser.add_argument('--status', action="store_true",
help="Get the status of packages.")
parser.add_argument('--recheck', action="store_true",
help="Force a rebuild for a particular package. "
"Implies --package-name")
parser.add_argument('--version',
action='version',
version=version.version_info.version_string())
parser.add_argument('--run',
help="Run a program instead of trying to build. "
"Implies --head-only")
parser.add_argument('--stop', action="store_true",
help="Stop on error.")
parser.add_argument('--verbose-build', action="store_true",
help="Show verbose output during the package build.")
parser.add_argument('--verbose-mock', action="store_true",
help=argparse.SUPPRESS)
parser.add_argument('--no-repo', action="store_true",
help="Do not generate a repo with all the built "
"packages.")
parser.add_argument('--debug', action='store_true',
help="Print debug logs")
options = parser.parse_args(sys.argv[1:])
setup_logging(options.debug)
if options.verbose_mock:
logger.warning('The --verbose-mock command-line option is deprecated.'
' Please use --verbose-build instead.')
options.verbose_build = options.verbose_mock
global verbose_build
verbose_build = options.verbose_build
cp = configparser.RawConfigParser()
cp.read(options.config_file)
if options.log_commands is True:
logging.getLogger("sh.command").setLevel(logging.INFO)
if options.order is True:
options.sequential = True
config_options = ConfigOptions(cp)
if options.dev:
_, tmpdb_path = tempfile.mkstemp()
logger.info("Using file %s for temporary db" % tmpdb_path)
config_options.database_connection = "sqlite:///%s" % tmpdb_path
session = getSession(config_options.database_connection)
pkginfo_driver = config_options.pkginfo_driver
global pkginfo
pkginfo = import_object(pkginfo_driver, cfg_options=config_options)
packages = pkginfo.getpackages(local_info_repo=options.info_repo,
tags=config_options.tags,
dev_mode=options.dev)
if options.project_name:
pkg_names = [p['name'] for p in packages
if p['project'] in options.project_name]
elif options.package_name:
pkg_names = options.package_name
else:
pkg_names = None
if options.status is True:
if not pkg_names:
pkg_names = [p['name'] for p in packages]
for name in pkg_names:
package = [p for p in packages if p['name'] == name][0]
for build_type in package.get('types', ['rpm']):
commit = getLastProcessedCommit(
session, name, 'invalid status',
type=build_type)
if commit:
print("{:>9}".format(build_type), name, commit.status)
else:
print("{:>9}".format(build_type), name, 'NO_BUILD')
sys.exit(0)
if pkg_names:
pkg_name = pkg_names[0]
else:
pkg_name = None
def recheck_commit(commit):
if commit.status == 'SUCCESS':
logger.error(
"Trying to recheck an already successful commit,"
" ignoring.")
sys.exit(1)
elif commit.status == 'RETRY':
# In this case, we are going to retry anyway, so
# do nothing and exit
logger.warning("Trying to recheck a commit in RETRY state,"
" ignoring.")
sys.exit(0)
else:
# We could set the status to RETRY here, but if we have gone
# beyond max_retries it wouldn't work as expected. Thus, our
# only chance is to remove the commit
session.delete(commit)
session.commit()
sys.exit(0)
if options.recheck is True:
if not pkg_name:
logger.error('Please use --package-name or --project-name '
'with --recheck.')
sys.exit(1)
package = [p for p in packages if p['name'] == pkg_name][0]
for build_type in package.get('types', ['rpm']):
commit = getLastProcessedCommit(session, pkg_name, type=build_type)
if commit:
recheck_commit(commit)
else:
logger.error("There are no existing commits for package %s",
pkg_name)
sys.exit(1)
# when we run a program instead of building we don't care about
# the commits, we just want to run once per package
if options.run:
options.head_only = True
# Build a list of commits we need to process
toprocess = []
def add_commits(project_toprocess):
# The first entry in the list of commits is a commit we have
# already processed, we want to process it again only if in dev
# mode or distro hash has changed, we can't simply check
# against the last commit in the db, as multiple commits can
# have the same commit date
for commit_toprocess in project_toprocess:
if options.dev is True or \
options.run or \
not session.query(Commit).filter(
Commit.commit_hash == commit_toprocess.commit_hash,
Commit.distro_hash == commit_toprocess.distro_hash,
Commit.extended_hash == commit_toprocess.extended_hash,
Commit.type == commit_toprocess.type,
Commit.status != "RETRY").all():
toprocess.append(commit_toprocess)
if not pkg_name and not pkg_names:
pool = multiprocessing.Pool() # This will use all the system cpus
# Use functools.partial to iterate on the packages to process,
# while keeping a few options fixed
getinfo_wrapper = partial(getinfo, local=options.local,
dev_mode=options.dev,
head_only=options.head_only,
db_connection=config_options.
database_connection)
iterator = pool.imap(getinfo_wrapper, packages)
while True:
try:
project_toprocess, updated_pkg = iterator.next()
for package in packages:
if package['name'] == updated_pkg['name']:
if package['upstream'] == 'Unknown':
package['upstream'] = updated_pkg['upstream']
logger.debug(
"Updated upstream for package %s to %s",
package['name'], package['upstream'])
break
add_commits(project_toprocess)
except StopIteration:
break
pool.close()
pool.join()
else:
for package in packages:
if package['name'] in pkg_names:
project_toprocess, _ = getinfo(package, local=options.local,
dev_mode=options.dev,
head_only=options.head_only,
db_connection=config_options.
database_connection)
add_commits(project_toprocess)
closeSession(session) # Close session, will reopen during post_build
# Check if there is any commit at all to process
if len(toprocess) == 0:
if not pkg_name:
# Use a shorter message if this was a full run
logger.info("No commits to build.")
else:
logger.info("No commits to build. If this is not expected, please"
" make sure the package name(s) are correct, and that "
"any failed commit you want to rebuild has been "
"removed from the database.")
return 0
# if requested do a sort according to build and install
# dependencies
if options.order is True:
# collect info from all spec files
logger.info("Reading rpm spec files")
projects = sorted([c.project_name for c in toprocess])
speclist = []
bootstraplist = []
for project_name in projects:
# Preprocess spec if needed
pkginfo.preprocess(package_name=project_name)
specpath = os.path.join(pkginfo.distgit_dir(project_name),
project_name + '.spec')
speclist.append(sh.rpmspec('-D', 'repo_bootstrap 1',
'-P', specpath))
# Check if repo_bootstrap is defined in the package.
# If so, we'll need to rebuild after the whole bootstrap exercise
rawspec = open(specpath).read(-1)
if 'repo_bootstrap' in rawspec:
bootstraplist.append(project_name)
logger.debug("Packages to rebuild: %s" % bootstraplist)
specs = RpmSpecCollection([RpmSpecFile(spec)
for spec in speclist])
# compute order according to BuildRequires
logger.info("Computing build order")
orders = specs.compute_order()
# hack because the package name is not consistent with the directory
# name and the spec file name
if 'python-networking_arista' in orders:
orders.insert(orders.index('python-networking_arista'),
'python-networking-arista')
# sort the commits according to the score of their project and
# then use the timestamp of the commits as a secondary key
def my_cmp(a, b):
if a.project_name == b.project_name:
_a = a.dt_commit
_b = b.dt_commit
else:
| |
<reponame>vishalbelsare/epymetheus-1
import abc
import json
from functools import partial
from time import time
from typing import TypeVar
import numpy as np
import pandas as pd
from .. import ts
from .._utils import print_if_verbose
from .._utils import to_json
from ..exceptions import NoTradeWarning
from ..exceptions import NotRunError
from ..metrics import metric_from_name
from ..trade import Trade
from ..trade import check_trade
T = TypeVar("T", bound="Strategy")
def create_strategy(fn, **params) -> T:
    """Create a :class:`Strategy` from a plain function.

    The function `fn` must accept `universe` (a `pandas.DataFrame` of
    historical prices: timestamps as index, asset names as columns) as its
    first parameter. Any further parameters are user-defined knobs of the
    strategy (trade volume, thresholds, and so on) and are fixed here via
    `**params`.

    `fn` is expected to yield any number of :class:`Trade` objects for the
    given `universe`; those trades are what get executed and evaluated.

    Args:
        fn (callable): Function returning an iterable of :class:`Trade`
            from a `universe` and the given parameters.
        **params: Parameter names and values passed through to `fn`.

    Returns:
        :class:`Strategy`

    Examples:
        Trade the first asset of the universe, with volume controlled by
        `my_param`:

        >>> import epymetheus as ep
        >>>
        >>> def fn(universe, my_param):
        ...     asset = universe.columns[0]
        ...     yield my_param * ep.trade(asset)
        >>>
        >>> strategy = ep.create_strategy(fn, my_param=2.0)
        >>> universe = pd.DataFrame({"AAPL": [100, 101], "AMZN": [200, 201]})
        >>> strategy(universe)
        [trade(['AAPL'], lot=[2.])]
    """
    strategy = Strategy._create_strategy(fn, **params)
    return strategy
class Strategy(abc.ABC):
"""Base class of trading strategy.
There are two ways to create a :class:`Strategy`:
- Use :func:`create_strategy`: This should be easier for simple strategies.
See :func:`create_strategy`.
- Subclass :class:`Strategy`: See below.
One can create a strategy by subclassing :class:`Strategy` and
overriding a method `logic`.
The method `logic` takes arbitrary numbers of user-defined parameters
parameterizing the strategy.
These parameters should be useful when one want to try various configurations
such as trade volume, profit-taking threshold, loss-cut threshold, and so forth.
The method `logic` is supposed to yield arbitrary numbers of :class:`Trade`
depending on the `universe` and other parameters. These trades will be executed
for the `universe` and evaluated accordingly.
Examples:
The following strategy trades the first asset in a given `universe`.
The parameter `my_param` controls the volume to trade.
>>> import pandas as pd
>>> import epymetheus as ep
>>>
>>> class MyStrategy(ep.Strategy):
... def __init__(self, my_param):
... self.my_param = my_param
...
... def logic(self, universe: pd.DataFrame):
... asset = universe.columns[0]
... yield self.my_param * ep.trade(asset)
...
>>> strategy = MyStrategy(my_param=2.0)
>>> universe = pd.DataFrame({"AAPL": [100, 101], "AMZN": [200, 201]})
>>> strategy(universe)
[trade(['AAPL'], lot=[2.])]
The method :func:`run` runs the strategy on a given universe.
>>> strategy = MyStrategy(my_param=2.0).run(universe, verbose=False)
>>> strategy.trades
[trade(['AAPL'], lot=[2.])]
"""
@classmethod
def _create_strategy(cls, fn, **params) -> T:
self = cls()
self._fn = fn
self._params = params
return self
def __call__(self, universe, to_list=True):
if hasattr(self, "_fn"):
setattr(self, "logic", partial(self._fn, **self.get_params()))
trades = self.logic(universe)
trades = list(trades) if to_list else trades
return trades
def logic(self, universe):
"""Logic to generate trades from universe.
Override this to implement trading strategy by subclassing `Strategy`.
Args:
universe (pandas.DataFrame): Historical price data to apply this strategy.
The index represents timestamps and the column is the assets.
**params: Parameter values.
Returns:
iterable[Trade]
"""
def run(self: T, universe, verbose=True, check_trades=False) -> T:
"""Run a backtesting of strategy.
Args:
universe (pandas.DataFrame): Historical price data to apply this strategy.
The index represents timestamps and the column is the assets.
verbose (bool, default=True): Verbose mode.
check_trade (bool, default=False):
If `True`, check that `asset`, `entry`, `exit` of trade
Returns:
self
"""
_begin_time = time()
self.universe = universe
# Yield trades
_begin_time_yield = time()
trades = []
for i, t in enumerate(self(universe, to_list=False) or []):
print_if_verbose(
f"\r{i + 1} trades returned: {t} ... ", end="", verbose=verbose
)
if check_trades:
check_trade(t, universe)
trades.append(t)
if len(trades) == 0:
raise NoTradeWarning("No trade was returned.")
_time = time() - _begin_time_yield
print_if_verbose(f"Done. (Runtume: {_time:.4f} sec)", verbose=verbose)
# Execute trades
_begin_time_execute = time()
for i, t in enumerate(trades):
print_if_verbose(
f"\r{i + 1} trades executed: {t} ... ", end="", verbose=verbose
)
t.execute(universe)
_time = time() - _begin_time_execute
print_if_verbose(f"Done. (Runtime: {_time:.4f} sec)", verbose=verbose)
self.trades = trades
_time = time() - _begin_time
final_wealth = self.score("final_wealth")
print_if_verbose(
f"Done. Final wealth: {final_wealth:.2f} (Runtime: {_time:.4f} sec)",
verbose=verbose,
)
return self
def score(self, metric_name) -> float:
"""Returns the value of a metric of self.
Args:
metric_name (str): Metric to evaluate.
Returns:
float
"""
if not hasattr(self, "trades"):
raise NotRunError("Strategy has not been run")
return metric_from_name(metric_name)(self.trades, self.universe)
def history(self) -> pd.DataFrame:
"""Return `pandas.DataFrame` of trade history.
Returns:
pandas.DataFrame
"""
if not hasattr(self, "trades"):
raise NotRunError("Strategy has not been run")
data = {}
n_orders = np.array([t.asset.size for t in self.trades])
data["trade_id"] = np.repeat(np.arange(len(self.trades)), n_orders)
data["asset"] = np.concatenate([t.asset for t in self.trades])
data["lot"] = np.concatenate([t.lot for t in self.trades])
data["entry"] = np.repeat([t.entry for t in self.trades], n_orders)
data["close"] = np.repeat([t.close for t in self.trades], n_orders)
data["exit"] = np.repeat([t.exit for t in self.trades], n_orders)
data["take"] = np.repeat([t.take for t in self.trades], n_orders)
data["stop"] = np.repeat([t.stop for t in self.trades], n_orders)
data["pnl"] = np.concatenate([t.final_pnl(self.universe) for t in self.trades])
return pd.DataFrame(data)
def trades_to_dict(self) -> list:
"""Represents and returns `trades` as `dict` objects.
Returns:
list[dict]
Examples:
>>> import epymetheus as ep
>>>
>>> strategy = ep.create_strategy(
... lambda universe: [ep.trade("AAPL")]
... ).run(pd.DataFrame({"AAPL": [100, 101]}), verbose=False)
>>> strategy.trades_to_dict()
[{'asset': ['AAPL'], 'lot': [1.0], 'close': 1}]
"""
return [trade.to_dict() for trade in self.trades]
def trades_to_json(self):
"""Represents and returns `trades` as a string in JSON format.
Returns:
str
Examples:
>>> import epymetheus as ep
>>>
>>> strategy = ep.create_strategy(
... lambda universe: [ep.trade("AAPL")]
... ).run(pd.DataFrame({"AAPL": [100, 101]}), verbose=False)
>>> strategy.trades_to_json()
'[{"asset": ["AAPL"], "lot": [1.0], "close": 1}]'
>>> s = '[{"asset": ["AAPL"], "lot": [1.0], "close": 1}]'
>>> strategy = Strategy()
>>> strategy.universe = pd.DataFrame({"AAPL": [100, 101]})
>>> strategy.load_trades_json(s).trades
[trade(['AAPL'], lot=[1.])]
"""
return to_json(self.trades_to_dict())
def load(self, history: pd.DataFrame, universe: pd.DataFrame):
"""Load trade history and universe.
Args:
history (pandas.DataFrame): History to load.
universe (pandas.DataFrame): Universe to load.
Returns:
self
"""
return self.load_universe(universe).load_history(history)
def load_history(self, history: pd.DataFrame):
self.trades = Trade.load_history(history)
for trade in self.trades:
# Assuming that self has loaded universe
trade.execute(self.universe)
return self
def load_trades_dict(self, l: list) -> "Strategy":
"""
Args:
l (list[dict]):
"""
self.trades = [Trade.load_dict(d) for d in l]
for trade in self.trades:
# Assuming that self has loaded universe
trade.execute(self.universe)
return self
def load_trades_json(self, s: str) -> "Strategy":
"""
Args:
s (str):
"""
# try:
trades_as_dict = json.loads(s)
# except json.JSONDecodeError:
# # If s cannot be interpreted as a json string,
# # try to interpret it as a file name of json
# trades_as_dict = json.load(s)
self.load_trades_dict(trades_as_dict)
for trade in self.trades:
# Assuming that self has loaded universe
trade.execute(self.universe)
return self
def load_universe(self, universe: pd.DataFrame):
"""Load universe.
Args:
universe (pandas.DataFrame): Universe to load.
Returns:
self
"""
self.universe = universe
return self
def wealth(self) -> pd.Series:
"""Return `pandas.Series` of wealth.
Returns:
pandas.Series
"""
if not hasattr(self, "trades"):
raise NotRunError("Strategy has not been run")
return pd.Series(
ts.wealth(self.trades, self.universe), index=self.universe.index
)
def drawdown(self) -> pd.Series:
"""
Returns:
pandas.Series
"""
if not hasattr(self, "trades"):
raise NotRunError("Strategy has not been run")
drawdown = ts.drawdown(self.trades, self.universe)
return pd.Series(drawdown, index=self.universe.index)
def exposure(self) -> pd.DataFrame:
"""Return exposure of self to each asset.
Returns:
pandas.DataFrame: DataFrame of exposure.
Examples:
>>> import pandas as pd
>>> import epymetheus as ep
...
>>> universe = pd.DataFrame({
... "A0": [1, 2, 3, 4, 5],
... "A1": [2, 3, 4, 5, 6],
... "A2": [3, 4, 5, 6, 7],
... })
>>> strategy = ep.create_strategy(lambda universe: [
... [1, -1] * ep.trade(["A0", "A2"], entry=1, exit=3),
... [-1, 2] * ep.trade(["A1", "A2"], entry=2, exit=4),
... ]).run(universe, verbose=False)
>>> strategy.exposure()
A0 A1 A2
0 0.0 0.0 0.0
1 0.0 0.0 0.0
2 3.0 0.0 -5.0
3 4.0 -5.0 6.0
4 0.0 -6.0 14.0
"""
if not hasattr(self, "trades"):
raise NotRunError("Strategy has not been | |
for the polyline
# folium takes coords in lat,lon but geopandas provides them in lon,lat
# so we have to flip them around
locations = list([(lat, lon) for lon, lat in edge['geometry'].coords])
# if popup_attribute is None, then create no pop-up
if popup_attribute is None:
popup = None
else:
# folium doesn't interpret html in the html argument (weird), so can't
# do newlines without an iframe
popup_text = json.dumps(edge[popup_attribute])
popup = folium.Popup(html=popup_text)
# create a folium polyline with attributes
pl = folium.PolyLine(locations=locations, popup=popup,
color=edge_color, weight=edge_width, opacity=edge_opacity)
return pl
def plot_graph_folium(G, graph_map=None, popup_attribute=None,
                      tiles='cartodbpositron', zoom=1, fit_bounds=True,
                      edge_color='#333333', edge_width=5, edge_opacity=1):
    """
    Plot a graph on an interactive folium web map.

    Note that anything much larger than a small city can take a long time
    to plot and creates a large web map file that is very slow to load as
    JavaScript.

    Parameters
    ----------
    G : networkx multidigraph
    graph_map : folium.folium.Map
        if not None, plot the graph on this preexisting folium map object
    popup_attribute : string
        edge attribute to display in a pop-up when an edge is clicked
    tiles : string
        name of a folium tileset
    zoom : int
        initial zoom level for the map
    fit_bounds : bool
        if True, fit the map to the boundaries of the graph's edges
    edge_color : string
        color of the edge lines
    edge_width : numeric
        width of the edge lines
    edge_opacity : numeric
        opacity of the edge lines

    Returns
    -------
    graph_map : folium.folium.Map
    """
    # folium is an optional dependency; fail fast if it isn't importable
    if not folium:
        raise ImportError('The folium package must be installed to use this optional feature.')
    # convert the graph's edges to a GeoDataFrame
    gdf_edges = graph_to_gdfs(G, nodes=False, fill_edge_geometry=True)
    # center the map on the centroid of the edges (folium wants lat, lng)
    x, y = gdf_edges.unary_union.centroid.xy
    graph_centroid = (y[0], x[0])
    # build a fresh web map unless the caller supplied one
    if graph_map is None:
        graph_map = folium.Map(location=graph_centroid, zoom_start=zoom, tiles=tiles)
    # draw every edge on the map as a polyline
    for _, row in gdf_edges.iterrows():
        polyline = make_folium_polyline(edge=row, edge_color=edge_color,
                                        edge_width=edge_width,
                                        edge_opacity=edge_opacity,
                                        popup_attribute=popup_attribute)
        polyline.add_to(graph_map)
    # optionally fit the viewport to the edges' [southwest, northeast]
    # corners, given as lat-lng points
    if fit_bounds:
        minx, miny, maxx, maxy = gdf_edges.total_bounds
        graph_map.fit_bounds([(miny, minx), (maxy, maxx)])
    return graph_map
def plot_route_folium(G, route, route_map=None, popup_attribute=None,
                      tiles='cartodbpositron', zoom=1, fit_bounds=True,
                      route_color='#cc0000', route_width=5, route_opacity=1):
    """
    Plot a route on an interactive folium web map.

    Parameters
    ----------
    G : networkx multidigraph
    route : list
        the route as a list of nodes
    route_map : folium.folium.Map
        if not None, plot the route on this preexisting folium map object
    popup_attribute : string
        edge attribute to display in a pop-up when an edge is clicked
    tiles : string
        name of a folium tileset
    zoom : int
        initial zoom level for the map
    fit_bounds : bool
        if True, fit the map to the boundaries of the route's edges
    route_color : string
        color of the route's line
    route_width : numeric
        width of the route's line
    route_opacity : numeric
        opacity of the route lines

    Returns
    -------
    route_map : folium.folium.Map
    """
    # folium is an optional dependency; fail fast if it isn't importable
    if not folium:
        raise ImportError('The folium package must be installed to use this optional feature.')
    # convert the graph's edges to a GeoDataFrame
    gdf_edges = graph_to_gdfs(G, nodes=False, fill_edge_geometry=True)
    # pair consecutive route nodes into (u, v) edges and look up the first
    # matching edge row for each pair
    route_nodes = list(zip(route[:-1], route[1:]))
    index = []
    for u, v in route_nodes:
        mask = (gdf_edges['u'] == u) & (gdf_edges['v'] == v)
        index.append(gdf_edges[mask].index[0])
    gdf_route_edges = gdf_edges.loc[index]
    # center the map on the centroid of the route (folium wants lat, lng)
    x, y = gdf_route_edges.unary_union.centroid.xy
    route_centroid = (y[0], x[0])
    # build a fresh web map unless the caller supplied one
    if route_map is None:
        route_map = folium.Map(location=route_centroid, zoom_start=zoom, tiles=tiles)
    # draw every route edge on the map as a polyline
    for _, row in gdf_route_edges.iterrows():
        polyline = make_folium_polyline(edge=row, edge_color=route_color,
                                        edge_width=route_width,
                                        edge_opacity=route_opacity,
                                        popup_attribute=popup_attribute)
        polyline.add_to(route_map)
    # optionally fit the viewport to the route's [southwest, northeast]
    # corners, given as lat-lng points
    if fit_bounds:
        minx, miny, maxx, maxy = gdf_route_edges.total_bounds
        route_map.fit_bounds([(miny, minx), (maxy, maxx)])
    return route_map
def plot_figure_ground(G=None, address=None, point=None, dist=805,
network_type='drive_service', street_widths=None,
default_width=4, fig_length=8, edge_color='w',
bgcolor='#333333', smooth_joints=True, filename=None,
file_format='png', show=False, save=True, close=True,
dpi=300):
"""
Plot a figure-ground diagram of a street network, defaulting to one square
mile.
Parameters
----------
G : networkx multidigraph
address : string
the address to geocode as the center point if G is not passed in
point : tuple
the center point if address and G are not passed in
dist : numeric
how many meters to extend north, south, east, and west from the center
point
network_type : string
what type of network to get
street_widths : dict
where keys are street types and values are widths to plot in pixels
default_width : numeric
the default street width in pixels for any street type not found in
street_widths dict
fig_length : numeric
the height and width of this square diagram
edge_color : string
the color of the streets
bgcolor : string
the color of the background
smooth_joints : bool
if True, plot nodes same width as streets to smooth line joints and
prevent cracks between them from showing
filename : string
filename to save the image as
file_format : string
the format of the file to save (e.g., 'jpg', 'png', 'svg')
show : bool
if True, show the figure
save : bool
if True, save the figure as an image file to disk
close : bool
close the figure (only if show equals False) to prevent display
dpi : int
the resolution of the image file if saving
Returns
-------
fig, ax : tuple
"""
multiplier = 1.2
# if G was passed-in, use this graph in the plot, centered on the centroid
# of its nodes
if G is not None:
gdf_nodes = graph_to_gdfs(G, edges=False, node_geometry=True)
lnglat_point = gdf_nodes.unary_union.centroid.coords[0]
point = tuple(reversed(lnglat_point))
# otherwise, get the network by either address or point, whichever was
# passed-in, using a distance multiplier to make sure we get more than
# enough network. simplify in non-strict mode to not combine multiple street
# types into single edge
elif address is not None:
G, point = graph_from_address(address, distance=dist*multiplier, distance_type='bbox', network_type=network_type,
simplify=False, truncate_by_edge=True, return_coords=True)
G = simplify_graph(G, strict=False)
elif point is not None:
G = graph_from_point(point, distance=dist*multiplier, distance_type='bbox', network_type=network_type,
simplify=False, truncate_by_edge=True)
G = simplify_graph(G, strict=False)
else:
raise ValueError('You must pass an address or lat-long point or graph.')
# project the network to UTM
G = project_graph(G)
# if user did not pass in custom street widths, create a dict of default
# values
if street_widths is None:
street_widths = {'footway' : 1.5,
'steps' : 1.5,
'pedestrian' : 1.5,
'service' : 1.5,
'path' : 1.5,
'track' : 1.5,
'motorway' : 6}
# we need an undirected graph to find every edge incident to a node
G_undir = G.to_undirected()
# for each network edge, get a linewidth according to street type (the OSM
# 'highway' value)
edge_linewidths = []
for _, _, data in G_undir.edges(keys=False, data=True):
street_type = data['highway'][0] if isinstance(data['highway'], list) else data['highway']
if street_type in street_widths:
edge_linewidths.append(street_widths[street_type])
else:
edge_linewidths.append(default_width)
if smooth_joints:
# for each node, get a nodesize according to the narrowest incident edge
node_widths = {}
for node in G_undir.nodes():
# first, identify all the highway types of this node's incident edges
incident_edges_data = [G_undir.get_edge_data(node, neighbor) for neighbor in G_undir.neighbors(node)]
edge_types = [data[0]['highway'] for data in incident_edges_data]
if len(edge_types) < 1:
# if node has no incident edges, make size zero
node_widths[node] = 0
else:
# flatten the list of edge types
edge_types_flat = []
for et in edge_types:
if isinstance(et, list):
edge_types_flat.extend(et)
else:
edge_types_flat.append(et)
# for each edge type in the flattened list, lookup the
# corresponding width
edge_widths = [street_widths[edge_type] if edge_type in street_widths else default_width for edge_type in edge_types_flat]
# the node diameter will be the biggest of the edge widths, to make joints perfectly smooth
# alternatively, use min (?) to pervent anything larger from extending past smallest street's line
circle_diameter = max(edge_widths)
# mpl | |
str(signalDict['dimensions'])+'\n'
gamText += ' NumberOfElements = ' + \
str(signalDict['elements'])+'\n'
gamText += ' Type = '+signalDict['type']+'\n'
gamText += ' }\n'
gamText += ' }\n'
gamText += ' OutputSignals = {\n'
for signalDict in inputSignals:
gamText += ' '+signalDict['name']+' = {\n'
gamText += ' DataSource = '+gamName+'_Logger\n'
gamText += ' Type = '+signalDict['type']+'\n'
gamText += ' NumberOfDimensions = ' + \
str(signalDict['dimensions'])+'\n'
gamText += ' NumberOfElements = ' + \
str(signalDict['elements'])+'\n'
gamText += ' }\n'
for signalDict in outputSignals:
gamText += ' '+signalDict['name']+' = {\n'
gamText += ' DataSource = '+gamName+'_Logger\n'
gamText += ' NumberOfDimensions = ' + \
str(signalDict['dimensions'])+'\n'
gamText += ' NumberOfElements = ' + \
str(signalDict['elements'])+'\n'
gamText += ' Type = '+signalDict['type']+'\n'
gamText += ' }\n'
gamText += ' }\n'
gamText += ' }\n'
gams.append(gamText)
dataSourceText = ' +'+gamName+'_Logger = {\n'
dataSourceText += ' Class = LoggerDataSource\n'
dataSourceText += ' }\n'
dataSources.append(dataSourceText)
gamList.append(gamName+'_Logger_IOGAM')
return outPeriod
# SYNCH and NON SYNCH INPUT
def getMarteInputInfo(self, threadMap, gams, dataSources, gamList, isSynch):
configDict = self.getGamInfo()
dataSourceName = configDict['gamName']
dataSourceClass = configDict['gamClass']
timebase = configDict['timebase']
paramDicts = configDict['paramDicts']
outputDicts = configDict['outputDicts']
outputTrigger = configDict['outputTrigger']
outPeriod = 0 # If different from 0, this means that the corresponing component is driving the thread timing
startTime = 0
if not isSynch:
# handle timebase as GAM for non synchronizing inputs
if isinstance(timebase, Range):
period = timebase.getDescAt(2).data()
try:
startTime = timebase.getDescAt(0).data()
except:
startTime = 0
outPeriod = period # Driving thread timing
dataSourceText = ' +'+dataSourceName+'_Timer' + ' = {\n'
dataSourceText += ' Class = LinuxTimer\n'
dataSourceText += ' SleepNature = "Default"\n'
dataSourceText += ' Signals = {\n'
dataSourceText += ' Counter = {\n'
dataSourceText += ' Type = uint32\n'
dataSourceText += ' }\n'
dataSourceText += ' Time = {\n'
dataSourceText += ' Type = uint32\n'
dataSourceText += ' }\n'
dataSourceText += ' }\n'
dataSourceText += ' }\n'
dataSources.append(dataSourceText)
dataSourceText = ' +'+dataSourceName+'_Timer_DDB = {\n'
dataSourceText += ' Class = GAMDataSource\n'
dataSourceText += ' }\n'
dataSources.append(dataSourceText)
gamList.append(dataSourceName+'Timer_IOGAM')
gamText = ' +'+dataSourceName+'Timer_IOGAM = {\n'
gamText += ' Class = IOGAM\n'
gamText += ' InputSignals = {\n'
gamText += ' Counter = {\n'
gamText += ' DataSource = '+dataSourceName+'_Timer\n'
gamText += ' Type = uint32\n'
gamText += ' NumberOfElements = 1\n'
gamText += ' }\n'
gamText += ' Time = {\n'
gamText += ' DataSource = '+dataSourceName+'_Timer\n'
gamText += ' Type = uint32\n'
gamText += ' NumberOfElements = 1\n'
gamText += ' Frequency = '+str(round(1./period, 4))+'\n'
gamText += ' }\n'
gamText += ' }\n'
gamText += ' OutputSignals = {\n'
gamText += ' Counter = {\n'
gamText += ' DataSource = '+dataSourceName+'_Timer_DDB\n'
gamText += ' Type = uint32\n'
gamText += ' }\n'
gamText += ' Time = {\n'
gamText += ' DataSource = '+dataSourceName+'_Timer_DDB\n'
gamText += ' Type = uint32\n'
gamText += ' NumberOfElements = 1\n'
gamText += ' }\n'
gamText += ' }\n'
gamText += ' }\n'
gams.append(gamText)
# Check if time information is required by another synchronized thread
if self.isUsedOnAnotherThread(threadMap, self.timebase, True):
dataSourceText = ' +'+dataSourceName+'_Timer_Synch = {\n'
dataSourceText += ' Class = RealTimeThreadSynchronisation\n'
dataSourceText += ' Timeout = 10000\n'
dataSourceText += ' }\n'
dataSources.append(dataSourceText)
gamList.append(dataSourceName+'Timer_Synch_IOGAM')
gamText = ' +'+dataSourceName+'Timer_Synch_IOGAM = {\n'
gamText += ' Class = IOGAM\n'
gamText += ' InputSignals = {\n'
gamText += ' Counter = {\n'
gamText += ' DataSource = '+dataSourceName+'_Timer_DDB\n'
gamText += ' Type = uint32\n'
gamText += ' NumberOfElements = 1\n'
gamText += ' }\n'
gamText += ' Time = {\n'
gamText += ' DataSource = '+dataSourceName+'_Timer_DDB\n'
gamText += ' Type = uint32\n'
gamText += ' NumberOfElements = 1\n'
# gamText += ' Frequency = '+str(round(1./period,4))+'\n'
gamText += ' }\n'
gamText += ' }\n'
gamText += ' OutputSignals = {\n'
gamText += ' Counter = {\n'
gamText += ' DataSource = '+dataSourceName+'_Timer_Synch\n'
gamText += ' Type = uint32\n'
gamText += ' }\n'
gamText += ' Time = {\n'
gamText += ' DataSource = '+dataSourceName+'_Timer_Synch\n'
gamText += ' Type = uint32\n'
gamText += ' NumberOfElements = 1\n'
gamText += ' }\n'
gamText += ' }\n'
gamText += ' }\n'
gams.append(gamText)
timerDDB = dataSourceName+'_Timer_DDB'
# Link to other component up in the chain
elif isinstance(timebase, TreeNode) or isinstance(timebase, TreePath):
prevTimebase = timebase
while isinstance(timebase, TreeNode) or isinstance(timebase, TreePath):
if isinstance(timebase, TreeNode):
prevTimebase = timebase
timebase = timebase.getData()
else:
prevTimebase = TreeNode(timebase, self.getTree())
timebase = prevTimebase.getData()
origName = self.convertPath(
prevTimebase.getParent().getFullPath())
# Check whether the synchronization source is a Synch Input. Only in this case, the origin DDB is its output DDB since that device is expected to produce Time
originMode = prevTimebase.getParent().getNode('mode').data()
try:
startTime = timebase.getDescAt(0).data()
except:
startTime = 0
if originMode == MARTE2_COMPONENT.MODE_SYNCH_INPUT:
if self.onSameThread(threadMap, prevTimebase.getParent()):
timerDDB = origName+'_Output_DDB'
else:
timerDDB = origName+'_Output_Synch'
try:
# Get period from driving synchronizing device
outPeriod = timebase.getDescAt(2).data()
except:
outPeriod = 0
else:
if self.onSameThread(threadMap, prevTimebase.getParent()):
timerDDB = origName+'_Timer_DDB'
else:
timerDDB = origName+'_Timer_Synch'
try:
# Get period from driving synchronizing device
outPeriod = timebase.getDescAt(2).data()
except:
outPeriod = 0
else:
print('ERROR: Invalid timebase definition')
return 0
# Unlike GAM no other configuration needs to be considered since there are no inputs
# if isSynch, timebase Will contain the defintion of the time that will be generated. Specific subclasses will connect it to DataSource specific parameters
# therefore no actions are taken here in addition. In this case, however, the component is driving thread timing
else: # isSynch
currTimebase = self.getNode('timebase').evaluate()
if isinstance(currTimebase, Range):
outPeriod = currTimebase.delta.data()
try:
startTime = currTimebase.begin.data()
except:
startTime = 0
else:
currTimebase = currTimebase.data()
outPeriod = currTimebase[1] - currTimebase[0]
startTime = currTimebase[0]
# endif isSynch
#Head and parameters
dataSourceText = ' +'+dataSourceName+' = {\n'
dataSourceText += ' Class = '+dataSourceClass+'\n'
dataSourceText = self.reportParameters(paramDicts, dataSourceText, 1)
# for paramDict in paramDicts:
# if paramDict['is_text']:
# dataSourceText += ' '+paramDict['name']+' = "'+str(paramDict['value'])+'"\n'
# else:
# dataSourceText += ' '+paramDict['name']+' = '+self.convertVal(str(paramDict['value']))+'\n'
# Output Signals
dataSourceText += ' Signals = {\n'
for outputDict in outputDicts:
dataSourceText += ' '+outputDict['name']+' = {\n'
dataSourceText += ' Type = '+outputDict['type']+'\n'
if outputDict['dimensions'] == 0:
numberOfElements = 1
numberOfDimensions = 0
else:
numberOfDimensions = len(outputDict['dimensions'])
numberOfElements = 1
for currDim in outputDict['dimensions']:
numberOfElements *= currDim
dataSourceText += ' NumberOfDimensions = ' + \
str(numberOfDimensions)+'\n'
dataSourceText += ' NumberOfElements = ' + \
str(numberOfElements)+'\n'
dataSourceText = self.addSignalParameters(
outputDict['value_nid'].getParent().getNode('parameters'), dataSourceText)
dataSourceText += ' }\n'
dataSourceText += ' }\n'
dataSourceText += ' }\n'
dataSources.append(dataSourceText)
# MDSWriter management
nonGamInputNodes = []
if configDict['storeSignals']:
dataSourceText = ' +'+dataSourceName+'_TreeOutput = {\n'
dataSourceText += ' Class = MDSWriter\n'
if outputTrigger == None:
dataSourceText += ' NumberOfBuffers = 20000\n'
else:
dataSourceText += ' NumberOfBuffers = ' + \
str(configDict['preTrigger'] +
configDict['postTrigger']+1)+'\n'
dataSourceText += ' NumberOfPreTriggers = ' + \
str(configDict['preTrigger'])+'\n'
dataSourceText += ' NumberOfPostTriggers = ' + \
str(configDict['postTrigger'])+'\n'
dataSourceText += ' CPUMask = ' + \
str(configDict['cpuMask'])+'\n'
dataSourceText += ' StackSize = 10000000\n'
dataSourceText += ' TreeName = "'+self.getTree().name+'"\n'
dataSourceText += ' PulseNumber = ' + \
str(self.getTree().shot)+'\n'
if outputTrigger == None:
dataSourceText += ' StoreOnTrigger = 0\n'
else:
dataSourceText += ' StoreOnTrigger = 1\n'
dataSourceText += ' EventName = "'+dataSourceName+'UpdatejScope"\n'
dataSourceText += ' TimeRefresh = 1\n'
dataSourceText += ' Signals = {\n'
currTimebase = self.getNode('timebase').evaluate()
if isinstance(currTimebase, Range):
period = currTimebase.delta.data()
else:
currTimebase = currTimebase.data()
period = currTimebase[1] - currTimebase[0]
# If trigger is defined put it as first signal
if outputTrigger != None:
dataSourceText += ' Trigger = {\n'
dataSourceText += ' Type = uint8\n'
dataSourceText += ' }\n'
# If the Input device is not synchronising, store time in outputs:time
if not isSynch:
dataSourceText += ' Time = {\n'
dataSourceText += ' NodeName = "' + \
configDict['outTimeNid'].getFullPath()+'"\n'
# We must keep into account the number of samples in an input device
dataSourceText += ' Period = ' + \
str(period/outputDict['samples'])+'\n'
dataSourceText += ' MakeSegmentAfterNWrites = 100\n'
dataSourceText += ' AutomaticSegmentation = 0\n'
dataSourceText += ' Type = uint32\n'
if startTime != 0:
dataSourceText += ' SamplePhase = ' + \
str(int(round(startTime/period)))+'\n'
dataSourceText += ' }\n'
outIdx = | |
split_oris is not False # set to boolean
if (exp_v_unexp + (len(quantpar.qu_idx) > 1) + ("half" in class_var)
+ split_oris) > 1:
raise ValueError("Cannot combine any of the following: separating "
"quartiles, exp_v_unexp, half comparisons, multiple Gabor frame "
"orientation comparisons.")
elif len(quantpar.qu_idx) > 2:
raise ValueError("Max of 2 quartiles expected.")
elif split_oris and len(stimpar.gabfr) > 2:
raise ValueError("Max of 2 Gabor frame sets expected for orientation "
"classification.")
# check for stimulus pre/post problems
pre_post_err = False
get_2nd, remconsec_unexps = False, False
if stimpar.pre > 0:
if stimpar.stimtype == "visflow":
if class_var == "unexps":
remconsec_unexps = True
elif stimpar.pre == 1:
get_2nd = True
else:
pre_post_err = True
else:
pre_post_err = True
if stimpar.post > 1.0:
if not stimpar.stimtype == "gabors" and stimpar.post <= 1.5:
pre_post_err = True
if pre_post_err:
raise NotImplementedError("Not implemented to prevent sequence overlap "
f"for {stimpar.stimtype}: {stimpar.pre} pre/{stimpar.post} post "
f"for {class_var} classification")
n = 1
if class_var == "unexps":
n_cl = len(unexps)
elif "half" in class_var:
n_cl = 2
# DOUBLE unexp ns to compensate for shorter blocks, if using control
n = 2
if "diff" in class_var:
quantpar = sess_ntuple_util.init_quantpar(4, [[1, 2]])
if len(np.unique(stim.main_flow_direcs)) != 2:
raise RuntimeError(
"Segments do not fit these criteria (missing directions).")
else:
quantpar = sess_ntuple_util.init_quantpar(2, [[0, 1]])
else:
n_cl = len(stimpar._asdict()[class_var])
# modify unexps, qu_idx, gabfr to cycle through datasets
if len(quantpar.qu_idx) == 2:
unexps = [unexps, unexps]
gabfr_idxs = ["ignore", "ignore"]
if exp_v_unexp:
raise ValueError(
"Cannot set exp_v_unexp to True if more than 1 quantile.")
if "part" in class_var:
raise ValueError("Cannot do half comparisons with quartiles.")
elif exp_v_unexp:
unexps = [unexps, 1-unexps]
gabfr_idxs = ["ignore", "ignore"]
quantpar = sess_ntuple_util.init_quantpar(1, [0, 0])
elif split_oris:
unexps = unexps
gabfr_idxs = [0, 1]
quantpar = sess_ntuple_util.init_quantpar(1, [0, 0])
else:
unexps = [unexps]
gabfr_idxs = ["ignore"]
gabfr_idxs = [0, 1] if split_oris else ["ignore", "ignore"]
# cycle through classes
roi_seqs = [[] for _ in range(len(quantpar.qu_idx))]
seq_classes = [[] for _ in range(len(quantpar.qu_idx))]
unexp_ns = [[] for _ in range(len(quantpar.qu_idx))]
# cycle through data groups (quant or exp_v_unexp or gabfr for oris)
for d, (qu_i, sub_unexps, gabfr_idx) in enumerate(
zip(quantpar.qu_idx, unexps, gabfr_idxs)):
for cl in range(n_cl):
use_qu_i = [qu_i]
unexp = sub_unexps
stimpar_sp = stimpar
if class_var == "unexps":
unexp = sub_unexps[cl]
elif "half" in class_var:
use_qu_i = [qu_i[cl]]
else:
keys = class_var
vals = stimpar._asdict()[class_var][cl]
if split_oris:
keys = [keys, "gabfr", "gab_ori"]
gabfr = stimpar.gabfr[gabfr_idx]
gab_ori = stimpar.gab_ori[cl]
vals = [vals, gabfr, gab_ori]
# modify stimpar
stimpar_sp = sess_ntuple_util.get_modif_ntuple(
stimpar, keys, vals)
roi_data, unexp_n = get_data(
stim, analyspar, stimpar_sp, quantpar, qu_i=use_qu_i,
unexp=unexp, remconsec_unexps=remconsec_unexps, n=n,
get_2nd=get_2nd)
roi_seqs[d].append(roi_data)
seq_classes[d].append(np.full(len(roi_data), cl))
unexp_ns[d].append(unexp_n)
# concatenate data split by class along trial seqs axis
roi_seqs[d] = np.concatenate(roi_seqs[d], axis=0)
seq_classes[d] = np.concatenate(seq_classes[d], axis=0)
# get logistic variance across datasets
log_var = np.log(np.var(np.concatenate(roi_seqs, axis=0)))
n_fr, nrois = roi_seqs[0].shape[1:] # in training set
if stimpar.stimtype == "gabors":
unexp_use = unexps[0]
if unexp_use == [0, 1] and not isinstance(stimpar.gabfr, list):
unexp_use = "any"
if split_oris:
gabfr_lett = [sess_str_util.gabfr_letters(
gabfr, unexp=unexp_use) for gabfr in stimpar.gabfr]
gabfr_lett = " -> ".join([str(lett) for lett in gabfr_lett])
else:
gabfr_lett = sess_str_util.gabfr_letters(
stimpar.gabfr, unexp=unexp_use)
stim_info = f"\nGab fr: {gabfr_lett}\nGab K: {stimpar.gabk}"
elif stimpar.stimtype == "visflow":
stim_info = (f"\nVisual flow dir: {stimpar.visflow_dir}\n"
f"Visual flow size: {stimpar.visflow_size}")
logger.info(f"Runtype: {sess.runtype}\nMouse: {sess.mouse_n}\n"
f"Sess: {sess.sess_n}\nPlane: {sess.plane}\nLine: {sess.line}\n"
f"Fluor: {analyspar.fluor}\nROIs: {nrois}{stim_info}\n"
f"Frames per seg: {n_fr}\nLogvar: {log_var:.2f}",
extra={"spacing": "\n"})
return roi_seqs, seq_classes, unexp_ns
#############################################
def sample_seqs(roi_seqs, seq_classes, n_unexp):
    """
    sample_seqs(roi_seqs, seq_classes, n_unexp)

    Subsamples sequences so the output keeps exactly n_unexp unexpected
    (class 1) sequences and fills the rest of half the original total with
    randomly drawn expected (class 0) sequences.

    Required args:
        - roi_seqs (3D array)   : all ROI trace sequences, structured as:
                                  sequences x frames x ROIs
        - seq_classes (2D array): all sequence classes (0, 1), structured
                                  as class values x 1
        - n_unexp (int)         : number of unexpected sequences to keep

    Returns:
        - roi_seqs (3D array)   : selected ROI trace sequences, structured
                                  as sequences x frames x ROIs
        - seq_classes (2D array): classes of the selected sequences,
                                  structured as class values x 1
    """
    if np.unique(seq_classes).tolist() != [0, 1]:
        raise ValueError("Function expects classes 0 and 1 only.")

    # pools of indices available for each class
    exp_pool = np.where(seq_classes == 0)[0]
    unexp_pool = np.where(seq_classes == 1)[0]

    # keep half of the original total: n_unexp unexpected + the remainder
    # drawn from the expected pool
    n_exp = (len(exp_pool) + len(unexp_pool)) // 2 - n_unexp

    keep_exp = np.random.choice(exp_pool, n_exp, replace=False)
    keep_unexp = np.random.choice(unexp_pool, n_unexp, replace=False)

    roi_seqs = np.concatenate(
        [roi_seqs[keep_exp], roi_seqs[keep_unexp]], axis=0)
    seq_classes = np.concatenate(
        [seq_classes[keep_exp], seq_classes[keep_unexp]], axis=0)

    return roi_seqs, seq_classes
#############################################
def save_tr_stats(plot_data, plot_targ, data_names, analyspar, stimpar, n_rois,
                  alg="sklearn", mod=None, dirname="."):
    """
    save_tr_stats(plot_data, plot_targ, data_names, stimpar, n_rois)

    Computes trace statistics for each named dataset, saves them as
    'tr_stats.json' in the specified directory and returns them.

    Required args:
        - plot_data (list)     : 3D arrays of selected ROI trace seqs to be
                                 plotted, each structured as
                                 sequences x frames x ROIs (None entries
                                 are skipped)
        - plot_targ (list)     : 2D arrays of sequence classes to be
                                 plotted, each structured as
                                 class values x 1
        - data_names (list)    : name for each plot_data array
        - analyspar (AnalysPar): named tuple containing analysis parameters
        - stimpar (StimPar)    : named tuple containing stimulus parameters
        - n_rois (int)         : number of ROIs in data

    Optional args:
        - alg (str)             : algorithm used to run logistic regression
                                  ("sklearn" or "pytorch")
                                  default: "sklearn"
        - mod (sklearn pipeline): sklearn pipeline (model); required if alg
                                  is "sklearn"
                                  default: None
        - dirname (Path)        : directory in which to save the traces
                                  default: "."

    Returns:
        - tr_stats (dict): trace stats data, with keys "n_rois", "xran",
                           and, per dataset name, "{name}_class_stats"
                           (class x stats (me, err) x frames) and
                           "{name}_ns" (number of segments per class)
    """
    if len(data_names) != len(plot_data):
        raise ValueError("Expected as many 'plot_data' items as 'data_names'.")

    tr_stats = {"n_rois": n_rois}
    # classes are taken from the first target array
    classes = np.unique(plot_targ[0])

    for data, targ, name in zip(plot_data, plot_targ, data_names):
        if data is None:
            # dataset not provided: nothing to compute for this name
            continue
        if alg == "sklearn":
            # scales, flattens and (for the training set) shuffles the data
            data = logreg_util.get_transf_data_sk(
                mod, data, False, name == "train")
        elif alg == "pytorch":
            data, targ = data.numpy(), targ.numpy()
        else:
            gen_util.accepted_values_error("alg", alg, ["sklearn", "pytorch"])
        xran, class_stats, ns = logreg_util.get_stats(
            data, targ, stimpar.pre, stimpar.post, classes, analyspar.stats,
            analyspar.error)
        tr_stats["xran"] = xran.tolist()
        tr_stats[f"{name}_class_stats"] = class_stats.tolist()
        tr_stats[f"{name}_ns"] = ns

    file_util.saveinfo(tr_stats, "tr_stats.json", dirname)

    return tr_stats
#############################################
@logreg_util.catch_set_problem
def init_logreg_model_pt(roi_seqs, seq_classes, logregpar, extrapar,
scale=True, device="cpu", thresh_cl=2):
"""
init_logreg_model_pt(roi_seqs, seq_classes, logregpar, extrapar)
Initializes and returns the pytorch logreg model and dataloaders.
Required args:
- roi_seqs (list) : list of 3D arrays of selected ROI trace seqs
(1 or 2 if an additional test set is
included), each structured as
sequences x frames x ROIs
- seq_classes (list) : list of 2D arrays of sequence classes (0 or 1)
(1 or 2 if an additional test set is
included), each structured as
class values x 1
- logregpar (LogRegPar) : named tuple containing logistic regression
parameters
- extrapar (dict) : dictionary with extra parameters
["shuffle"] (bool): if True, data is shuffled
Optional args:
- scale (bool) : whether data is scaled by ROI
default: True
- device (str) : device to use
default: "cpu"
- thresh_cl (int) : size threshold for classes in each non empty
set beneath which the indices are reselected
(only if targets are passed). Not checked if
thresh_cl is 0.
default: 2
Returns:
- model (torch.nn.Module) : Neural network module with optimizer
and loss function as attributes
- dls (list of torch DataLoaders): list of torch DataLoaders for
each set. If a set is empty, the
corresponding dls value is None.
- extrapar (dict) : | |
<reponame>LittleBitProgrammer/myUniversity<filename>my_university_api/application/api/secretary/database_functions.py
# This file contains all database functions of the secretary
####################################################################
# import
####################################################################
from mysql.connector import Error # to use error
from flask_bcrypt import Bcrypt
bcrypt = Bcrypt()
####################################################################
# DB_functions
####################################################################
####################################################################
# Convenzioni:
# Per le POST
# def *operazione**Nometabella* (*input*, connection):
#
# Per le GET
# def get_all_*nome_tabella* (*input*, connection):
####################################################################
# function to insert head office inside database
def insertHeadOffice(nome_sede,
                     orario_apertura,
                     orario_chiusura,
                     numero_piani,
                     cap,
                     via_piazza,
                     civico,
                     connection):
    """Insert one record into the SEDE table, then close the connection."""
    try:
        cur = connection.cursor()
        # values are bound positionally to the %s placeholders below
        sede_record = (nome_sede, orario_apertura, orario_chiusura, numero_piani, cap, via_piazza, civico)
        mySQL_query_insert_head_office = """INSERT INTO sede(nome_sede,
                                                             orario_apertura,
                                                             orario_chiusura,
                                                             numero_piani,
                                                             cap,
                                                             via_piazza,
                                                             civico)
                                            VALUES(%s, %s, %s, %s, %s, %s, %s)"""
        cur.execute(mySQL_query_insert_head_office, sede_record)
        connection.commit()
        print("Record inserted successfully into SEDE table")
    except Error as error:
        print(f"Failed to insert into MySQL table {error}")
    finally:
        # this API always closes the connection after each request
        if connection.is_connected():
            cur.close()
            connection.close()
            print("MySQL connection is closed")
# function to get all head offices
def get_all_head_offices(connection):
    """Return every head office (sede) with its contact list attached.

    Each returned dict holds the sede columns plus a 'contatti' key
    containing that office's {tipo_contatto, valore_contatto} dicts.
    Returns an empty list on failure; the connection is always closed
    before returning.
    """
    head_offices = []
    try:
        cursor = connection.cursor(dictionary=True)
        mySQL_query_get_all_head_offices = """SELECT nome_sede,
                                              orario_apertura,
                                              orario_chiusura,
                                              numero_piani,
                                              cap,
                                              via_piazza,
                                              civico
                                              FROM sede"""
        cursor.execute(mySQL_query_get_all_head_offices)
        head_offices = cursor.fetchall()
        mySQL_query_get_all_head_office_contacts = """SELECT tipo_contatto, valore_contatto
                                                      FROM contatto_sede
                                                      WHERE nome_sede = %s"""
        # attach the contact list of each head office to its own record
        for head_office in head_offices:
            cursor.execute(mySQL_query_get_all_head_office_contacts, (head_office['nome_sede'],))
            head_office['contatti'] = cursor.fetchall()
    except Error as error:
        # bug fix: this is a SELECT — the old message claimed a failed insert
        print(f"Failed to retrieve from MySQL table {error}")
    finally:
        if connection.is_connected():
            cursor.close()
            connection.close()
            print("MySQL connection is closed")
    return head_offices
# function to insert contact of an head office
def insertHeadOfficeContact(nome_sede, tipo_contatto, valore_contatto, connection):
    """Register a contact and link it to a head office, then close the connection."""
    try:
        cur = connection.cursor()
        mySQL_query_insert_contact = """INSERT INTO contatto(tipo_contatto,valore_contatto)
                                        VALUES(%s,%s)"""
        mySQL_query_insert_head_office_contact = """INSERT INTO contatto_sede(nome_sede, tipo_contatto, valore_contatto)
                                                    VALUES(%s,%s,%s)"""
        # the contact row must exist before the link row that references it
        cur.execute(mySQL_query_insert_contact, (tipo_contatto, valore_contatto))
        cur.execute(mySQL_query_insert_head_office_contact, (nome_sede, tipo_contatto, valore_contatto))
        connection.commit()
    except Error as error:
        print(f"Failed to insert into MySQL table {error}")
    finally:
        if connection.is_connected():
            cur.close()
            connection.close()
            print("MySQL connection is closed")
# function to delete sede from database
def deleteHeadOffice(nome_sede, connection):
    """Delete a head office (sede) and every row referencing it.

    Dependent rows (contacts, hosting links, lessons, rooms) are removed
    first so the final DELETE on sede does not violate foreign keys; the
    connection is always closed before returning.

    Parameters:
        nome_sede  -- primary key of the sede to remove
        connection -- open mysql.connector connection (closed here)
    """
    try:
        cursor = connection.cursor()
        head_office_tuple = (nome_sede,)
        mySQL_query_delete_head_office_contact = """DELETE FROM contatto_sede
                                                    WHERE nome_sede = %s"""
        mySQL_query_delete_head_office_location = """DELETE FROM ospitazione
                                                     WHERE nome_sede = %s"""
        mySQL_query_delete_head_office_room = """DELETE FROM aula
                                                 WHERE nome_sede = %s"""
        mySQL_query_delete_head_office_lesson = """DELETE FROM lezione
                                                   WHERE nome_sede = %s"""
        mySQL_query_delete_head_office = """DELETE FROM sede
                                            WHERE nome_sede = %s"""
        # children first: lessons reference rooms, so lessons go before rooms
        cursor.execute(mySQL_query_delete_head_office_contact, head_office_tuple)
        cursor.execute(mySQL_query_delete_head_office_location, head_office_tuple)
        cursor.execute(mySQL_query_delete_head_office_lesson, head_office_tuple)
        cursor.execute(mySQL_query_delete_head_office_room, head_office_tuple)
        cursor.execute(mySQL_query_delete_head_office, head_office_tuple)
        connection.commit()
        print("Record deleted successfully")
    except Error as error:
        # bug fix: this function deletes — the old message claimed an insert
        print(f"Failed to delete from MySQL table {error}")
    finally:
        if connection.is_connected():
            cursor.close()
            connection.close()
            print("MySQL connection is closed")
# function to insert a room
def insertRoom(nome_sede,
               numero_piano,
               numero_aula,
               capienza,
               connection):
    """Insert one room (aula) record, then close the connection."""
    try:
        cur = connection.cursor()
        mySQL_query_insert_room = """INSERT INTO aula(nome_sede,
                                                      numero_piano,
                                                      numero_aula,
                                                      capienza)
                                     VALUES (%s, %s, %s, %s)"""
        # bind the room values positionally to the placeholders above
        cur.execute(mySQL_query_insert_room, (nome_sede, numero_piano, numero_aula, capienza))
        connection.commit()
    except Error as error:
        print(f'failed to insert into mySQL table {error}')
    finally:
        if connection.is_connected():
            cur.close()
            connection.close()
            print('MySQL connection is closed')
# function to delete room inside database
def deleteRoom(nome_sede,
               numero_piano,
               numero_aula,
               connection):
    """Delete a room (aula) and the lessons scheduled in it.

    Lessons reference the room, so they are removed first to keep the
    foreign keys valid; the connection is always closed before returning.
    """
    try:
        cursor = connection.cursor()
        mySQL_query_delete_room_lesson = """DELETE FROM lezione
                                            WHERE nome_sede = %s
                                            AND numero_piano = %s
                                            AND numero_aula = %s"""
        mySQL_query_delete_room = """DELETE FROM aula
                                     WHERE nome_sede = %s
                                     AND numero_piano = %s
                                     AND numero_aula = %s"""
        delete_room_tuple = (nome_sede, numero_piano, numero_aula)
        cursor.execute(mySQL_query_delete_room_lesson, delete_room_tuple)
        cursor.execute(mySQL_query_delete_room, delete_room_tuple)
        connection.commit()
    except Error as error:
        # bug fix: this function deletes — the old message claimed an insert
        print(f'failed to delete from mySQL table {error}')
    finally:
        if connection.is_connected():
            cursor.close()
            connection.close()
            print('MySQL connection is closed')
# function to insert degree course inside database
def insertDegreeCourse(codice_corso, nome_corso, durata_corso_laurea, connection):
    """Insert one degree course (corso_di_laurea) record, then close the connection."""
    try:
        cur = connection.cursor()
        mySQL_query_insert_degree_course = """INSERT INTO corso_di_laurea (codice_corso, nome_corso, durata_corso_laurea)
                                              VALUES (%s, %s, %s)"""
        cur.execute(mySQL_query_insert_degree_course, (codice_corso, nome_corso, durata_corso_laurea))
        connection.commit()
    except Error as error:
        print(f'failed to insert into mySQL table {error}')
    finally:
        if connection.is_connected():
            cur.close()
            connection.close()
            print('MySQL connection is closed')
# function to get all degree courses
def get_all_degree_courses(connection):
    """Return every degree course as a list of dicts (empty on failure).

    The connection is always closed before returning.
    """
    degree_courses = []
    try:
        cursor = connection.cursor(dictionary=True)
        mySQL_query_get_all_degree_courses = """SELECT codice_corso,
                                                nome_corso,
                                                durata_corso_laurea
                                                FROM corso_di_laurea"""
        cursor.execute(mySQL_query_get_all_degree_courses)
        degree_courses = cursor.fetchall()
    except Error as error:
        # bug fix: this is a SELECT — the old message claimed a failed insert
        print(f"Failed to retrieve from MySQL table {error}")
    finally:
        if connection.is_connected():
            cursor.close()
            connection.close()
            print("MySQL connection is closed")
    return degree_courses
# function to delete a degree course (and every referencing row) from the database
def deleteDegreeCourse(codice_corso, connection):
    """Delete a degree course from ``corso_di_laurea``.

    Every child table that references ``codice_corso`` is purged first, in
    dependency order, so no foreign-key constraint is violated. Committed
    on success; the connection is always closed afterwards.

    Args:
        codice_corso: primary key of the degree course to remove.
        connection: open MySQL connection; closed before returning.
    """
    cursor = None
    try:
        cursor = connection.cursor()
        delete_degree_course_tuple = (codice_corso,)
        # Child tables first, the degree-course table itself last
        # (same order as the original hand-written statements).
        tables = (
            'ospitazione',
            'appartiene',
            'lavora',
            'lezione',
            'avviso',
            'insegna',
            'disciplina_seguita',
            'iscrizione_newsletter',
            'disciplina',
            'corso_di_laurea',
        )
        for table in tables:
            # Table names come from the constant tuple above, never from
            # user input, so this f-string cannot be injected.
            cursor.execute(
                f'DELETE FROM {table} WHERE codice_corso = %s',
                delete_degree_course_tuple)
        connection.commit()
    except Error as error:
        # Fixed message: this helper deletes, it does not insert.
        print(f'failed to delete from mySQL table {error}')
    finally:
        if connection.is_connected():
            if cursor is not None:  # connection.cursor() itself may have failed
                cursor.close()
            connection.close()
            print('MySQL connection is closed')
# function to insert located course-head office inside database
def insertLocation(nome_sede, codice_corso, connection):
    """Link a head office (``sede``) to a degree course in ``ospitazione``.

    Commits on success; the connection is always closed before returning.
    """
    try:
        cursor = connection.cursor()
        mySQL_query_insert_location = """INSERT INTO ospitazione (nome_sede, codice_corso)
                                         VALUES (%s, %s)"""
        cursor.execute(mySQL_query_insert_location, (nome_sede, codice_corso))
        connection.commit()
    except Error as error:
        print(f'failed to insert into mySQL table {error}')
    finally:
        if connection.is_connected():
            cursor.close()
            connection.close()
            print('MySQL connection is closed')
# function to fetch all head office / degree course pairings
def get_all_locations(connection):
    """Return every degree course joined with its hosting head office.

    Joins ``corso_di_laurea`` - ``ospitazione`` - ``sede`` and returns a
    list of dict rows; an empty list when the query fails. The connection
    is always closed before returning (single-shot helper).
    """
    locations = []
    cursor = None
    try:
        cursor = connection.cursor(dictionary=True)
        mySQL_query_get_all_locations = """SELECT corso_di_laurea.codice_corso,
                                           corso_di_laurea.nome_corso,
                                           corso_di_laurea.durata_corso_laurea,
                                           sede.nome_sede,
                                           sede.orario_apertura,
                                           sede.orario_chiusura,
                                           sede.numero_piani,
                                           sede.cap,
                                           sede.via_piazza,
                                           sede.civico
                                           FROM corso_di_laurea
                                           NATURAL JOIN ospitazione
                                           NATURAL JOIN sede"""
        cursor.execute(mySQL_query_get_all_locations)
        locations = cursor.fetchall()
    except Error as error:
        # Fixed message: this helper reads, it does not insert.
        print(f"Failed to read from MySQL table {error}")
    finally:
        if connection.is_connected():
            if cursor is not None:  # connection.cursor() itself may have failed
                cursor.close()
            connection.close()
            print("MySQL connection is closed")
    return locations
# function to delete a head office / degree course pairing from the database
def deleteLocation(nome_sede,
                   codice_corso,
                   connection):
    """Remove one (``nome_sede``, ``codice_corso``) row from ``ospitazione``.

    Commits on success; the connection is always closed before returning.
    """
    cursor = None
    try:
        cursor = connection.cursor()
        mySQL_query_delete_location = """DELETE FROM ospitazione
                                         WHERE nome_sede = %s
                                         AND codice_corso = %s"""
        delete_location_tuple = (nome_sede, codice_corso)
        cursor.execute(mySQL_query_delete_location, delete_location_tuple)
        connection.commit()
    except Error as error:
        # Fixed message: this helper deletes, it does not insert.
        print(f'failed to delete from mySQL table {error}')
    finally:
        if connection.is_connected():
            if cursor is not None:  # connection.cursor() itself may have failed
                cursor.close()
            connection.close()
            print('MySQL connection is closed')
def insertDiscipline(codice_corso,
                     codice_disciplina,
                     nome_disciplina,
                     cfu,
                     semestre,
                     anno,
                     connection):
    """Insert one course subject row into ``disciplina`` and commit.

    The connection is always closed before returning (single-shot helper).
    """
    try:
        cursor = connection.cursor()
        mySQL_query_insert_discipline = """INSERT INTO disciplina(codice_corso,
                                           codice_disciplina,
                                           nome_disciplina,
                                           cfu,
                                           semestre,
                                           anno)
                                           VALUES (%s, %s, %s, %s, %s, %s)"""
        cursor.execute(mySQL_query_insert_discipline,
                       (codice_corso, codice_disciplina, nome_disciplina,
                        cfu, semestre, anno))
        connection.commit()
    except Error as error:
        print(f'failed to insert into mySQL table {error}')
    finally:
        if connection.is_connected():
            cursor.close()
            connection.close()
            print('MySQL connection is closed')
# function to fetch all subjects grouped under their degree course
def get_all_discipline(connection):
    """Return every subject joined with its degree course name.

    Rows are ordered by degree-course name; an empty list is returned when
    the query fails. The connection is always closed before returning.
    """
    discipline = []
    cursor = None
    try:
        cursor = connection.cursor(dictionary=True)
        mySQL_query_get_all_discipline = """SELECT corso_di_laurea.nome_corso,
                                            disciplina.nome_disciplina,
                                            disciplina.codice_disciplina,
                                            disciplina.cfu,disciplina.anno,
                                            disciplina.semestre
                                            FROM corso_di_laurea
                                            NATURAL JOIN disciplina
                                            ORDER BY corso_di_laurea.nome_corso"""
        cursor.execute(mySQL_query_get_all_discipline)
        discipline = cursor.fetchall()
    except Error as error:
        # Fixed message: this helper reads, it does not insert.
        print(f"Failed to read from MySQL table {error}")
    finally:
        if connection.is_connected():
            if cursor is not None:  # connection.cursor() itself may have failed
                cursor.close()
            connection.close()
            print("MySQL connection is closed")
    return discipline
# function to delete a discipline (and every referencing row) from the database
def deleteDiscipline(codice_corso, codice_disciplina, connection):
    """Delete one subject from ``disciplina``.

    Every child table referencing the (``codice_corso``,
    ``codice_disciplina``) pair is purged first, in dependency order, so
    no foreign-key constraint is violated. Committed on success; the
    connection is always closed afterwards.
    """
    cursor = None
    try:
        cursor = connection.cursor()
        delete_discipline_tuple = (codice_corso, codice_disciplina)
        # Child tables first, the discipline table itself last
        # (same order as the original hand-written statements).
        tables = (
            'lezione',
            'avviso',
            'insegna',
            'disciplina_seguita',
            'iscrizione_newsletter',
            'disciplina',
        )
        for table in tables:
            # Table names come from the constant tuple above, never from
            # user input, so this f-string cannot be injected.
            cursor.execute(
                f'DELETE FROM {table} WHERE codice_corso = %s AND codice_disciplina = %s',
                delete_discipline_tuple)
        connection.commit()
    except Error as error:
        # Fixed message: this helper deletes, it does not insert.
        print(f'failed to delete from mySQL table {error}')
    finally:
        if connection.is_connected():
            if cursor is not None:  # connection.cursor() itself may have failed
                cursor.close()
            connection.close()
            print('MySQL connection is closed')
# function to insert teacher inside database
def insertTeacher(cf,
nome,
cognome,
data_di_nascita,
luogo_di_nascita,
cap,
via_piazza,
civico,
matricola_docente,
email_docente,
password_docente,
connection):
try:
cursor = connection.cursor()
pw_hash = bcrypt.generate_password_hash(password_docente)
# tuple of person and student
person_tuple = (cf, nome, cognome, data_di_nascita, luogo_di_nascita, cap, via_piazza, civico)
teacher_tuple = (matricola_docente, cf, email_docente, pw_hash)
mySql_insert_person = """INSERT INTO persona(cf,
nome,
cognome,
data_di_nascita,
luogo_di_nascita,
cap,
via_piazza,
civico)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s) """
mySql_insert_teacher = """INSERT INTO docente(matricola_docente,
cf,
email_docente,
password_docente)
VALUES (%s, %s, %s, %s) """
cursor.execute(mySql_insert_person, person_tuple)
cursor.execute(mySql_insert_teacher, teacher_tuple)
connection.commit()
print("Record inserted successfully into Person and Student table")
except Error as error:
print(f"Failed to insert into | |
str(i))(transformation_params)
if i == 0 or i == 1 or i == 2:
y_max, y_min = 0.201, -0.201
Pasta_Para = Lambda( lambda x: tf.expand_dims( (y_max - y_min) * x + y_min, 1 ),
name = 'Mapping_' + str(i))(out)
branch_outputs.append(Pasta_Para)
elif i == 3 or i == 4 or i == 5:
y_max, y_min = 20.00 * 0.01745, -20.00 * 0.01745
# y_max, y_min = 0.34900, -0.34900
# y_max, y_min = 30.01 * 0.01745, -30.01 * 0.01745
# y_max, y_min = 30.01, -30.01
Pasta_Para = Lambda(lambda x: tf.expand_dims(tf.sin((y_max - y_min) * x + y_min),1), name = 'Mapping_' + str(i))(out)
branch_outputs.append(Pasta_Para)
elif i > 5 and i < 12:
y_max, y_min = 0.051, -0.051
Pasta_Para = Lambda( lambda x: tf.expand_dims( (y_max - y_min) * x + y_min, 1 ),
name = 'Mapping_' + str(i))(out)
branch_outputs.append(Pasta_Para)
elif i > 5:
y_max, y_min = 1.051, 0.951
Pasta_Para = Lambda( lambda x: tf.expand_dims( (y_max - y_min) * x + y_min, 1 ),
name = 'Mapping_' + str(i))(out)
branch_outputs.append(Pasta_Para)
PASTA_15 = Concatenate( axis = 1, name = 'PASTA_15')(branch_outputs)
AFF_12 = Lambda( lambda x:layers.Combining_Affine_Para3D(x), name = 'AFF_12')([src, PASTA_15])
else:
initial_affine_matrix = [
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
]
initial_affine_matrix = tf.constant_initializer(value=initial_affine_matrix)
AFF_12 = Dense(12, bias_initializer=initial_affine_matrix, name = 'AFF_12')(x)
Affine_Warped = Lambda( lambda x: layers.affine_flow_3D(x), output_shape = layers.affine_flow_3D_output_shape, name = 'Affine_Warpped')([src1, AFF_12])
model = Model(inputs = [src, tgt], outputs = [Affine_Warped,Affine_Warped])
model.input_tgt = tgt
model.output_Affine_Warped = Affine_Warped
model.AFF_12 = AFF_12
if PASTA == True:
model.PASTA_15 = PASTA_15
if "NCCLoss" in exp_name:
model.compile(
optimizer = optimizer,
loss = [ affine_Loss_zoo(tgt, Affine_Warped, "NCCLoss"),
affine_Loss_zoo(tgt, Affine_Warped, "NCCLoss")],
loss_weights = [1, 0],
metrics=[metrics.DC_Score, metrics.DC_Score]
)
elif "DCLoss" in exp_name:
model.compile(
optimizer = optimizer,
loss = [ affine_Loss_zoo(tgt, Affine_Warped, "DCLoss"),
affine_Loss_zoo(tgt, Affine_Warped, "DCLoss")],
loss_weights = [1, 0],
metrics=[metrics.DC_Score, metrics.DC_Score]
)
return model
def CroStchNet2D(exp_name, optimizer, unit_num, PASTA):
    """Build (and usually compile) a 2D cross-stitch affine registration net.

    Two Siamese CNN towers encode the 1x64x64 source and target images and
    exchange information through cross-stitch units at the selected depths.

    Args:
        exp_name: experiment name; the model is compiled with NCC loss only
            when it contains "NCCLoss" -- otherwise the model is returned
            UNCOMPILED (NOTE(review): the sibling 3D builder also handles a
            "DCLoss" branch; confirm its absence here is intentional).
        optimizer: Keras optimizer used for compilation.
        unit_num: number of cross-stitch units, expected 1..6; any other
            value leaves ``cs_level`` undefined and raises NameError --
            TODO confirm callers always pass 1..6.
        PASTA: if True, predict 7 bounded transformation parameters
            (translation/rotation/shear/scale) that are combined into an
            affine matrix by ``layers.Combining_Affine_Para``; if False,
            regress the 6 affine entries directly (bias initialized to the
            identity transform).

    Returns:
        Keras Model mapping [src, tgt] to [Affine_Warped, Affine_Warped]
        (the warp is emitted twice to match the two-loss compile setup).

    NOTE(review): ``.shape._value`` below is a TF1.x private accessor; this
    code presumably targets TF 1.x graph mode -- confirm before porting.
    """
    def Conv2dBlock(filters, x):
        # conv -> BN -> leaky ReLU -> dropout, twice, then 2x2 avg pooling.
        dpr = 0.2
        x = Conv2D(
            filters, (3, 3),
            padding='same',
            data_format='channels_first',
            use_bias=False)(x)
        x = BatchNormalization()(x)
        x = Activation(lambda x : relu(x, alpha = 0.01))(x)
        x = Dropout(dpr, seed = 1)(x)
        x = Conv2D(
            filters, (3, 3),
            padding='same',
            data_format='channels_first',
            use_bias=False)(x)
        x = BatchNormalization()(x)
        x = Activation(lambda x : relu(x, alpha = 0.01))(x)
        x = Dropout(dpr, seed = 2)(x)
        x = AveragePooling2D(pool_size=(2, 2), data_format='channels_first')(x)
        return x
    class CrossStitch(Layer):
        # Learnable linear mixing of the two towers' flattened activations,
        # initialized to identity (i.e. no mixing at the start of training).
        # basic parameter setting
        def __init__(self,input_shape):
            super(CrossStitch, self).__init__()
            # Flattened per-sample feature size (TF1 Dimension -> int).
            self.shape = np.prod(input_shape[1:])
            self.input_shape_1 = self.shape._value
            self.input_shape_2 = self.shape._value
        # self.output_shape = [input_shape[1],input_shape[2],input_shape[3]]
        # in cross-stitch network: [xa,xb]*[parameter]=[xa',xb'], the detail refer to the paper
        def build(self, input_shape):
            # One (2N x 2N) mixing matrix over both towers' features.
            shape = self.input_shape_1 + self.input_shape_2
            self.cross_stitch = self.add_weight(
                shape=(shape,shape),
                initializer=tf.initializers.identity(),
                name='CrossStitch')
            self.built = True
        # conduct implement of the detailed algorithm calculation
        # inputs represent the output of upper layer, such as x=Dense(parameter)(inputs)
        def call(self,inputs):
            # Flatten both towers, mix with the learned matrix, then split
            # and reshape each half back to its original (C, H, W) shape.
            x1 = Reshape((self.shape,))(inputs[0])
            x2 = Reshape((self.shape,))(inputs[1])
            inputss = tf.concat((x1, x2), axis=1)
            output = tf.matmul(inputss, self.cross_stitch)
            output1 = output[:,:self.input_shape_1]
            output2 = output[:,self.input_shape_2:]
            # print(output1.shape)
            # print(inputs[0].shape)
            s1 = inputs[0].shape[1]._value
            s2 = inputs[0].shape[2]._value
            s3 = inputs[0].shape[3]._value
            output1 = tf.reshape(
                output1,
                shape=[tf.shape(inputs[0])[0],s1,s2,s3])
            output2 = tf.reshape(
                output2,
                shape=[tf.shape(inputs[0])[0],s1,s2,s3])
            return [output1, output2]
    src = Input((1, 64, 64), name = 'src')
    tgt = Input((1, 64, 64), name = 'tgt')
    x1 = src
    x2 = tgt
    num_channels = [ 64, 64, 32, 16, 8, 8]
    # cs_level selects AFTER WHICH conv blocks a cross-stitch unit is
    # inserted; more units -> exchange starts earlier in the network.
    if unit_num == 1:
        cs_level = [ 5]
    elif unit_num == 2:
        cs_level = [ 4, 5]
    elif unit_num == 3:
        cs_level = [ 3, 4, 5]
    elif unit_num == 4:
        cs_level = [ 2, 3, 4, 5]
    elif unit_num == 5:
        cs_level = [ 1, 2, 3, 4, 5]
    elif unit_num == 6:
        cs_level = [ 0, 1, 2, 3, 4, 5]
    for i in range(len(num_channels)):
        # print(num_channels[i])
        x1 = Conv2dBlock(num_channels[i], x1)
        x2 = Conv2dBlock(num_channels[i], x2)
        if i in cs_level:
            [x1,x2] = CrossStitch(x1.shape)([x1,x2])
    x = Concatenate( axis = 1 )([x1, x2])
    x = Flatten()(x)
    if PASTA == True:
        # Predict 7 raw params in [0, 1] (capped leaky ReLU), then map each
        # to its physical range: 0-1 translation, 2 rotation (as sin of an
        # angle bounded to +/-30 deg), 3-4 shear, 5-6 scale.
        transformation_params = Dense(7)(x)
        max_value = 1.0
        Threshold = 0.0
        transformation_params = Activation(lambda x : relu(x, alpha = 0.01, max_value = max_value, threshold = Threshold))(transformation_params)
        branch_outputs = []
        # NOTE(review): the lambdas below close over loop variables i,
        # y_max, y_min (late binding); each Lambda layer is applied
        # immediately inside the loop, but re-serializing the model could
        # rebind them -- confirm models are not rebuilt from config.
        for i in range(7):
            out = Lambda(lambda x: x[:, i], name = "Splitting_" + str(i))(transformation_params)
            if i == 0 or i == 1:
                y_max, y_min = 0.201, -0.201
                Pasta_Para = Lambda( lambda x: tf.expand_dims( (y_max - y_min) * x + y_min, 1 ),
                                     name = 'Mapping_' + str(i))(out)
                branch_outputs.append(Pasta_Para)
            elif i == 2:
                # y_max, y_min = 20.00 * 0.01745, -20.00 * 0.01745
                # y_max, y_min = 0.34900, -0.34900
                # 0.01745 ~ pi/180: bounds expressed in radians.
                y_max, y_min = 30.01 * 0.01745, -30.01 * 0.01745
                # y_max, y_min = 30.01, -30.01
                Pasta_Para = Lambda(lambda x: tf.expand_dims(tf.sin((y_max - y_min) * x + y_min),1), name = 'Mapping_' + str(i))(out)
                branch_outputs.append(Pasta_Para)
            elif i == 3 or i == 4:
                y_max, y_min = 0.101, -0.101
                Pasta_Para = Lambda( lambda x: tf.expand_dims( (y_max - y_min) * x + y_min, 1 ),
                                     name = 'Mapping_' + str(i))(out)
                branch_outputs.append(Pasta_Para)
            elif i == 5 or i == 6:
                y_max, y_min = 1.101, 0.901
                Pasta_Para = Lambda( lambda x: tf.expand_dims( (y_max - y_min) * x + y_min, 1 ),
                                     name = 'Mapping_' + str(i))(out)
                branch_outputs.append(Pasta_Para)
        PASTA_7 = Concatenate( axis = 1, name = 'PASTA_7')(branch_outputs)
        AFF_6 = Lambda( lambda x:layers.Combining_Affine_Para(x), name = 'AFF_6')([src, PASTA_7])
    else:
        # Direct regression of the 2x3 affine matrix; bias starts at the
        # identity transform so the initial warp is a no-op.
        initial_affine_matrix = [
            1.0, 0.0, 0.0,
            0.0, 1.0, 0.0]
        initial_affine_matrix = tf.constant_initializer(value=initial_affine_matrix)
        AFF_6 = Dense(6, bias_initializer=initial_affine_matrix, name = 'AFF_6')(x)
    Affine_Warped = Lambda( lambda x: layers.affine_flow(x), output_shape = layers.affine_flow_output_shape, name = 'Affine_Warpped')([src, AFF_6])
    model = Model(inputs = [src, tgt], outputs = [Affine_Warped,Affine_Warped])
    # Extra handles stashed on the model for downstream inspection code.
    model.input_tgt = tgt
    model.output_Affine_Warped = Affine_Warped
    model.AFF_6 = AFF_6
    if PASTA == True:
        model.PASTA_7 = PASTA_7
    if "NCCLoss" in exp_name:
        # Duplicate loss with weights [1, 0]: only the first output trains.
        model.compile(
            optimizer = optimizer,
            loss = [ affine_Loss_zoo(tgt, Affine_Warped, "NCCLoss"),
                     affine_Loss_zoo(tgt, Affine_Warped, "NCCLoss")],
            loss_weights = [1, 0],
            metrics=[metrics.NCC_Score, metrics.NCC_Score]
        )
    return model
def DLIR3D(exp_name, optimizer, PASTA):
def Conv3dBlock(filters, x):
dpr = 0.2
x = Conv3D(
filters, kernel_size=3,
padding='same',
data_format='channels_first',
use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation(lambda x : relu(x, alpha = 0.01))(x)
x = Dropout(dpr, seed = 1)(x)
x = Conv3D(
filters, kernel_size=3,
padding='same',
data_format='channels_first',
use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation(lambda x : relu(x, alpha = 0.01))(x)
x = Dropout(dpr, seed = 2)(x)
x = AveragePooling3D(pool_size=2, data_format='channels_first')(x)
return x
src = Input((64, 64, 64), name = 'src')
tgt = Input((64, 64, 64), name = 'tgt')
src1 = Lambda(lambda x: K.expand_dims(x, axis=1))(src)
tgt1 = Lambda(lambda x: K.expand_dims(x, axis=1))(tgt)
x1 = src1
x2 = tgt1
# num_channels = [ 64, 64, 32, 16, 8, 8]
num_channels = [ 64, 32, 32, 32, 16, 16]
for level in num_channels:
# print(level)
x1 = Conv3dBlock(level, x1)
x2 = Conv3dBlock(level, x2)
x = Concatenate( axis = 1 )([x1, x2])
x = Flatten(data_format = 'channels_first')(x)
if PASTA == True:
# Translation_2D_Range = np.arange(-0.20,0.201,0.1)
# Rotation_2D_Range = np.arange(-30,30.01,10)
# Shear_2D_Range = np.arange(-0.1,0.101,0.05)
# Scale_2D_Range = np.arange(0.90,1.101,0.05)
initial_transformation_params = [
0.0, 0.0, 0.0,
0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
1.0, 1.0, 1.0]
initial_transformation_params = tf.constant_initializer(value=initial_transformation_params)
transformation_params = Dense(15, bias_initializer = initial_transformation_params, name = 'transformation_params')(x)
max_value = 1.0
Threshold = 0.0
transformation_params = Activation(lambda x : relu(x, alpha = 0.01, max_value = max_value, threshold = Threshold))(transformation_params)
branch_outputs = []
for i in range(15):
out = Lambda(lambda x: x[:, i], name = "Splitting_" + str(i))(transformation_params)
if i == 0 or i == 1 or i == 2:
y_max, y_min = 0.201, -0.201
Pasta_Para = Lambda( lambda x: tf.expand_dims( (y_max - y_min) * x + y_min, 1 ),
name = 'Mapping_' + str(i))(out)
branch_outputs.append(Pasta_Para)
elif i == 3 or i == 4 or i == 5:
y_max, y_min = 20.00 * 0.01745, -20.00 * 0.01745
# y_max, y_min = 0.34900, -0.34900
# y_max, y_min = 30.01 * 0.01745, -30.01 * 0.01745
# y_max, y_min = 30.01, -30.01
Pasta_Para = Lambda(lambda x: tf.expand_dims(tf.sin((y_max - y_min) * x + y_min),1), name = 'Mapping_' + str(i))(out)
branch_outputs.append(Pasta_Para)
elif i > 5 and i < 12:
y_max, | |
"total": {"type": "string"},
"limit": {"type": "string"},
"offset": {"type": "string"},
"order_by": {"type": "string"},
},
}
@property
def post_schema_output(self):
"""
JSON Schema to validate POST request body. Abstract.
Every schema must be a dict.
:return: dict
"""
return {}
@property
def post_schema_input(self): # pragma: no cover
"""
JSON schema of our model is generated here. Basically it is used for
Create method - list handler, method POST.
Hint: Modified version of this schema can be used for Update (PUT,
detail view).
:return: JSON schema of given model_cls Model.
:rtype: dict
"""
return self.model_cls.to_schema(excluded=['id'])
@property
def default_filter(self):
"""
Default queryset WHERE clause. Used for list queries first.
One must redefine it to customize filters.
:return: dict
"""
return {}
@property
def default_order_by(self):
"""
Default queryset ORDER BY clause. Used for list queries.
Order by must contain a string with a model field name.
"""
return ()
    def prepare(self):
        """
        Tornado per-request hook: for GET requests only, get and validate
        the pagination params (offset, limit; total is boolean 1 or 0) via
        the module-level ``prepare`` helper.

        NOTE(review): the module-level function ``prepare`` is shadowed by
        this method name inside the class body; the call below resolves to
        the global helper defined elsewhere in this module, not recursion.
        """
        if self.request.method == 'GET':
            # Delegates to the module-level helper (not self.prepare).
            prepare(self)
@classmethod
def qs_filter(cls, qs, flt, value, process_value=True):
"""
Private method to set WHERE part of query.
If required, Django-style filter is available via qs.filter()
and peewee.DQ - this method provides joins.
Filter relational operators are:
* NOT - '-', not operator, should be user as prefix
* < - 'lt', less than
* > - 'gt', greater than
* <= - 'lte', less than or equal
* >= - 'gte', greater than or equal
* != - 'ne', not equal
* LIKE - 'like', classic like operator
* ILIKE - 'ilike', case-insensitive like operator
* IN - 'in', classic in. Values should be separated by comma
* ISNULL - 'isnull', operator to know if smth is equal to null. Use -<fieldname>__isnull for IS NOT NULL
"""
neg = False
if flt[0] in '-':
# Register NOT filter clause
neg = True
flt = flt[1:]
fld_name, _, k = flt.rpartition('__')
if not fld_name:
# No underscore, simple filter
fld_name, k = k, ''
# Get filter
op = FILTER_MAP.get(k, operator.eq)
if neg:
_op = op
op = lambda f, x: operator.inv(_op(f, x))
# Get field from model
# raised AttributeError should be handled on higher level
fld = getattr(cls.model_cls, fld_name)
# Additional value processing
if process_value:
_v = value.decode()
if isinstance(fld, peewee.BooleanField) and _v in ('0', 'f'):
# Assume that '0' and 'f' are FALSE for boolean field
_v = False
elif k == 'in':
# Force set parameter to list
_v = _v.split(',')
elif k == 'isnull':
# ISNULL. Force set parameter to None
_v = None
else:
_v = value
# Send parameter to ORM
return qs.where(op(fld, _v))
@classmethod
def qs_order_by(cls, qs, value, process_value=True):
"""
Set ORDER BY part of response.
Fields are passed in a string with commas to separate values.
'-' prefix means descending order, otherwise it is ascending order.
:return: orderbyed queryset
:rtype: queryset
"""
# Empty parameters are skipped
if process_value:
_v = (_ for _ in value.decode().split(',') if _)
else:
_v = (value,)
for ordr in _v:
if ordr[0] == '-':
# DESC order
fld = getattr(cls.model_cls, ordr[1:])
qs = qs.order_by(fld.desc(), extend=True)
else:
# ASC order
fld = getattr(cls.model_cls, ordr)
qs = qs.order_by(fld, extend=True)
return qs
    def get_queryset(self, paginate=True):
        """
        Get queryset for model.

        Combines, in order: pagination (limit/offset), ``default_filter``,
        ``default_order_by``, then every request query argument (skipping
        the reserved pagination args). All arguments for the WHERE clause
        are passed with AND condition. Override this method to change
        logic. By default it uses qs_filter and qs_order_by.

        :param paginate: when False, skip limit/offset (used for counting)
        :return: peewee queryset (not yet executed)
        """
        # Set limit / offset parameters
        qs = self.get_base_queryset()
        if paginate:
            qs = qs.limit(self.limit).offset(self.offset)
        # Set default filter values
        for k, v in self.default_filter.items():
            qs = self.qs_filter(qs, k, v, process_value=False)
        # Set default order_by values
        for v in self.default_order_by:
            qs = self.qs_order_by(qs, v, process_value=False)
        for k, v in self.request.arguments.items():
            if k in self.exclude_filter_args:
                # Skipping special arguments (limit, offset etc)
                continue
            elif k == 'order_by':
                # Ordering
                qs = self.qs_order_by(qs, v[0])
            else:
                # Filtration. All arguments passed with AND condition (WHERE
                # <...> AND <...> etc)
                qs = self.qs_filter(qs, k, v[0])
        return qs
    async def _get_items(self, qs):
        """
        Gets queryset and paginates it.

        It executes database query. If total amount of items should be
        received (self.total = True), queries are executed in parallel
        (items + unpaginated COUNT).

        :param qs: peewee queryset
        :return: tuple: executed query, pagination info (dict)
        :raises: In case of bad query parameters - HTTP 400.
        """
        pagination = {'offset': self.offset}
        try:
            if self.total:
                # Execute requests to database in parallel (items + total)
                awaitables = []
                qs_total = self.get_queryset(paginate=False)
                if self.prefetch_queries:
                    # Support of prefetch queries
                    awaitables.append(self.application.objects.prefetch(qs,
                        *self.prefetch_queries))
                else:
                    awaitables.append(self.application.objects.execute(qs))
                awaitables.append(self.application.objects.count(qs_total))
                items, total = await multi(awaitables)
                # Set total items number
                pagination['total'] = total
            else:
                if self.prefetch_queries:
                    items = await self.application.objects.prefetch(qs,
                        *self.prefetch_queries)
                else:
                    items = await self.application.objects.execute(qs)
        except (peewee.DataError, ValueError) as e:
            # Bad parameters
            raise HTTPError(
                400,
                body=self.get_response(
                    errors=[
                        {
                            'code': '',
                            'message': 'Bad query arguments',
                            'detail': str(e)
                        }
                    ]
                )
            )
        # Set number of fetched items
        # NOTE(review): 'limit' here is the count actually fetched, not the
        # requested limit -- the original TODO questions this choice.
        pagination['limit'] = len(items) # TODO WTF? Why limit is set?
        return items, pagination
    async def get(self):
        """
        Handles GET request (list endpoint).

        1. Validates GET parameters using GET input schema and validator.
        2. Executes query using given query parameters.
        3. Paginates.
        4. Serializes result (one item at a time, sequentially).
        5. Writes to response, not finishing it.

        :raises: In case of bad query parameters - HTTP 400.
        """
        await self.validate({k: self.get_argument(k) for k in self.request.query_arguments.keys()},
                            self.get_schema_input)
        try:
            qs = self.get_queryset()
        except AttributeError as e:
            # Wrong field name in filter or order_by
            raise HTTPError(
                400,
                body=self.get_response(
                    errors=[
                        {
                            'code': '',
                            'message': 'Bad query arguments',
                            'detail': str(e)
                        }
                    ]
                )
            )
        items, pagination = await self._get_items(qs)
        result = []
        for m in items:
            result.append(await self.serialize(m))
        self.response(result={'items': result}, pagination=pagination)
async def head(self):
"""
Handles HEAD request.
1. Validates GET parameters using GET input schema and validator.
2. Fetches total amount of items and returns it in X-Total header.
3. Finishes response.
:raises: In case of bad query parameters - HTTPError 400.
"""
await self.validate({k: self.get_argument(k) for k in self.request.query_arguments.keys()},
self.get_schema_input)
try:
qs = self.get_queryset(paginate=False)
except AttributeError as e:
# Wrong field name in filter or order_by
# Request.body is not available in HEAD request
# No detail info will be provided
raise HTTPError(400)
try:
total_num = await self.application.objects.count(qs)
except (peewee.DataError, peewee.ProgrammingError, ValueError) as e:
# Bad parameters
# Request.body is not available in HEAD request
# No detail info will be provided
raise HTTPError(400)
self.set_header('X-Total', total_num)
self.finish()
    async def post(self):
        """
        Handles POST request (create).

        Validates data and creates new item via the model's ``_create``
        classmethod. Returns serialized object written to response.

        HTTPError 405 is raised in case of not creatable model (there must
        be _create method implemented in model class).
        HTTPError 400 is raised in case of violated constraints, invalid
        parameters and other data and integrity errors.

        :raises: HTTPError 405, 400
        """
        data = await self.validate(self.request.body, self.post_schema_input)
        try:
            item = await self.model_cls._create(self.application, data)
        except AttributeError as e:
            # We can only create item if _create() model method implemented
            raise HTTPError(
                405,
                body=self.get_response(
                    errors=[
                        {
                            'code': '',
                            'message': 'Method not allowed',
                            'detail': str(e)
                        }
                    ]
                )
            )
        except (peewee.IntegrityError, peewee.DataError) as e:
            # Constraint violation or bad data from the client.
            raise HTTPError(
                400,
                body=self.get_response(
                    errors=[
                        {
                            'code': '',
                            'message': 'Invalid parameters',
                            'detail': str(e)
                        }
                    ]
                )
            )
        self.response(result=await self.serialize(item))
class ApiItemHandler(ApiHandler):
"""
Base Item API Handler.
Supports R, U, D from CRUDL.
"""
    def __init__(self, *args, **kwargs):
        """Initialize the detail handler with no cached model instance."""
        super(ApiItemHandler, self).__init__(*args, **kwargs)
        # Cache slot for the model instance fetched by this request.
        self._instance = None
    @property
    def get_schema_input(self):
        """
        JSON Schema to validate GET request query arguments: the detail
        view accepts no extra parameters, hence the empty, closed schema.
        (Docstring fixed: it previously said "DELETE request body".)

        :returns: GET JSON schema
        :rtype: dict
        """
        return {
            "type": "object",
            "additionalProperties": False,
            "properties": {}
        }
@property
def put_schema_input(self):
"""
JSON Schema to validate PUT request body.
:return: JSON schema of PUT
:rtype: dict
"""
return self.model_cls.to_schema(excluded=['id'])
@property
def delete_schema_input(self):
"""
JSON Schema to validate DELETE request body.
:returns: JSON schema for DELETE.
:rtype: dict
"""
return {
"type": "object",
"additionalProperties": False,
"properties": {}
}
@property
def put_schema_output(self): # pragma: no cover
"""
| |
'''
TODO: make dragging cursor correct
'''
import collections
import random
from common import *
from Draggable import *
from Area import *
from Deck import *
from TokenBank import *
from CardsTileView import *
from DeckManipulator import *
from ThreatDial import *
from SetupDialog import *
from JourneyLogger import *
class _ScoringDialog(QDialog):
    """Dialog showing the final score breakdown for a finished scenario.

    Score = final threat + cost of dead heroes + damage on heroes
            - victory points (per the LotR LCG rules; lower is better).
    Reads all values from the parent MainWindow when shown.
    """
    def __init__(self, parent=None):
        super(_ScoringDialog, self).__init__(parent)
        self.createUI()
    def updateContent(self):
        # Recompute the score from the parent MainWindow's current state.
        mainWindow = self.parentWidget()
        threat = mainWindow.threatDial.value
        dead = 0
        for card in mainWindow.playerDiscardPile.getList(): # dead heroes are in Player Discard Pile
            if isHeroCard(card.info['set'], card.info['id']):
                dead += card.info['cost']
        damages = 0
        for card in mainWindow.heroArea.getList():
            if isHeroCard(card.info['set'], card.info['id']):
                damages += card.getState().get('D', 0) # damage tokens on hero
        victory = mainWindow.victorySpinBox.value()
        score = threat + dead + damages - victory
        # Monospace (<tt>) rich-text block; %1 placeholders are filled with
        # field width 3 so the numeric column lines up.
        content = QString('<tt><center>') + \
            self.tr(' Final Threat Level: %1').arg(threat, 3) + QString('<br>') + \
            self.tr('+ Dead Heroes Cost: %1').arg(dead, 3) + QString('<br>') + \
            self.tr('+ Damages on Heroes: %1').arg(damages, 3) + QString('<br>') + \
            self.tr('- Victory Points: %1').arg(victory, 3) + \
            QString('<hr>') + \
            QString('<h2>%1</h2>').arg(self.tr('Final Score: %1').arg(score, 3)) + \
            QString('</center><br></tt>')
        # NOTE(review): replaces plain spaces so rich text keeps the
        # alignment -- the second argument appears to be a non-breaking
        # space; confirm the literal is U+00A0 in the original file.
        content = content.replace(' ', ' ')
        self.label.setText(content)
    def showEvent(self, event):
        # Restore the last position/size and refresh the numbers on show.
        if hasattr(self, 'lastGeometry'):
            self.setGeometry(self.lastGeometry)
        self.updateContent()
    def closeEvent(self, event):
        # Remember geometry so reopening restores the same placement.
        self.lastGeometry = self.geometry()
        event.accept()
    def createUI(self):
        # Single label plus a Close button stacked vertically.
        self.label = QLabel()
        closeButton = QPushButton(QCoreApplication.translate('QObject', '&Close'))
        closeButton.clicked.connect(self.close)
        layout = QVBoxLayout()
        layout.addWidget(self.label)
        layout.addWidget(closeButton)
        self.setLayout(layout)
        self.resize(300, 300)
        self.setWindowTitle(self.tr('Scoring'))
class _PhaseTips(QDialog):
    """Non-blocking reference dialog: one tab of rules text per game phase.

    The HTML snippets summarize what happens in each of the seven LotR LCG
    round phases; remembers its geometry between openings.
    """
    def __init__(self, parent=None):
        super(_PhaseTips, self).__init__(parent)
        # Phase name -> rich-text summary shown in that phase's tab.
        phaseData = {
            'Resource': 'Add 1 resource to each hero.<br>Draw 1 card.',
            'Planning': 'Play ally and attachment cards. (Resource type must match.)',
            'Quest': '<b>Commit Characters</b>: Exhaust and commit characters to quest.<br><br><b>Staging</b>: Reveal 1 encounter card per player.<br><br><b>Quest Resolution</b>: Compare total committed willpower with total staging threat, add progress tokens to location/quest or raise threat.',
            'Travel': 'May travel to 1 location if there is no active location.',
            'Encounter': '<b>Player Engagement</b>: Players may choose to engage 1 enemy each.<br><br><b>Engagement Checks</b>: Check for each enemy if it engages a player (Engagement cost <= Player\'s threat level).',
            'Combat': 'Deal 1 shadow card to each enemy (from highest engagement cost).<br><br><b>Resolve Enemy Attacks</b>:<br> Choose an enemy<br> Declare defender<br> Resolve shadow effect<br> Determine combat damage<br><br><b>Attack Enemies</b>:<br> Choose an enemy and declare attackers<br> Determine attack strength<br> Determine combat damage',
            'Refresh': 'Each player raises threat level by 1.<br>Ready all cards.<br>Pass First-player token.',
        }
        self.tabWidget = QTabWidget()
        # Tabs in official phase order, one word-wrapped label per phase.
        for phase in ('Resource', 'Planning', 'Quest', 'Travel', 'Encounter', 'Combat', 'Refresh'):
            text = phaseData[phase]
            label = QLabel(text)
            label.setMargin(10)
            label.setWordWrap(True)
            label.setAlignment(Qt.AlignLeft | Qt.AlignTop)
            self.tabWidget.addTab(label, phase)
        closeButton = QPushButton(QCoreApplication.translate('QObject', '&Close'))
        closeButton.clicked.connect(self.close)
        layout = QVBoxLayout()
        layout.addWidget(self.tabWidget)
        layout.addWidget(closeButton)
        self.setLayout(layout)
        self.resize(500, 300)
        self.setWindowTitle(self.tr('Phase Tips'))
    def showEvent(self, event):
        # Restore the last position/size when reopened.
        if hasattr(self, 'lastGeometry'):
            self.setGeometry(self.lastGeometry)
    def closeEvent(self, event):
        # Remember geometry so reopening restores the same placement.
        self.lastGeometry = self.geometry()
        event.accept()
class _About(QMessageBox):
    """Standard "About" box with version, credits and rules/source links."""
    def __init__(self, parent=None):
        # VERSION is a module-level constant imported from common.
        text = '<br><center><h2>The Lord of the Rings: The Card Game</h2><big>version {0}</big><br><br>Program written by amulet (Taiwan)</center><br><br>Try <b>Left / Right / Double Click</b> and <b>Drag & Drop</b> everywhere.<br><br><a href="http://www.fantasyflightgames.com/edge_minisite_sec.asp?eidm=129&esem=4">Game rules</a> available at Fantasy Flight Games website.<br><br><code><a href="https://github.com/amulet-tw/LotR-LCG">Source code</a> licensed under GNU GPL v2.</code>'.format(VERSION)
        super(_About, self).__init__(QMessageBox.Information, QCoreApplication.translate('_About', 'About'), text, QMessageBox.Ok, parent)
class _MulliganDialog(QMessageBox):
    """Non-modal yes/no prompt asking whether to redraw the opening hand.

    Routes the answer to the parent MainWindow's takeMulligan /
    giveUpMulligan; closing the dialog counts as declining.
    """
    def __init__(self, parent=None):
        super(_MulliganDialog, self).__init__(QMessageBox.Question, QCoreApplication.translate('_MulliganDialog', 'Take MULLIGAN?'), QString('<b>%1</b>').arg(QCoreApplication.translate('_MulliganDialog', 'Redraw hand?')), parent=parent)
        yesButton = self.addButton(self.tr("OF COURSE!"), QMessageBox.YesRole)
        yesButton.clicked.connect(self.parentWidget().takeMulligan)
        noButton = self.addButton(self.tr("I'll keep them"), QMessageBox.NoRole)
        noButton.clicked.connect(self.parentWidget().giveUpMulligan)
        # Keeping the hand is the safe default.
        self.setDefaultButton(noButton)
        self.setModal(False)
        self.setMinimumWidth(200)
    def closeEvent(self, event):
        # Dismissing the dialog (e.g. via the title bar) declines the mulligan.
        self.parentWidget().giveUpMulligan()
        event.accept()
class MainWindow(QMainWindow):
AUTOSAVE_INTERVAL = 10000
AUTOSAVE_PATH = './resource/AutoSave.sav'
CONFIG_PATH = './resource/config.ini'
    def __init__(self, parent=None):
        """Build the main window, wire the save-file mapping, and start a game.

        For the solo `MainWindow` (not the multiplayer subclass) this also
        restores a crash auto-save if one is pending, otherwise starts a new
        game, and arms a periodic auto-save timer.
        """
        super(MainWindow, self).__init__(parent)
        self.createUI()
        # Maps save-file keys to the widgets whose state they serialize.
        # NOTE(review): there is no entry for self.playerDeck here, so the
        # player's draw deck is not part of getState()/loadGame() -- confirm
        # this is intended.
        self.nameAreaMapping = {
            'hand': self.handArea,
            'hero': self.heroArea,
            'engaged': self.engagedArea,
            'staging': self.stagingArea,
            'location': self.locationDeck,
            'quest': self.questDeck,
            'encounter': self.encounterDeck,
            'encounterDP': self.encounterDiscardPile,
            'prepare': self.prepareDeck,
            'removed': self.removedPile,
            'playerDP': self.playerDiscardPile,
        }
        self.deckManipulatorList = [] # for bookkeeping existing DeckManipulator instances
        self.scenarioId = 0
        self.playerDeckId = 0
        self.isFirstPlayer = True # might change in MultiplayerMainWindow
        self.playerCount = 1 # might change in MultiplayerMainWindow
        if self.__class__.__name__ == 'MainWindow': # not true in MultiplayerMainWindow
            if self.checkIfprogramCrashed():
                # A leftover auto-save means the last session crashed: restore it.
                self.loadGame(MainWindow.AUTOSAVE_PATH)
            else:
                self.startNewGame()
            # auto save just work in Solo game
            self.prevState = self.getState()
            def autoSave():
                # Only touch the disk when the table state actually changed.
                state = self.getState()
                if state != self.prevState:
                    jsonState = self.dumpState(state)
                    with open(MainWindow.AUTOSAVE_PATH, 'w') as f:
                        f.write(jsonState)
                    self.prevState = state
            timer = QTimer(self)
            timer.timeout.connect(autoSave)
            timer.start(MainWindow.AUTOSAVE_INTERVAL)
    def addDeckManipulator(self, widget):
        """Track a DeckManipulator window so cleanup() can close it later."""
        self.deckManipulatorList.append(widget)
def cleanupDeckManipulators(self):
for widget in self.deckManipulatorList:
try:
widget.close()
except RuntimeError:
pass
self.deckManipulatorList = []
    def cleanup(self):
        """Reset the whole table: counters, helper windows, every play area."""
        self.victorySpinBox.setValue(0)
        self.cleanupDeckManipulators()
        for area in (self.engagedArea, self.heroArea, self.handArea, self.stagingArea, self.locationDeck, self.questDeck, self.encounterDeck, self.encounterDiscardPile, self.prepareDeck, self.removedPile, self.playerDeck, self.playerDiscardPile):
            area.setList([])
            while True: # remove Card from scene until none left
                # Inner for/else: scan the scene items for a Card; removing one
                # invalidates the iterator, so break and rescan.  When the scan
                # completes without finding a Card, the for's else-branch exits
                # the while loop.
                for card in area.scene.items():
                    if isinstance(card, Card):
                        area.scene.removeItem(card)
                        del card
                        break
                else:
                    break
            area.update()
        self.journeyLogger.clearLog()
def startNewGame(self):
self.cleanup()
setupDialog = SetupDialog(self)
setupDialog.exec_()
self.scenarioId = setupDialog.selectedScenarioId()
self.playerDeckId = setupDialog.selectedDeckId()
self.setup()
self.prisonAct.setEnabled(self.scenarioId == 2) # is it Escape From Dol Guldur?
    def restartGame(self):
        """Replay the current scenario/deck selection from scratch."""
        self.cleanup()
        self.setup()
        self.prisonAct.setEnabled(self.scenarioId == 2) # is it Escape From Dol Guldur?
    def startNewGameAction(self):
        """Menu-action slot: delegates to startNewGame()."""
        self.startNewGame()
    def restartGameAction(self):
        """Menu-action slot: delegates to restartGame()."""
        self.restartGame()
def saveGame(self):
state = self.getState()
state['version'] = VERSION
jsonState = self.dumpState(state)
filePath = QFileDialog.getSaveFileName(self, QCoreApplication.translate('MainWindow', 'Save game'), 'LotRLCG.sav', QCoreApplication.translate('MainWindow', 'Game Save (*.sav)'))
if filePath:
if not saveFile(filePath, jsonState):
QMessageBox.critical(self, QCoreApplication.translate('MainWindow', "Can't save game"), QCoreApplication.translate('MainWindow', 'Failed to write file!'))
def loadGame(self, filePath=''):
if not filePath:
filePath = QFileDialog.getOpenFileName(self, QCoreApplication.translate('MainWindow', 'Load game'), '.', QCoreApplication.translate('MainWindow', 'Game Save (*.sav)'))
if filePath:
file = QFile(filePath)
if file.open(QIODevice.ReadOnly | QIODevice.Text):
jsonState = str(file.readAll())
try:
state = json.loads(jsonState, encoding='ascii')
except ValueError:
QMessageBox.critical(self, QCoreApplication.translate('MainWindow', "Can't load game"), QCoreApplication.translate('MainWindow', 'Game save corrupted!'))
return
self.victorySpinBox.setValue(state['victory'])
self.threatDial.setValue(state['threat'])
for (name, area) in self.nameAreaMapping.items():
area.setState(state[name])
file.close()
else:
QMessageBox.critical(self, QCoreApplication.translate('MainWindow', "Can't load game"), QCoreApplication.translate('MainWindow', 'Failed to open file!'))
    def dumpState(self, dictObject):
        """Serialize a state dict to compact JSON.

        NOTE: the `encoding` keyword of json.dumps is Python 2 only; this file
        is Python 2 / PyQt4 code throughout.
        """
        return json.dumps(dictObject, separators=(',', ':'), encoding='ascii')
    def getState(self):
        """Snapshot the table into a plain dict (counters plus every mapped area)."""
        state = {}
        state['victory'] = self.victorySpinBox.value()
        # NOTE(review): `value` has no parentheses here while victorySpinBox
        # above uses value() -- presumably threatDial is a custom widget whose
        # `value` is a plain attribute (setValue() is used elsewhere); confirm,
        # since a bound method here would break json.dumps in dumpState().
        state['threat'] = self.threatDial.value
        for (name, area) in self.nameAreaMapping.items():
            state[name] = area.getState()
        return state
    def setup(self):
        """Deal the player cards, then ask about a mulligan (which in turn
        triggers the encounter setup once the decision is made)."""
        self.setupPlayerCards()
        self.promptMulligan()
def setupPlayerCards(self):
heroList = []
playerList = []
for (set_, id) in playerDecksInfo[self.playerDeckId]['deck']:
if isHeroCard(set_, id):
heroList.append((set_, id))
else:
playerList.append((set_, id))
random.shuffle(playerList)
# start creating Card instances
for (set_, id) in heroList:
self.heroArea.addCard(Card(cardsInfo[set_][id], revealed=True))
for (set_, id) in playerList:
self.playerDeck.addCard(Card(cardsInfo[set_][id]))
for i in range(6):
if self.playerDeck.getList():
card = self.playerDeck.draw()
if not card.revealed():
card.flip()
self.handArea.addCard(card)
threatValue = 0
for card in self.heroArea.getList():
threatValue += card.info.get('cost', 0)
self.threatDial.setValue(threatValue)
    def promptMulligan(self):
        """Show the (non-modal) mulligan dialog; it calls back into
        takeMulligan()/giveUpMulligan() with the player's decision."""
        mulliganDialog = _MulliganDialog(self)
        mulliganDialog.show()
def takeMulligan(self):
for i in range(6):
card = self.handArea.draw()
card.flip()
self.playerDeck.addCard(card)
self.playerDeck.shuffle()
for i in range(6):
card = self.playerDeck.draw()
card.flip()
self.handArea.addCard(card)
self.mulliganDecisionIsMade()
    def giveUpMulligan(self):
        """Keep the current hand; just proceed with the rest of the setup."""
        self.mulliganDecisionIsMade()
    def mulliganDecisionIsMade(self):
        """Continue setup after the mulligan choice: build the encounter side
        of the table and log the initial state."""
        self.setupEncounterCards()
        self.logInitialState()
def setupEncounterCards(self):
assert(self.isFirstPlayer)
scenarioId = self.scenarioId
heroList = [] # additional cards that First Player gains control, according to quest card's instructions
questList = []
encounterList = []
stagingList = []
prepareList = []
for encounterName in scenariosInfo[scenarioId]['encounters']:
for set_ in SETS:
for (id, card) in enumerate(cardsInfo[set_]):
if card['icon'] == encounterName and card['type'] != 'quest':
for i in range(card['quantity']):
encounterList.append((set_, id))
random.shuffle(encounterList)
s = ''
if scenarioId <= 2:
s = 'core'
elif scenarioId <= 8:
s = 'mirkwood'
elif scenarioId == 9:
s = 'osgiliath'
elif scenarioId <= 12:
s = 'khazaddum'
else:
s = 'dwarrowdelf'
# EXPANSION
if scenarioId == 0: # Passage Through Mirkwood
questList = [(s, 119), (s, 120), (s, 121 + random.choice((0, 1)))]
stagingList = [(s, 96), (s, 99)]
for card in stagingList:
encounterList.remove(card)
random.shuffle(encounterList)
elif scenarioId == 1: # Journey Along the Anduin
questList = [(s, 126), (s, 127), (s, 128)]
hillTroll = (s, 82)
for i in range(self.playerCount):
stagingList.append(encounterList.pop(-1)) # draw one card from encounter deck to staging area. 1 card per player.
hillTrollAppeared = False
for card in stagingList:
if card == hillTroll:
hillTrollAppeared = True
if not hillTrollAppeared:
stagingList.append(hillTroll)
encounterList.remove(hillTroll)
random.shuffle(encounterList)
elif scenarioId == 2: # Escape From Dol Guldur
questList = [(s, 123), (s, 124), (s, 125)]
prepareList = [(s, 102)] # Nazgul of Dol Guldur
encounterList.remove((s, 102))
stagingList = [(s, 108), (s, 109), (s, 110)] # Gandalf's Map, Dungeon Torch, Shadow Key
for card in stagingList:
encounterList.remove(card)
elif scenarioId == 3: # The Hunt for Gollum
questList = [(s, 11), (s, 12), (s, 13)]
if self.playerCount == 1:
stagingList.append(encounterList.pop(-1)) # 1 card | |
<gh_stars>10-100
from __future__ import generators
from time import localtime, sleep
import os, Essbase, wmi,sys
from optparse import OptionParser
import win32serviceutil
import random
import zlib
from win32wnet import WNetAddConnection2, WNetCancelConnection2, error
import wmi
# Module-level state and command-line handling for the Essbase backup job.
listTemp=[]          # scratch list reused while building dictApps_Dbs values
listComplete=[]      # names of archives that finished successfully
dictApps_Dbs={}      # "App.Db" -> list of files belonging to that database
strDate=localtime()
parser= OptionParser()
parser.add_option("-s","--server",type="string",dest="Server",default="testserver",help="Enter the Essbase Server name.")
parser.add_option("-f","--files",type="string",dest="bDataFiles",default="F",help="Data files to be backed up?")
parser.add_option("-c","--clear",type="string",dest="bClear",default="F",help="Log files to be clear?")
parser.add_option("-r","--running",type="string",dest="bRunning",default="F",help="Shutdown running Application/Database for backup?")
#Command line arguments to process before backup is started
(options, args) = parser.parse_args()
print options.Server
print options.bDataFiles
print options.bClear
print options.bRunning
# Flags are passed as "T"/"F" strings; normalize to upper case once.
bRunning=options.bRunning.upper()
bClear=options.bClear.upper()
bDataFiles=options.bDataFiles.upper()
#This resets the log clear and data backup false to False is the running app/db cannot be shutdown
if bRunning=='F':
    bClear='F'
    bDataFiles='F'
#Creates List of file extensions to search before backup purposes
#Depending on command line arguments and Essbase running state
if bDataFiles=='F':
    if bRunning=='F':
        extensionList=["db","otl","csc","rul","app"]
    else:
        extensionList=["esm","tct","db","otl","csc","rul","app","log"]
else:
    if bRunning=='F':
        extensionList=["db","otl","csc","rul","app"]
    else:
        extensionList=["ind","pag","esm","tct","db","otl","csc","rul","app","log"]
# NOTE(review): this module-level hexStr is shadowed by the identical value in
# Essbase_appdbs_status.__init__ and appears unused afterwards.
hexStr='7bf255b64f46c12c0a240dbf5d4c02da1d805484'.decode('hex') #Decodes to HyperionTest
class Essbase_appdbs_status():
    """Helper around an Essbase MaxL session plus Windows service/WMI calls.

    Used by the backup job to: query whether applications/databases are
    loaded, load/unload them around the backup, open and close network shares,
    and map running esssvr.exe process IDs back to application names by
    scanning the Essbase log backwards.

    NOTE(review): the `<PASSWORD>` tokens below are anonymization placeholders
    left by the publisher -- the file is not runnable as-is; they presumably
    stood for self.pWord.  This is Python 2 code (print statements, `<>`).
    """
    def __init__(self):
        # Numeric status codes as returned by 'display application/database'.
        self.Not_loaded=0
        self.Loading=1
        self.Loaded=2
        self.Unloading=3
        self.Essbase=Essbase.Essbase()
        self.hexStr='7bf255b64f46c12c0a240dbf5d4c02da1d805484'.decode('hex')#This hextstring decodes to: HyperionTest
        self.pWord=self.tinycode('nokawtg',self.hexStr,reverse=True)
    def essbase_service(self,machine):
        """Return 1 when the EssbaseService Windows service is running, else 0."""
        #Uses the SC Query to obtain Essbase running state and return a 1 or 0
        service="EssbaseService"
        # Status index 1 == SERVICE_RUNNING (4) per win32service constants.
        if win32serviceutil.QueryServiceStatus(service, machine)[1] == 4:
            print "Essbase is running!!!!!!!!!!!!!!!!"
            return 1
        else:
            return 0
    def display_application(self,app):
        """Return self.Loaded / self.Not_loaded for *app*, or None when the
        status is transitional (Loading/Unloading) or an error occurred."""
        #Checks the application status
        firstMessage=0
        #print ':%s:' % app
        stat_level=self.Essbase.do('display application %s' % app)
        if stat_level<>0:
            # MaxL failed; drain and show the queued messages.
            firstMessage=self.Essbase.pop_msg()
            print firstMessage
            print self.Essbase.pop_msg()
            print self.Essbase.pop_msg()
            print self.Essbase.pop_msg()
        if stat_level==0:
            row=self.Essbase.fetch_row()
            #print row
            result=row[14]  # column 14 holds the application load status
            if result==self.Loaded:
                print "App: %s is Loaded" % app
                return self.Loaded
            elif result==self.Not_loaded:
                print "App: %s is NOT Loaded" % app
                return self.Not_loaded
            elif result==self.Loading:
                print "App: %s is Loading" % app
            elif result==self.Unloading:
                print "App: %s is Unloading" % app
        elif firstMessage[0]<>1051030:
            # Any MaxL error other than 1051030 is treated as "not loaded".
            # NOTE(review): `result` is assigned but never returned here, so
            # callers still receive None on this path -- confirm intended.
            result=self.Not_loaded
    def connect(self,user,password,server):
        """Open the MaxL session; the hypadmin password is resolved internally."""
        #connect to Essbase
        if user.lower()=='hypadmin':password=<PASSWORD> #If hypadmin is used, pull the password internally
        self.Essbase.connect(user,password,server)
    def disconnect(self):
        """Close the MaxL session."""
        #housekeeping disconnect from Essbase
        self.Essbase.disconnect()
    def display_database(self,dbs):
        """Same as display_application(), but for an App.Db pair (*dbs*)."""
        #Checks the database status
        firstMessage=0
        if dbs=="TestApp.Direct":dbs="TestApp.'Direct'" #This was need to place single quotes around the MaxL reserved word: Direct
        stat_level=self.Essbase.do("display database %s" % dbs)
        if stat_level<>0:
            firstMessage=self.Essbase.pop_msg()
            print firstMessage
            print self.Essbase.pop_msg()
            print self.Essbase.pop_msg()
            print self.Essbase.pop_msg()
        if stat_level==0:
            row=self.Essbase.fetch_row()
            result=row[31]  # column 31 holds the database load status
            if result==self.Loaded:
                print "DBS: %s is Loaded" % dbs
                return self.Loaded
            elif result==self.Not_loaded:
                print "DBS: %s is NOT Loaded" % dbs
                return self.Not_loaded
            elif result==self.Loading:
                print "DBS: %s is Loading" % dbs
            elif result==self.Unloading:
                print "DBS: %s is Unloading" % dbs
        elif firstMessage[0]<>1051030:
            # See display_application(): value is set but not returned.
            result=self.Not_loaded
    def unload_db(self,app,dbs):
        """Unload one database (used before backup); returns the MaxL status."""
        #Unload database. Used before backup starts
        if dbs=="Direct":dbs="'Direct'" #This was need to place single quotes around the MaxL reserved word: Direct
        stat_level=self.Essbase.do('alter application %s unload database %s' % (app,dbs))
        sleep(5)  # give Essbase time to settle before the next command
        return stat_level
    def load_db(self,app,dbs):
        """Reload one database after the backup completed."""
        #Load database after backup complete
        if dbs=="Direct":dbs="'Direct'" #This was need to place single quotes around the MaxL reserved word: Direct
        stat_level=self.Essbase.do('alter application %s load database %s' % (app,dbs))
        sleep(5)
        return stat_level
    def unload_app(self,app):
        """Unload a whole application before backup / log clearing."""
        #Unload app before backup and clearing log file
        stat_level=self.Essbase.do('alter system unload application %s' % (app))
        sleep(5)
        return stat_level
    def load_app(self,app):
        """Reload an application after backup / log clearing."""
        #Load app after backup and log clear completed
        stat_level=self.Essbase.do('alter system load application %s' % (app))
        sleep(5)
        return stat_level
    def display_application_all(self):
        """Return ([running app names], [stopped app names])."""
        #This creates two lists. One list for all Running apps
        #Second list for all stopped apps
        listRunning=[]
        listStopped=[]
        firstMessage=0
        stat_level=self.Essbase.do("display application all")
        if stat_level<>0:
            firstMessage=self.Essbase.pop_msg()
            print firstMessage
            print self.Essbase.pop_msg()
            print self.Essbase.pop_msg()
            print self.Essbase.pop_msg()
        elif stat_level==0:
            row=self.Essbase.fetch_row()
            while type(row)!=type(None):  # fetch until no rows remain
                if row[14]==2:  # 2 == Loaded
                    listRunning.append(row[0])
                else:
                    listStopped.append(row[0])
                try:
                    row=self.Essbase.fetch_row()
                except AttributeError:
                    break
        return listRunning, listStopped
    def display_database_all(self):
        """Return ([running "App:Db" names], [stopped "App:Db" names])."""
        #This creates two lists. One list for all Running databases
        #Second list for all stopped databases
        listRunning=[]
        listStopped=[]
        firstMessage=0
        stat_level=self.Essbase.do("display database all")
        if stat_level<>0:
            firstMessage=self.Essbase.pop_msg()
            print firstMessage
            print self.Essbase.pop_msg()
            print self.Essbase.pop_msg()
            print self.Essbase.pop_msg()
        elif stat_level==0:
            row=self.Essbase.fetch_row()
            while type(row)!=type(None):
                if row[31]==2:  # 2 == Loaded
                    listRunning.append('%s:%s' % (row[0],row[1]))
                else:
                    listStopped.append('%s:%s' % (row[0],row[1]))
                try:
                    row=self.Essbase.fetch_row()
                except AttributeError:
                    break
        return listRunning, listStopped
    def netaddconnection(self,server,user,password):
        """Map \\\\server with the given credentials; on error 1219 (conflicting
        credentials) drop the existing connection and retry once."""
        #Creates a connection to the server using specified creditentials
        if user=='hypadmin':
            password=<PASSWORD>
            user='global\\%s' % user
        try:
            WNetAddConnection2(0,None,'\\\\%s' % server,None,user,password)
        except Exception, err:
            if isinstance(err,error):
                print err
                if err[0]==1219:
                    self.netcancelconnection(server)
                    WNetAddConnection2(0,None,'\\\\%s' % server,None,user,password)
    def netcancelconnection(self,server):
        """Drop the network connection to *server*; error 2250 (connection does
        not exist) is ignored."""
        #Housekeeping to shutdown the server connection
        #If this is not run, and future connections will use this existing
        #connection.
        try:
            WNetCancelConnection2('\\\\%s' % server, 0, 0)
        except Exception, err:
            if isinstance(err,error):
                print err
                if err[0]==2250:
                    pass
    def tinycode(self,key, text, reverse=False):#Encrypt/Decrypt code
        """XOR-obfuscate (with zlib) or deobfuscate *text* using *key* as the
        PRNG seed; reverse=True decodes."""
        rand = random.Random(key).randrange
        if not reverse:
            text = zlib.compress(text)
        text = ''.join([chr(ord(elem)^rand(256)) for elem in text])
        if reverse:
            text = zlib.decompress(text)
        return text
    def app_process_id(self,server,luser,pword):
        """Return the PIDs of all esssvr.exe processes on *server* via WMI."""
        #This code uses WMI interface to obtain the application PIDs
        #A list is returned with the PIDs as numeric values
        lstProcess_ID=[]
        if luser=='hypadmin':
            pword=self.pWord
            luser='global\\%s' % luser
        c=wmi.WMI(computer=server,user=luser,password=pword)
        for process in c.Win32_Process(name='esssvr.exe'):
            #print process.ProcessID, process.Name
            lstProcess_ID.append(process.ProcessID)
        return lstProcess_ID
    def BackwardsReader(self,file, BLKSIZE = 4096):
        """Read a file line by line, backwards"""
        # Generator: maintains a tail buffer and walks the file in BLKSIZE
        # chunks from the end, yielding complete lines (newline included).
        buf = ""
        file.seek(-1, 2)
        lastchar = file.read(1)
        trailing_newline = (lastchar == "\n")
        while 1:
            newline_pos = buf.rfind("\n")
            pos = file.tell()
            if newline_pos != -1:
                # Found a newline
                line = buf[newline_pos+1:]
                buf = buf[:newline_pos]
                if pos or newline_pos or trailing_newline:
                    line += "\n"
                yield line
            elif pos:
                # Need to fill buffer
                toread = min(BLKSIZE, pos)
                file.seek(-toread, 1)
                buf = file.read(toread) + buf
                file.seek(-toread, 1)
                if pos == toread:
                    buf = "\n" + buf
            else:
                # Start-of-file
                return
    def find_application_process_id(self,Srvr,ID):
        """Scan ESSBASE.LOG backwards for *ID* (a PID, passed as a string) and
        return (application name found in [brackets] on that line, ID)."""
        #match application to process id from Essbase Log
        #The numberic ID must be converted to a string first. Such as str(ID)
        #The Essbase log is read in reverse to find the ID
        essbase_log=open(r'\\%s\d$\Hyperion\AnalyticServices\ESSBASE.LOG' % Srvr,'rb')
        rline=self.BackwardsReader(essbase_log)
        strApp=''
        while 1:
            tempLine=rline.next()
            if tempLine.find(ID)<>-1:
                print tempLine
                strApp=tempLine[tempLine.find('[')+1:tempLine.find(']')]
                break
        del(rline)
        essbase_log.close()
        return strApp,ID
def dirwalk(dir,extension):#finds all files matching the file extensions specified
    """Walk a directory tree (generator), yielding [directory, filename] for
    every file whose extension -- compared case-insensitively and without the
    leading dot -- equals *extension*.

    Bug fix: the old check compared the last THREE characters of the name
    (f[-3:]) with the extension, so two-character extensions such as "db"
    could never match (the slice still contained the dot), and dotless names
    ending in the letters of a three-character extension matched by accident.
    """
    for f in os.listdir(dir):
        fullpath = os.path.join(dir,f)
        if os.path.isdir(fullpath) and not os.path.islink(fullpath):
            for x in dirwalk(fullpath,extension): # recurse into subdir
                yield x
        elif f.lower().endswith('.' + extension.lower()):
            yield [dir,f]
def compressFiles(archiveName,fileList7z):
    """Compress the files listed in *fileList7z* (a 7z @listfile) into
    d:\\Hyperion_files\\<archiveName>.7z and return the exit status.

    The 7z compressor gives a much higher compression ratio than zip, at the
    cost of slower compression; the target directory is hard-coded.
    """
    command = 'C:\\Pstools\\7z.exe a -t7z d:\\Hyperion_files\\%s.7z @%s' % (archiveName,fileList7z)
    return os.system(command)
def storeFiles(year,month,day):
    """Bundle every per-database .7z archive plus all.log into a single dated
    archive under d:\\Hyperion_files\\archive and return the exit status.

    Runs 7z with -mx0 ("store" mode), so the already-compressed archives are
    not recompressed.
    """
    command = 'C:\\Pstools\\7z.exe a -t7z -mx0 d:\\Hyperion_files\\archive\\%s_%s_%s.7z d:\\Hyperion_files\\*.7z d:\\Hyperion_filelst\\all.log' % (year,month,day)
    return os.system(command)
def completedFiles(strKey):
    """Record that the archive for *strKey* ("App.Db") finished and delete its
    file list from d:\\Hyperion_filelst.

    The filelst directory should be empty when compression completes; any
    remaining list file means a compression run may have failed.
    """
    #Once the application and related databases are archived, the filelist used to create them is deleted from the D:\Hyperion_filelst directory
    #The Hyperion_filelst directory will be empty when the compression is complete.
    #Any file list that remain means that compression may have failed
    print '%s.7z is complete' % strKey
    listComplete.append('%s.7z' % strKey)  # module-level bookkeeping list
    os.remove('d:\\Hyperion_filelst\\filelist_%s.lst' % strKey)
if __name__=='__main__':
outputFile=open('d:\\Hyperion_filelst\\all.log','wb') #Hardcoded output file for all the file list entries
stat=Essbase_appdbs_status() #Initialize the class
for strExtension in extensionList: #Start creating the file lists. These file list are sent to 7z for compression
for thefiles in dirwalk('d:\\Hyperion\\AnalyticServices\\app\\',strExtension):
if strExtension.lower() in ["app","log"]: #This use the database name of: all for file not related to a specific datbase
appName=thefiles[0][thefiles[0].rfind("\\")+1:]
dbName="all"
else:
appName=thefiles[0][thefiles[0].rfind("\\",1,thefiles[0].rfind("\\")-2)+1:thefiles[0].rfind("\\")]
dbName=thefiles[0][thefiles[0].rfind("\\")+1:]
if "%s.%s" % (appName,dbName) not in dictApps_Dbs.keys():#Creates a dictionary using the App.DB name as a key. The value is a list.
dictApps_Dbs["%s.%s" % (appName,dbName)]=[thefiles[0] + "\\" + thefiles[1]] #This is the first list entry
else:
listTemp=dictApps_Dbs["%s.%s" % (appName,dbName)]#Additional list entries are added by pulling the list out to a temp List,
listTemp.append(thefiles[0] + "\\" + thefiles[1])#appending the new entry, and reseting the dictionary to the updated list
dictApps_Dbs["%s.%s" % (appName,dbName)]=listTemp
outputFile.write('%s\\%s!%s\r\n' % (thefiles[0],thefiles[1],thefiles[1]))#Housekeeping log entry to the all.log
outputFile.flush()
#EssbaseRelated Backup Here
#These files are handled different, since they do not relate to the app\ directory
appName="Essbase"
dbName="all"
#This is the dictionary for all the Essbase related files
essBaseDict={'ESSBASE.BAK':'d:\\Hyperion\\AnalyticServices\\bin\\ESSBASE.BAK','essbase.cfg':'d:\\Hyperion\\AnalyticServices\\bin\\essbase.cfg','essbase.log':'d:\\Hyperion\\AnalyticServices\\essbase.log','ESSBASE.SEC':'d:\\Hyperion\\AnalyticServices\\bin\\ESSBASE.SEC'}
if stat.essbase_service(options.Server)==1: #essbase is running, cannot backup LOG or SEC file
del essBaseDict['ESSBASE.SEC']#This removes the key:value pairs for file that cannot be backuped or cleared when Essbase is running
del essBaseDict['essbase.log']
for strItem in essBaseDict.keys():
outputFile.write('%s\\!%s\r\n' % (essBaseDict[strItem],strItem))#Housekeeping entry to the all.log
outputFile.flush()
if "%s.%s" % (appName,dbName) not in dictApps_Dbs.keys():#Same dictionary entry as the Apps/Databases, but for Essbase itself
dictApps_Dbs["%s.%s" % (appName,dbName)]=["%s" % essBaseDict[strItem]]
else:
listTemp=dictApps_Dbs["%s.%s" % (appName,dbName)]
listTemp.append("%s" % essBaseDict[strItem])
dictApps_Dbs["%s.%s" % (appName,dbName)]=listTemp
else:#Since Essbase is not running, all files can be backed up
for strItem in essBaseDict.keys():
outputFile.write('%s\\!%s\r\n' % (essBaseDict[strItem],strItem))
outputFile.flush()
if "%s.%s" % (appName,dbName) not in dictApps_Dbs.keys():
dictApps_Dbs["%s.%s" % (appName,dbName)]=["%s" % essBaseDict[strItem]]
else:
listTemp=dictApps_Dbs["%s.%s" % (appName,dbName)]
listTemp.append("%s" % essBaseDict[strItem])
dictApps_Dbs["%s.%s" % (appName,dbName)]=listTemp
listKeys=dictApps_Dbs.keys() #Creates list for the App.Database keys
listKeys.sort()#Sorts the list so all Apps and databases are grouped together
if stat.essbase_service(options.Server)==1: #Essbase is running, so many additional checks/procedures will be needed
#One major check is to ensure the "Application" actually is an Essbase | |
# Copyright 2018 German Aerospace Center (DLR)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Created on 08.04.2016
@author: meinel
'''
from F2x.parser import tree
class VarDecl(tree.VarDecl):
    """
    A variable declaration.
    The following properties are available:
    - name: The symbolic name of the variable.
    - type: The C type of this variable. This might be a basic type (REAL, INTEGER, LOGICAL) or TYPE(C) for any
            other type like arrays, derived types or strings.
    - pytype, cstype: The type to be used by Python or C# respectively.
    - intent: May be 'IN', 'OUT' or 'INOUT'.
    - getter: This indicates whether the generated getter should be a 'function' or 'subroutine'.
    - setter (opt): This indicates whether a 'subroutine' should be generated as setter.
    - ftype (opt): The name of the derived type.
    - strlen (opt): The length of the string.
    - kind (opt): The kind specifier if available.
    - dynamic (opt): Indicates whether the variable is 'ALLOCATABLE' or a 'POINTER'.
    - dims (opt): For an array contains a list with the sizes per dimension.
    """
    # Fortran intrinsic type -> Python ctypes type.
    _PYTYPES = {
        "REAL": "ctypes.c_double",
        "INTEGER": "ctypes.c_int",
        "LOGICAL": "ctypes.c_bool",
        "TYPE(C_PTR)": "ctypes.c_void_p",
    }
    # Fortran intrinsic type -> C# type.
    _CSTYPES = {
        "REAL": "Double",
        "INTEGER": "Int32",
        "LOGICAL": "Int32",
        "TYPE(C_PTR)": "IntPtr",
    }
    def _init_children(self):
        """Populate this dict-like node from the entity_decl AST node.

        Uses a cascade of try/except ValueError blocks because tree.select1()
        raises ValueError when a selector does not match exactly one node.
        """
        self["name"] = self._ast.select1("name").tail[0]
        # Identify FORTRAN type and store properties accordingly
        full_spec = self._ast.parent().parent()
        type_spec = full_spec.select1("declaration_type_spec")
        try:
            # Case 1: derived type -> passed through as an opaque C pointer.
            self["ftype"] = type_spec.select1("derived_type_spec name").tail[0]
            self["type"] = "TYPE(C_PTR)"
            self["getter"] = "function"
            self["dynamic"] = False
        except ValueError:
            try:
                # Case 2: fixed-length CHARACTER(<int>) string.
                self["strlen"] = int(type_spec.select1("char_selector int_literal_constant").tail[0])
                self["intent"] = "IN"
                self["type"] = "TYPE(C_PTR)"
                self["pytype"] = "ctypes.c_char_p"
                self["cstype"] = "String"
                self["getter"] = "subroutine"
                self["setter"] = "subroutine"
            except ValueError:
                try:
                    # Case 3: assumed/deferred-length CHARACTER(*) or CHARACTER(:).
                    self["strlen"] = type_spec.select1("char_selector /(\*|:)/")
                    self["intent"] = "IN"
                    self["type"] = "TYPE(C_PTR)"
                    self["pytype"] = "ctypes.c_char_p"
                    self["cstype"] = "String"
                    self["getter"] = "subroutine"
                    self["setter"] = "subroutine"
                except ValueError:
                    # Case 4: plain intrinsic type (REAL/INTEGER/LOGICAL/...).
                    self["type"] = type_spec.select1("intrinsic_type_kind").tail[0]
                    self["getter"] = "function"
                    self["setter"] = "subroutine"
        for attr in full_spec.select(self._prefix + "attr_spec"):
            if 'ALLOCATABLE' in attr.tail:
                self["dynamic"] = 'ALLOCATABLE'
            elif 'POINTER' in attr.tail:
                self["dynamic"] = 'POINTER'
        # Identify array dimensions
        for ast in (self._ast, full_spec):
            dim_nodes = ast.select(self._prefix + "array_spec array_spec_element")
            if not dim_nodes:
                continue
            dims = []
            for node in dim_nodes:
                # Dimension given as a literal size...
                dim = node.select("int_literal_constant")
                if dim:
                    dims.append(dim[0].tail[0])
                    continue
                # ...or as a variable reference; 0 marks an unknown extent.
                dim = node.select("part_ref")
                if dim:
                    dims.append(dim[0].tail[0])
                    break
                dims.append(0)
            if dims:
                self["dims"] = dims
        # Non-string arrays get no setter (access goes through the getter).
        if "dims" in self \
        and "strlen" not in self:
            if "setter" in self:
                del self["setter"]
        if "pytype" not in self \
        and self["type"].upper() in self._PYTYPES:
            self["pytype"] = self._PYTYPES[self["type"].upper()]
        if "cstype" not in self \
        and self["type"].upper() in self._CSTYPES:
            self["cstype"] = self._CSTYPES[self["type"].upper()]
        # KIND may be a literal (KIND=8) or a named constant (KIND=RK).
        try:
            kind_selector = type_spec.select1("kind_selector int_literal_constant")
            self["kind"] = int(kind_selector.tail[0])
        except ValueError:
            try:
                kind_selector = type_spec.select1("kind_selector part_ref")
                self["kind"] = kind_selector.tail[0]
            except ValueError:
                pass
        # INTENT defaults to 'IN' when not declared.
        try:
            intent_spec = type_spec.parent().select1("intent_spec")
            self["intent"] = intent_spec.tail[0]
        except ValueError:
            self["intent"] = 'IN'
        # No setter for PARAMETERs
        if "setter" in self \
        and len(full_spec.select("attr_spec /PARAMETER/")) > 0:
            del self["setter"]
    def with_intent(self, intent):
        """Override the intent and return self (fluent helper)."""
        self["intent"] = intent
        return self
class TypeDef(tree.TypeDef):
    """A Fortran derived type: name, visibility and component declarations."""
    def _init_children(self):
        """Extract the type name, PUBLIC/PRIVATE flag and member fields."""
        self["name"] = self._ast.select1("derived_type_stmt name").tail[0]
        try:
            access = self._ast.select1("access_spec").tail[0]
            self["public"] = (access.upper() == 'PUBLIC')
        except ValueError:
            # No access-spec on the type -> treat as not public.
            self["public"] = False
        fields = []
        for decl in self._ast.select("component_decl"):
            field = VarDecl(decl, 'component_')  # see documentation of VarDecl.__init__
            del field["intent"]  # type components carry no INTENT
            fields.append(field)
        self["fields"] = fields
class SubDef(tree.SubDef):
    """A SUBROUTINE definition: its name plus ordered dummy arguments."""
    _PREFIX = "subroutine"
    def _init_children(self):
        """Collect the routine name and its arguments.

        Two-stage extraction: first gather every declared entity and the dummy
        argument name list, then order the declarations to match the dummy
        list.  Returns the name->VarDecl mapping for reuse by subclasses.
        """
        self["name"] = self._ast.select(self._PREFIX + "_stmt name")[0].tail[0]
        dummy_names = [node.tail[0] for node in self._ast.select("dummy_arg name")]
        declarations = {}
        for entity in self._ast.select("entity_decl"):
            declarations[entity.select1("name").tail[0]] = VarDecl(entity)
        self["args"] = [declarations[argname] for argname in dummy_names]
        return declarations  # to be re-used in child classes.
class FuncDef(SubDef):
    """A FUNCTION definition; extends SubDef with the return-value declaration."""
    _PREFIX = "function"
    def _init_children(self):
        declarations = super(FuncDef, self)._init_children()
        # The result variable is either named via RESULT(...), declared as
        # "<name>_VALUE", or falls back to the function name itself.
        result_nodes = self._ast.select("result_name name")
        if result_nodes:
            self["ret"] = declarations[result_nodes[0].tail[0]]
        else:
            try:
                self["ret"] = declarations[self["name"] + "_VALUE"]
            except KeyError:
                self["ret"] = declarations[self["name"]]
        if "dims" in self["ret"]:
            # Array results cannot be returned by value; expose them through
            # a subroutine with an OUT argument instead.
            self["ret"]["getter"] = "subroutine"
            self["ret"]["intent"] = "OUT"
class Module(tree.Module):
    """A Fortran MODULE: name, USEd modules, derived types, public globals and
    the list of methods selected for export/wrapping."""
    def _init_children(self):
        self["name"] = self._ast.select1("module_stmt name").tail[0]
        self["uses"] = [use.tail[0] for use in self._ast.select("use_stmt name")]
        self["types"] = [
            TypeDef(typedef)
            for typedef in self._ast.select("derived_type_def")
        ]
        # Only module-level variables explicitly marked PUBLIC are exported.
        self["globals"] = [
            VarDecl(var)
            for var in self._ast.select("module > specification_part type_declaration_stmt entity_decl")
            if len(var.parent().parent().select("access_spec /PUBLIC/")) > 0
        ]
#    def export_methods(self, config):
    def export_methods(self, src):
        """Build self["methods"] from the module's functions and subroutines.

        *src* supplies both the parsed config (whose optional [export] section
        whitelists and renames routines) and the raw source lines, which are
        re-scanned textually to recover dynamic array sizes that the AST does
        not carry.
        """
        config = src.config
        if config.has_section("export"):
            export_items = [key for key, _ in config.items("export")]
        else:
            export_items = None  # no whitelist -> export everything
        methods = []
        for funcdef in self._ast.select("function_subprogram") :
            if export_items is None or funcdef.select("function_stmt name")[0].tail[0].lower() in export_items:
                method = FuncDef(funcdef)
                method["export_name"] = config.get("export", method["name"].lower(), fallback=f'{self["name"]}_{method["name"]}')
                if "ret" in method:
                    if "dims" in method["ret"]:
                        # Find the ALLOCATE statement for the result to recover
                        # the size variables of the dynamic array.
                        l_line = [line for line in src.source_lines if method["ret"]["name"] in line and "ALLOCATE" in line]
                        if len(l_line) == 1:
                            #ok, it is a dynamic array, find the size variable of the array
                            l_aux_line = l_line[0][l_line[0].find(method["ret"]["name"]):-2]
                            l_size_var = l_aux_line[len(method["ret"]["name"])+1:-1].split(',')
                            method["ret"]["dims"] = l_size_var
                    if method["ret"]["getter"] == "subroutine":
                        # Returned via OUT argument: give it a distinct name if
                        # it would collide with the function name itself.
                        if method["ret"]["name"] == method["name"]:
                            method["ret"]["name"] = method["export_name"].upper() + '_OUT'
                        method["ret"]["intent"] = "OUT"
                    else:
                        method["ret"]["name"] = method["export_name"].upper() + '_RESULT'
                        del method["ret"]["intent"]
                methods.append(method)
        for subdef in self._ast.select("subroutine_subprogram") :
            if export_items is None or subdef.select("subroutine_stmt name")[0].tail[0].lower() in export_items:
                method = SubDef(subdef)
                method["export_name"] = config.get("export", method["name"].lower(), fallback=f'{self["name"]}_{method["name"]}')
                l_array_args = [ l_arg for l_arg in method["args"] if "dims" in l_arg ]
                if len(l_array_args) > 0:
                    #okay, we have arguments of array type
                    sub_start, sub_end = self._get_subroutine(method["name"], src.source_lines)
                    for arg in l_array_args:
                        self._set_array_size(arg, src.source_lines[sub_start: sub_end])
                if "ret" in method:
                    method["ret"]["name"] = method["export_name"].upper() + '_OUT'
                    method["ret"]["intent"] = "OUT"
                methods.append(method)
        self["methods"] = methods
        # Optional per-method "<name>:Cleanup" config sections attach a free
        # routine to the return value and/or individual arguments.
        for method in methods:
            section_key = "{0}:Cleanup".format(method["name"])
            if config.has_section(section_key):
                # NOTE(review): leftover debug print below.
                if "ret" in method: print("FREE", section_key, method["ret"]["name"])
                if "ret" in method and config.has_option(section_key, method["ret"]["name"]):
                    method["ret"]["free"] = config.get(section_key, method["ret"]["name"])
                for var in method["args"]:
                    if config.has_option(section_key, var["name"]):
                        var["free"] = config.get(section_key, var["name"])
    def _set_array_size(self, a_argument, a_src):
        """Recover the per-dimension size variables of array argument
        *a_argument* by textually scanning the subroutine body *a_src*.

        Checks, in order: explicit sizes in the declaration, an ALLOCATE
        statement in the body, a "!@F2x=>name(...)" markup comment on the
        declaration, and finally falls back to 0 for unknown extents.
        """
        l_arg = a_argument["name"]
        l_arg_len = len(l_arg)
        l_key_len = 8 # keyword "ALLOCATE"
        for index, line in enumerate(a_src) :
            # to do: skip the comments
            l_line = line[line.find("::")+2 : ].strip()
            # this is the declaration line
            if l_line.startswith(l_arg+'(') :
                l_declare = l_line.split('!')
                l_array_var = l_declare[0].strip()
                l_size_var = l_array_var[l_arg_len+1:-1].split(',')
                if l_size_var[0] == ':':
                    # check if the array is dynamically allocated within the function/subroutine body
                    for line in a_src[index:] :
                        line = line.strip()
                        if line.startswith("ALLOCATE") :
                            # skip comment
                            l_alloc = line.split('!')[0].strip()
                            l_line = l_alloc[l_key_len:].strip()[1:-1]
                            l_alloc_list = l_line.split('),')
                            # check if more than one variables are allocated
                            if len(l_alloc_list) > 1 :
                                for l_alloc in l_alloc_list :
                                    l_alloc = l_alloc.strip()
                                    if l_alloc.startswith(l_arg + '(') :
                                        l_aux_line = ''
                                        if l_alloc.endswith(')') :
                                            l_aux_line = l_alloc[l_arg_len+1:-1].strip()
                                        else :
                                            l_aux_line = l_alloc[l_arg_len+1:].strip()
                                        l_size_var = l_aux_line.split(',')
                                        a_argument["dims"] = l_size_var
                                        break
                            else :
                                l_alloc = l_alloc_list[0].strip()
                                if l_alloc.startswith(l_arg + '(') :
                                    l_aux_line = l_alloc[l_arg_len+1:-1].strip()
                                    l_size_var = l_aux_line.split(',')
                                    a_argument["dims"] = l_size_var
                        else :
                            # okay, no size variable is found. It could be "IN" or "INOUT" type,
                            if len(l_declare) == 2 :
                                l_comment = l_declare[1].strip()
                                l_f2x_markup='@F2x=>'
                                if l_comment.startswith(l_f2x_markup) :
                                    l_vars = l_comment.split(l_f2x_markup+l_arg)[1]
                                    l_size_var = l_vars[1:-1].split(',')
                                    a_argument["dims"] = l_size_var
                                else :
                                    # Attention: no information is provided, code is not reliable !!
                                    # But at leaset make sure the dimension is correctly set
                                    n = len(l_size_var)
                                    a_argument["dims"] = [ 0 if x == ':' else x for x in l_size_var ]
                            else :
                                # Same problem as above !!
                                n = len(l_size_var)
                                a_argument["dims"] = [ 0 if x == ':' else x for x in l_size_var ]
                else :
                    # size variables are set explicitly
                    a_argument["dims"] = l_size_var
                break
    def _get_subroutine(self,a_argument, a_src):
        """Return the (start, stop) line indices of subroutine *a_argument*
        within the source-line list *a_src* (0 when not found)."""
        startIndex = 0
        stopIndex =0
        for i in range(len(a_src)):
            l_str = a_src[i].strip()
            if l_str.startswith("SUBROUTINE") and a_argument in l_str :
                startIndex = i
                for j, line in enumerate(a_src[i:]):
                    line = line.strip()
                    if line.startswith("END SUBROUTINE") :
                        stopIndex = i + j
                        break
                break
        return startIndex, stopIndex
# --- scrape artifact: boundary between concatenated files (kept as comment) ---
# <reponame>Orionisxoxo/Integracja_aplikacji_2
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2017 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import inspect
import operator
import sys
import six
import gitlab
import gitlab.base
from gitlab import cli
import gitlab.v4.objects
class GitlabCLI(object):
    """Resolve and execute one CLI invocation (object + action + args).

    Maps the ``what``/``action`` pair from the command line onto the matching
    ``gitlab.v4.objects`` class and its manager, then runs the requested
    action when the instance is called.
    """

    def __init__(self, gl, what, action, args):
        self.cls_name = cli.what_to_cls(what)
        self.cls = gitlab.v4.objects.__dict__[self.cls_name]
        self.what = what.replace("-", "_")
        self.action = action.lower()
        self.gl = gl
        self.args = args
        self.mgr_cls = getattr(gitlab.v4.objects, self.cls.__name__ + "Manager")
        # We could do something smart, like splitting the manager name to find
        # parents, build the chain of managers to get to the final object.
        # Instead we do something ugly and efficient: interpolate variables in
        # the class _path attribute, and replace the value with the result.
        self.mgr_cls._path = self.mgr_cls._path % self.args
        self.mgr = self.mgr_cls(gl)
        # Convert CLI string values to their declared rich types (if any).
        types = getattr(self.mgr_cls, "_types", {})
        if types:
            for attr_name, type_cls in types.items():
                if attr_name in self.args:
                    obj = type_cls()
                    obj.set_from_cli(self.args[attr_name])
                    self.args[attr_name] = obj.get()

    def __call__(self):
        """Dispatch to the most specific handler available for the action."""
        # Check for a method that matches object + action
        method = "do_%s_%s" % (self.what, self.action)
        if hasattr(self, method):
            return getattr(self, method)()

        # Fallback to standard actions (get, list, create, ...)
        method = "do_%s" % self.action
        if hasattr(self, method):
            return getattr(self, method)()

        # Finally try to find custom methods
        return self.do_custom()

    def do_custom(self):
        """Run a custom action registered for the class via cli decorators."""
        in_obj = cli.custom_actions[self.cls_name][self.action][2]

        # Get the object (lazy), then act
        if in_obj:
            data = {}
            if hasattr(self.mgr, "_from_parent_attrs"):
                for k in self.mgr._from_parent_attrs:
                    data[k] = self.args[k]
            if gitlab.mixins.GetWithoutIdMixin not in inspect.getmro(self.cls):
                data[self.cls._id_attr] = self.args.pop(self.cls._id_attr)
            o = self.cls(self.mgr, data)
            method_name = self.action.replace("-", "_")
            return getattr(o, method_name)(**self.args)
        else:
            return getattr(self.mgr, self.action)(**self.args)

    def do_project_export_download(self):
        """Stream a project export archive to stdout."""
        try:
            project = self.gl.projects.get(int(self.args["project_id"]), lazy=True)
            data = project.exports.get().download()
            if hasattr(sys.stdout, "buffer"):
                # python3
                sys.stdout.buffer.write(data)
            else:
                sys.stdout.write(data)
        except Exception as e:
            cli.die("Impossible to download the export", e)

    def do_create(self):
        """Create a new object from the CLI arguments."""
        try:
            return self.mgr.create(self.args)
        except Exception as e:
            cli.die("Impossible to create object", e)

    def do_list(self):
        """List objects matching the CLI arguments."""
        try:
            return self.mgr.list(**self.args)
        except Exception as e:
            cli.die("Impossible to list objects", e)

    def do_get(self):
        """Fetch a single object (by id unless the manager is id-less)."""
        obj_id = None
        if gitlab.mixins.GetWithoutIdMixin not in inspect.getmro(self.mgr_cls):
            obj_id = self.args.pop(self.cls._id_attr)

        try:
            return self.mgr.get(obj_id, **self.args)
        except Exception as e:
            cli.die("Impossible to get object", e)

    def do_delete(self):
        """Delete the object identified by its id attribute."""
        obj_id = self.args.pop(self.cls._id_attr)
        try:
            self.mgr.delete(obj_id, **self.args)
        except Exception as e:
            cli.die("Impossible to destroy object", e)

    def do_update(self):
        """Update an object (by id unless the manager is id-less)."""
        obj_id = None
        if gitlab.mixins.GetWithoutIdMixin not in inspect.getmro(self.mgr_cls):
            obj_id = self.args.pop(self.cls._id_attr)
        try:
            return self.mgr.update(obj_id, self.args)
        except Exception as e:
            cli.die("Impossible to update object", e)
def _populate_sub_parser_by_class(cls, sub_parser):
    """Add the action sub-parsers for object class *cls* to *sub_parser*.

    Standard actions (list/get/create/update/delete) are exposed when the
    matching manager class supports them; their CLI options are derived from
    the manager's declarative attributes (``_from_parent_attrs``,
    ``_list_filters``, ``_create_attrs``, ``_update_attrs``, ...).  Custom
    actions registered in ``cli.custom_actions`` are appended afterwards.
    """
    mgr_cls_name = cls.__name__ + "Manager"
    mgr_cls = getattr(gitlab.v4.objects, mgr_cls_name)

    def _add_parent_args(parser_action):
        # Options required to interpolate the manager's URL path.
        if hasattr(mgr_cls, "_from_parent_attrs"):
            for attr in mgr_cls._from_parent_attrs:
                parser_action.add_argument(
                    "--%s" % attr.replace("_", "-"), required=True
                )

    def _add_id_arg(parser_action):
        # The object id option, when the class declares an id attribute.
        if cls._id_attr is not None:
            id_attr = cls._id_attr.replace("_", "-")
            parser_action.add_argument("--%s" % id_attr, required=True)

    for action_name in ["list", "get", "create", "update", "delete"]:
        if not hasattr(mgr_cls, action_name):
            continue
        sub_parser_action = sub_parser.add_parser(action_name)
        sub_parser_action.add_argument("--sudo", required=False)
        _add_parent_args(sub_parser_action)

        if action_name == "list":
            if hasattr(mgr_cls, "_list_filters"):
                for attr in mgr_cls._list_filters:
                    sub_parser_action.add_argument(
                        "--%s" % attr.replace("_", "-"), required=False
                    )
            sub_parser_action.add_argument("--page", required=False)
            sub_parser_action.add_argument("--per-page", required=False)
            sub_parser_action.add_argument("--all", required=False, action="store_true")

        if action_name == "delete":
            _add_id_arg(sub_parser_action)

        if action_name == "get":
            if gitlab.mixins.GetWithoutIdMixin not in inspect.getmro(cls):
                _add_id_arg(sub_parser_action)
            if hasattr(mgr_cls, "_optional_get_attrs"):
                for attr in mgr_cls._optional_get_attrs:
                    sub_parser_action.add_argument(
                        "--%s" % attr.replace("_", "-"), required=False
                    )

        if action_name == "create":
            if hasattr(mgr_cls, "_create_attrs"):
                # _create_attrs is (required, optional) attribute tuples.
                for attr in mgr_cls._create_attrs[0]:
                    sub_parser_action.add_argument(
                        "--%s" % attr.replace("_", "-"), required=True
                    )
                for attr in mgr_cls._create_attrs[1]:
                    sub_parser_action.add_argument(
                        "--%s" % attr.replace("_", "-"), required=False
                    )

        if action_name == "update":
            _add_id_arg(sub_parser_action)
            if hasattr(mgr_cls, "_update_attrs"):
                # _update_attrs is (required, optional); the id attribute is
                # already exposed above, so it is filtered out here.
                for attr in mgr_cls._update_attrs[0]:
                    if attr != cls._id_attr:
                        sub_parser_action.add_argument(
                            "--%s" % attr.replace("_", "-"), required=True
                        )
                for attr in mgr_cls._update_attrs[1]:
                    if attr != cls._id_attr:
                        sub_parser_action.add_argument(
                            "--%s" % attr.replace("_", "-"), required=False
                        )

    if cls.__name__ in cli.custom_actions:
        name = cls.__name__
        for action_name in cli.custom_actions[name]:
            sub_parser_action = sub_parser.add_parser(action_name)
            # Get the attributes for URL/path construction
            _add_parent_args(sub_parser_action)
            sub_parser_action.add_argument("--sudo", required=False)

            # We need to get the object somehow
            if gitlab.mixins.GetWithoutIdMixin not in inspect.getmro(cls):
                _add_id_arg(sub_parser_action)

            required, optional, dummy = cli.custom_actions[name][action_name]
            for attr in required:
                if attr != cls._id_attr:
                    sub_parser_action.add_argument(
                        "--%s" % attr.replace("_", "-"), required=True
                    )
            for attr in optional:
                if attr != cls._id_attr:
                    sub_parser_action.add_argument(
                        "--%s" % attr.replace("_", "-"), required=False
                    )

    if mgr_cls.__name__ in cli.custom_actions:
        name = mgr_cls.__name__
        for action_name in cli.custom_actions[name]:
            sub_parser_action = sub_parser.add_parser(action_name)
            _add_parent_args(sub_parser_action)
            sub_parser_action.add_argument("--sudo", required=False)

            required, optional, dummy = cli.custom_actions[name][action_name]
            for attr in required:
                if attr != cls._id_attr:
                    sub_parser_action.add_argument(
                        "--%s" % attr.replace("_", "-"), required=True
                    )
            for attr in optional:
                if attr != cls._id_attr:
                    sub_parser_action.add_argument(
                        "--%s" % attr.replace("_", "-"), required=False
                    )
def extend_parser(parser):
    """Register an argparse sub-parser tree covering every Gitlab object.

    Adds one sub-parser per exposed object class (the "what"), each holding
    its own action sub-parsers ("whaction").  Returns the enriched parser.
    """
    subparsers = parser.add_subparsers(
        title="object", dest="what", help="Object to manipulate."
    )
    subparsers.required = True

    # populate argparse for all Gitlab Object
    managed_classes = []
    for candidate in gitlab.v4.objects.__dict__.values():
        try:
            if gitlab.base.RESTManager not in inspect.getmro(candidate):
                continue
            if candidate._obj_cls is not None:
                managed_classes.append(candidate._obj_cls)
        except AttributeError:
            # not a class, or a manager without _obj_cls: skip it
            continue

    for obj_cls in sorted(managed_classes, key=operator.attrgetter("__name__")):
        arg_name = cli.cls_to_what(obj_cls)
        object_group = subparsers.add_parser(arg_name)

        object_subparsers = object_group.add_subparsers(
            title="action", dest="whaction", help="Action to execute."
        )
        _populate_sub_parser_by_class(obj_cls, object_subparsers)
        object_subparsers.required = True

    return parser
def get_dict(obj, fields):
    """Return *obj* unchanged for strings, else its attribute dict.

    When *fields* is non-empty, only those attribute keys are kept.
    """
    if isinstance(obj, six.string_types):
        return obj

    attributes = obj.attributes
    if not fields:
        return attributes
    return {k: v for k, v in attributes.items() if k in fields}
class JSONPrinter(object):
    """Render CLI results as JSON on stdout."""

    def display(self, d, **kwargs):
        """Print a single result object as a JSON document."""
        import json  # noqa

        serialized = json.dumps(d)
        print(serialized)

    def display_list(self, data, fields, **kwargs):
        """Print a list of results as one JSON array."""
        import json  # noqa

        entries = [get_dict(item, fields) for item in data]
        print(json.dumps(entries))
class YAMLPrinter(object):
    """Render CLI results as YAML on stdout (needs the PyYaml package)."""

    def display(self, d, **kwargs):
        """Print a single result object as a YAML document."""
        try:
            import yaml  # noqa

            print(yaml.safe_dump(d, default_flow_style=False))
        except ImportError:
            exit(
                "PyYaml is not installed.\n"
                "Install it with `pip install PyYaml` "
                "to use the yaml output feature"
            )

    def display_list(self, data, fields, **kwargs):
        """Print a list of results as one YAML sequence."""
        try:
            import yaml  # noqa

            entries = [get_dict(item, fields) for item in data]
            print(yaml.safe_dump(entries, default_flow_style=False))
        except ImportError:
            exit(
                "PyYaml is not installed.\n"
                "Install it with `pip install PyYaml` "
                "to use the yaml output feature"
            )
class LegacyPrinter(object):
    """Render CLI results as flat "key: value" text lines on stdout."""

    def display(self, d, **kwargs):
        """Print one result.

        With ``verbose=True`` the ``obj`` keyword (a dict, or otherwise
        assumed to be a RESTObject) is dumped key by key; without it only
        the id attribute and the short-print attribute are shown.
        """
        verbose = kwargs.get("verbose", False)
        padding = kwargs.get("padding", 0)
        obj = kwargs.get("obj")

        def dump_dict(mapping, indent):
            # keys are sorted and printed with '_' rendered as '-'
            for key in sorted(mapping.keys()):
                value = mapping[key]
                if isinstance(value, dict):
                    print("%s%s:" % (" " * indent, key.replace("_", "-")))
                    self.display(value, verbose=True, padding=indent + 2, obj=value)
                else:
                    print("%s%s: %s" % (" " * indent, key.replace("_", "-"), value))

        if not verbose:
            if obj._id_attr:
                obj_id = getattr(obj, obj._id_attr)
                print("%s: %s" % (obj._id_attr.replace("_", "-"), obj_id))
            if hasattr(obj, "_short_print_attr"):
                value = getattr(obj, obj._short_print_attr) or "None"
                value = value.replace("\r", "").replace("\n", " ")
                # If the attribute is a note (ProjectCommitComment) then we do
                # some modifications to fit everything on one line
                line = "%s: %s" % (obj._short_print_attr, value)
                # ellipsize long lines (comments)
                if len(line) > 79:
                    line = line[:76] + "..."
                print(line)
            return

        if isinstance(obj, dict):
            dump_dict(obj, padding)
            return
        # not a dict, we assume it's a RESTObject
        if obj._id_attr:
            print("%s: %s" % (obj._id_attr, getattr(obj, obj._id_attr, None)))
        attrs = obj.attributes
        if obj._id_attr:
            attrs.pop(obj._id_attr)
        dump_dict(attrs, padding)

    def display_list(self, data, fields, **kwargs):
        """Print every entry of *data*, separated by blank lines."""
        verbose = kwargs.get("verbose", False)
        for obj in data:
            if isinstance(obj, gitlab.base.RESTObject):
                self.display(get_dict(obj, fields), verbose=verbose, obj=obj)
            else:
                print(obj)
            print("")
# Maps the CLI --output format name to the printer class implementing it.
PRINTERS = {"json": JSONPrinter, "legacy": LegacyPrinter, "yaml": YAMLPrinter}
def run(gl, what, action, args, verbose, output, fields):
g_cli = GitlabCLI(gl, what, action, args)
data = g_cli()
printer = PRINTERS[output]()
if isinstance(data, dict):
printer.display(data, verbose=True, obj=data)
elif isinstance(data, list):
printer.display_list(data, fields, verbose=verbose)
elif isinstance(data, gitlab.base.RESTObject):
printer.display(get_dict(data, fields), verbose=verbose, obj=data)
elif isinstance(data, | |
the last layer has a Parabola activation
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
None
"""
try:
ffn_handle_last_parabola_layer_no_alloc_c = fppoly_api.ffn_handle_last_parabola_layer_no_alloc
ffn_handle_last_parabola_layer_no_alloc_c.restype = None
ffn_handle_last_parabola_layer_no_alloc_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t), c_bool, c_bool]
ffn_handle_last_parabola_layer_no_alloc_c(man,element,weights,bias, num_out_neurons, num_in_neurons, predecessors, has_parabola, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_last_parabola_layer_no_alloc" from "libfppoly.so"')
print(inst)
def ffn_handle_last_log_layer(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, has_log, use_area_heuristic):
    """Handle the last FFN Log layer.

    Parameters
    ----------
    man : ElinaManagerPtr
        Pointer to the ElinaManager.
    element : ElinaAbstract0Ptr
        Pointer to the ElinaAbstract0 abstract element.
    weights : _doublepp
        The weight matrix (double**).
    bias : ndpointer(c_double)
        The bias vector.
    num_out_neurons : c_size_t
        The number of output neurons.
    num_in_neurons : c_size_t
        The number of input neurons.
    predecessors : POINTER(c_size_t)
        The layers before the current layer.
    has_log : c_bool
        Whether the last layer has a Log activation.
    use_area_heuristic : c_bool
        Whether to use the area heuristic.

    Returns
    -------
    None
    """
    try:
        c_fun = fppoly_api.ffn_handle_last_log_layer
        c_fun.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t), c_bool, c_bool]
        c_fun.restype = None
        c_fun(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, has_log, use_area_heuristic)
    except Exception as inst:
        print('Problem with loading/calling "ffn_handle_last_log_layer" from "libfppoly.so"')
        print(inst)
def ffn_handle_last_log_layer_no_alloc(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, has_log, use_area_heuristic):
    """Handle the last FFN Log layer without allocating a new element.

    Parameters
    ----------
    man : ElinaManagerPtr
        Pointer to the ElinaManager.
    element : ElinaAbstract0Ptr
        Pointer to the ElinaAbstract0 abstract element.
    weights : _doublepp
        The weight matrix (double**).
    bias : ndpointer(c_double)
        The bias vector.
    num_out_neurons : c_size_t
        The number of output neurons.
    num_in_neurons : c_size_t
        The number of input neurons.
    predecessors : POINTER(c_size_t)
        The layers before the current layer.
    has_log : c_bool
        Whether the last layer has a Log activation.
    use_area_heuristic : c_bool
        Whether to use the area heuristic.

    Returns
    -------
    None
    """
    try:
        c_fun = fppoly_api.ffn_handle_last_log_layer_no_alloc
        c_fun.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t), c_bool, c_bool]
        c_fun.restype = None
        c_fun(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, has_log, use_area_heuristic)
    except Exception as inst:
        print('Problem with loading/calling "ffn_handle_last_log_layer_no_alloc" from "libfppoly.so"')
        print(inst)
def subtract_output_neurons(man, element, y, x, use_area_heuristic):
    """Compute bounds on the expression y - x.

    Parameters
    ----------
    man : ElinaManagerPtr
        Pointer to the ElinaManager.
    element : ElinaAbstract0Ptr
        Pointer to the ElinaAbstract0 abstract element.
    y : ElinaDim
        The dimension y in y - x.
    x : ElinaDim
        The dimension x in y - x.
    use_area_heuristic : c_bool
        Whether to use the area heuristic.

    Returns
    -------
    res : ElinaIntervalPtr
        Interval bounding y - x, or None when the call failed.
    """
    res = None
    try:
        c_fun = fppoly_api.subtract_output_neurons
        c_fun.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, ElinaDim, ElinaDim, c_bool]
        c_fun.restype = ElinaIntervalPtr
        res = c_fun(man, element, y, x, use_area_heuristic)
    except Exception as inst:
        print('Problem with loading/calling "subtract_output_neurons" from "libfppoly.so"')
        print(inst)
    return res
def is_greater(man, element, y, x, use_area_heuristic):
    """Check whether y is strictly greater than x in the abstract element.

    Parameters
    ----------
    man : ElinaManagerPtr
        Pointer to the ElinaManager.
    element : ElinaAbstract0Ptr
        Pointer to the ElinaAbstract0 abstract element.
    y : ElinaDim
        The dimension y in the constraint y - x > 0.
    x : ElinaDim
        The dimension x in the constraint y - x > 0.
    use_area_heuristic : c_bool
        Whether to use the area heuristic.

    Returns
    -------
    res : bool or None
        True when the constraint holds, None when the call failed.
    """
    res = None
    try:
        c_fun = fppoly_api.is_greater
        c_fun.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, ElinaDim, ElinaDim, c_bool]
        c_fun.restype = c_bool
        res = c_fun(man, element, y, x, use_area_heuristic)
    except Exception as inst:
        print('Problem with loading/calling "is_greater" from "libfppoly.so"')
        print(inst)
    return res
def conv_handle_first_layer(man, element, filter_weights, filter_bias, input_size, filter_size, num_filters, strides, is_valid_padding, has_bias, predecessors):
    """Convolutional matrix multiplication in the first layer.

    Parameters
    ----------
    man : ElinaManagerPtr
        Pointer to the ElinaManager.
    element : ElinaAbstract0Ptr
        Pointer to the ElinaAbstract0 abstract element.
    filter_weights : ndpointer(c_double)
        Filter weights.
    filter_bias : ndpointer(c_double)
        Filter biases.
    input_size : ndpointer(c_size_t)
        Size of the input.
    filter_size : POINTER(c_size_t)
        Size of the filters.
    num_filters : c_size_t
        Number of filters.
    strides : POINTER(c_size_t)
        Size of the strides.
    is_valid_padding : c_bool
        Whether the padding is valid.
    has_bias : c_bool
        Whether the filter has a bias.
    predecessors : POINTER(c_size_t)
        The layers before the current layer.

    Returns
    -------
    None
    """
    try:
        c_fun = fppoly_api.conv_handle_first_layer
        c_fun.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, ndpointer(ctypes.c_double), ndpointer(ctypes.c_double), ndpointer(ctypes.c_size_t), POINTER(c_size_t), c_size_t, POINTER(c_size_t), c_bool, c_bool, POINTER(c_size_t)]
        c_fun.restype = None
        c_fun(man, element, filter_weights, filter_bias, input_size, filter_size, num_filters, strides, is_valid_padding, has_bias, predecessors)
    except Exception as inst:
        print('Problem with loading/calling "conv_handle_first_layer" from "libfppoly.so"')
        print(inst)
def conv_handle_intermediate_relu_layer(man, element, filter_weights, filter_bias, input_size, filter_size, num_filters, strides, is_valid_padding, has_bias, predecessors, use_area_heuristic):
    """Convolutional matrix multiplication in an intermediate ReLU layer.

    Parameters
    ----------
    man : ElinaManagerPtr
        Pointer to the ElinaManager.
    element : ElinaAbstract0Ptr
        Pointer to the ElinaAbstract0 abstract element.
    filter_weights : ndpointer(c_double)
        Filter weights.
    filter_bias : ndpointer(c_double)
        Filter biases.
    input_size : ndpointer(c_size_t)
        Size of the input.
    filter_size : POINTER(c_size_t)
        Size of the filters.
    num_filters : c_size_t
        Number of filters.
    strides : POINTER(c_size_t)
        Size of the strides.
    is_valid_padding : c_bool
        Whether the padding is valid.
    has_bias : c_bool
        Whether the filter has a bias.
    predecessors : POINTER(c_size_t)
        The layers before the current layer.
    use_area_heuristic : c_bool
        Whether to use the area heuristic.

    Returns
    -------
    None
    """
    try:
        c_fun = fppoly_api.conv_handle_intermediate_relu_layer
        c_fun.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, ndpointer(ctypes.c_double), ndpointer(ctypes.c_double), ndpointer(ctypes.c_size_t), POINTER(c_size_t), c_size_t, POINTER(c_size_t), c_bool, c_bool, POINTER(c_size_t), c_bool]
        c_fun.restype = None
        c_fun(man, element, filter_weights, filter_bias, input_size, filter_size, num_filters, strides, is_valid_padding, has_bias, predecessors, use_area_heuristic)
    except Exception as inst:
        print('Problem with loading/calling "conv_handle_intermediate_relu_layer" from "libfppoly.so"')
        print(inst)
def handle_maxpool_layer(man, element, pool_size, input_size, predecessors):
    """Handle a Maxpool layer.

    Parameters
    ----------
    man : ElinaManagerPtr
        Pointer to the ElinaManager.
    element : ElinaAbstract0Ptr
        Pointer to the ElinaAbstract0 abstract element.
    pool_size : ndpointer(c_size_t)
        The size of the Maxpool filter.
    input_size : ndpointer(c_size_t)
        The number of variables on which Maxpool will be applied.
    predecessors : POINTER(c_size_t)
        The layers before the current layer.

    Returns
    -------
    res : c_size_t
        Number of neurons in the last layer, or None when the call failed.
    """
    res = None
    try:
        c_fun = fppoly_api.handle_maxpool_layer
        c_fun.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, ndpointer(ctypes.c_size_t), ndpointer(ctypes.c_size_t), POINTER(c_size_t)]
        c_fun.restype = c_size_t
        res = c_fun(man, element, pool_size, input_size, predecessors)
    except Exception as inst:
        print('Problem with loading/calling "handle_maxpool_layer" from "libfppoly.so"')
        print(inst)
    return res
def handle_residual_layer(man, element, num_neurons, predecessors):
    """Handle a Residual layer.

    Parameters
    ----------
    man : ElinaManagerPtr
        Pointer to the ElinaManager.
    element : ElinaAbstract0Ptr
        Pointer to the ElinaAbstract0 abstract element.
    num_neurons : c_size_t
        The number of neurons in the residual layer.
    predecessors : POINTER(c_size_t)
        The layers before the current layer.

    Returns
    -------
    None
    """
    try:
        c_fun = fppoly_api.handle_residual_layer
        c_fun.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, c_size_t, POINTER(c_size_t)]
        c_fun.restype = None
        c_fun(man, element, num_neurons, predecessors)
    except Exception as inst:
        print('Problem with loading/calling "handle_residual_layer" from "libfppoly.so"')
        print(inst)
def box_for_neuron(man, element, layerno, neuron_no):
    """Return the bounds for one neuron in a layer.

    Parameters
    ----------
    man : ElinaManagerPtr
        Pointer to the ElinaManager.
    element : ElinaAbstract0Ptr
        Pointer to the ElinaAbstract0.
    layerno : c_size_t
        The layer number.
    neuron_no : c_size_t
        The neuron number in the layer.

    Returns
    -------
    interval : ElinaIntervalPtr
        Interval for the neuron, or None when the call failed.
    """
    interval = None
    try:
        box_for_neuron_c = fppoly_api.box_for_neuron
        box_for_neuron_c.restype = ElinaIntervalPtr
        box_for_neuron_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, c_size_t, c_size_t]
        interval = box_for_neuron_c(man, element, layerno, neuron_no)
    # was a bare "except:": narrowed so SystemExit/KeyboardInterrupt propagate
    except Exception:
        print('Problem with loading/calling "box_for_neuron" from "fppoly.so"')
        print('Make sure you are passing ElinaManagerPtr, ElinaAbstract0Ptr, c_size_t, c_size_t to the function')
    return interval
def box_for_layer(man, element, layerno):
    """Return the bounds for all neurons in a layer.

    Parameters
    ----------
    man : ElinaManagerPtr
        Pointer to the ElinaManager.
    element : ElinaAbstract0Ptr
        Pointer to the ElinaAbstract0.
    layerno : c_size_t
        The layer number.

    Returns
    -------
    interval_array : ElinaIntervalArray
        Interval array representing the hypercube, or None when the call
        failed.
    """
    interval_array = None
    try:
        box_for_layer_c = fppoly_api.box_for_layer
        box_for_layer_c.restype = ElinaIntervalArray
        box_for_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, c_size_t]
        interval_array = box_for_layer_c(man, element, layerno)
    # was a bare "except:": narrowed so SystemExit/KeyboardInterrupt propagate
    except Exception:
        print('Problem with loading/calling "box_for_layer" from "fppoly.so"')
        print('Make sure you are passing ElinaManagerPtr, ElinaAbstract0Ptr, c_size_t to the function')
    return interval_array
def get_num_neurons_in_layer(man, element, layerno):
    """Return the number of neurons in a layer.

    Parameters
    ----------
    man : ElinaManagerPtr
        Pointer to the ElinaManager.
    element : ElinaAbstract0Ptr
        Pointer to the ElinaAbstract0.
    layerno : c_size_t
        The layer number.

    Returns
    -------
    res : c_size_t
        Number of neurons in the layer; 0 when the call failed.
    """
    res = 0
    try:
        get_num_neurons_in_layer_c = fppoly_api.get_num_neurons_in_layer
        get_num_neurons_in_layer_c.restype = c_size_t
        get_num_neurons_in_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, c_size_t]
        res = get_num_neurons_in_layer_c(man, element, layerno)
    # was a bare "except:": narrowed so SystemExit/KeyboardInterrupt propagate
    except Exception:
        print('Problem with loading/calling "get_num_neurons_in_layer" from "fppoly.so"')
        print('Make sure you are passing ElinaManagerPtr, ElinaAbstract0Ptr, c_size_t to the function')
    return res
def update_bounds_for_neuron(man, element,layerno, neuron_no, lb, ub):
"""
returns bounds for a neuron in a layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0.
layerno: c_size_t
the layer number
neuron_no: c_size_t
the neuron number in the layer
lb: c_double
the updated lower bound
ub: c_double
the updated upper bound
Returns
-------
None
"""
try:
update_bounds_for_neuron_c = fppoly_api.update_bounds_for_neuron
update_bounds_for_neuron_c.restype = | |
# <reponame>jochenparm/moler  (scrape artifact kept as comment)
# -*- coding: utf-8 -*-
"""
asyncio_in_thread_runner_with_raw_functions.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A fully-functional connection-observer using configured concurrency variant.
This is Layer_3 example:
- shows configuration phase and usage phase
- configure named connections via config file
- uses Moler provided TCP connection implementation
- usage hides implementation variant via factories
- variant is known only during backend configuration phase
- uses connection observer with asyncio runner
This example demonstrates multiple connection observers working
on multiple connections.
Shows following concepts:
- multiple observers may observe single connection
- each one is focused on different data (processing decomposition)
- client code may run observers on different connections
- client code may "start" observers in sequence
Shows how to use connection observers inside raw 'def xxx()' functions and
how to mix it with threads.
Best choice here is to use 'asyncio-in-thread' runner.
"""
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>'
import logging
import sys
import os
import threading
import time
from moler.connection_factory import get_connection
from moler.exceptions import ConnectionObserverTimeout
from moler.runner_factory import get_runner
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "..")) # allow finding modules in examples/
from network_toggle_observers import NetworkDownDetector, NetworkUpDetector
# ===================== Moler's connection-observer usage ======================
def ping_observing_task(ext_io_connection, ping_ip):
    """
    Watch ping output of one connection for a network-down/network-up cycle.

    Here external-IO connection is abstract - we don't know its type.
    What we know is just that it has .moler_connection attribute.
    """
    logger = logging.getLogger('moler.user.app-code')
    conn_addr = str(ext_io_connection)

    # Layer 2 of Moler's usage (ext_io_connection + runner):
    # 3. create observers on Moler's connection (each gets its own runner)
    net_down_detector = NetworkDownDetector(ping_ip,
                                            connection=ext_io_connection.moler_connection,
                                            runner=get_runner(variant="asyncio-in-thread"))
    net_up_detector = NetworkUpDetector(ping_ip,
                                        connection=ext_io_connection.moler_connection,
                                        runner=get_runner(variant="asyncio-in-thread"))

    logger.debug('observe ' + '{} on {} using {}'.format(ping_ip, conn_addr, net_down_detector))

    # 4. start observer (nonblocking, using as future); started before the
    #    connection opens so we don't lose the first data on the connection
    net_down_detector.start()

    with ext_io_connection:
        # 5. await that observer to complete
        try:
            net_down_time = net_down_detector.await_done(timeout=10)  # =2 --> TimeoutError
            down_timestamp = time.strftime("%H:%M:%S", time.localtime(net_down_time))
            logger.debug('Network {} is down from {}'.format(ping_ip, down_timestamp))
        except ConnectionObserverTimeout:
            logger.debug('Network down detector timed out')

        # 6. call next observer (blocking till completes)
        logger.debug('observe ' + '{} on {} using {}'.format(ping_ip, conn_addr, net_up_detector))
        # using as synchronous function (so we want verb to express action)
        detect_network_up = net_up_detector
        net_up_time = detect_network_up()  # if you want timeout - see code above
        up_timestamp = time.strftime("%H:%M:%S", time.localtime(net_up_time))
        logger.debug('Network {} is back "up" from {}'.format(ping_ip, up_timestamp))
    logger.debug('exiting ping_observing_task({})'.format(ping_ip))
# ==============================================================================
def main(connections2observe4ip):
    """Spawn one observing thread per named connection and wait for all."""
    logger = logging.getLogger('asyncio.main')
    logger.debug('starting jobs observing connections')
    # Start one client thread per (connection-name, ping-ip) pair.  This
    # front-end code hides all details of the connection: we only use its
    # *name* (e.g. "main_dns_server") and let get_connection() resolve the
    # configured backend/variant.
    observing_threads = []
    for connection_name, ping_ip in connections2observe4ip:
        tcp_connection = get_connection(name=connection_name, variant='asyncio-in-thread')
        worker = threading.Thread(target=ping_observing_task,
                                  args=(tcp_connection, ping_ip))
        worker.start()
        observing_threads.append(worker)
    # await observers job to be done
    for worker in observing_threads:
        worker.join()
    logger.debug('all jobs observing connections are done')
# ==============================================================================
if __name__ == '__main__':
    from threaded_ping_server import start_ping_servers, stop_ping_servers
    from asyncio_common import configure_logging
    import os
    from moler.config import load_config
    # -------------------------------------------------------------------
    # Configure moler connections (backend code)
    # 1) configure variant by YAML config file
    # 2) ver.2 - configure named connections by YAML config file
    load_config(config=os.path.join(os.path.dirname(__file__), "..", "named_connections.yml"))
    # 3) take default class used to realize tcp-threaded-connection
    # -------------------------------------------------------------------
    configure_logging()
    # Addresses the simulated ping servers listen on.
    net_1 = ('localhost', 5671)
    net_2 = ('localhost', 5672)
    # (server address, IP whose ping output that server simulates)
    connections2serve = [(net_1, '10.0.2.15'),
                         (net_2, '10.0.2.16')]
    # (named connection from named_connections.yml, IP to watch on it)
    connections2observe4ip = [('net_1', '10.0.2.15'),
                              ('net_2', '10.0.2.16')]
    servers = start_ping_servers(connections2serve)
    try:
        main(connections2observe4ip)
    finally:
        # Always shut the simulated servers down, even if main() fails.
        stop_ping_servers(servers)
'''
LOG OUTPUT
15:28:23 |threaded.ping.tcp-server(5671) | MainThread |Ping Sim started at tcp://localhost:5671
15:28:23 |threaded.ping.tcp-server(5672) | MainThread |Ping Sim started at tcp://localhost:5672
15:28:23 |asyncio.main | MainThread |starting jobs observing connections
15:28:23 |moler.runner.asyncio-in-thrd:0 | Thread-3 |created AsyncioInThreadRunner:139990601181112
15:28:23 |moler.user.app-code | Thread-3 |observe 10.0.2.15 on tcp://localhost:5671 using NetworkDownDetector(id:7f521a0e27f0)
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0e27f0 | Thread-3 |go background: NetworkDownDetector(id:7f521a0e27f0, using ThreadedMolerConnection(id:7f521a0e2470)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2630>>])
15:28:23 |asyncio | Thread-3 |Using selector: EpollSelector
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0e27f0 | Thread-3 |created loop 4 thread: 139990601182008:<_UnixSelectorEventLoop running=False closed=False debug=False>
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0e27f0 | Thread-3 |created thread <TillDoneThread(Thread-5, initial)> with loop 139990601182008:<_UnixSelectorEventLoop running=False closed=False debug=True>
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0e27f0 | Thread-5 |starting new asyncio-in-thrd loop ...
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0e27f0 | Thread-3 |started new asyncio-in-thrd loop ...
15:28:23 |moler.user.app-code | Thread-4 |observe 10.0.2.16 on tcp://localhost:5672 using NetworkDownDetector(id:7f521a0d6710)
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-4 |go background: NetworkDownDetector(id:7f521a0d6710, using ThreadedMolerConnection(id:7f521a0e2780)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2978>>])
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |will await stop_event ...
15:28:23 |moler.NetworkDownDetector(id:7f521a0d6710) | Thread-5 |Observer 'network_toggle_observers.NetworkDownDetector' started.
15:28:23 |moler.net_2 | Thread-5 |Observer 'network_toggle_observers.NetworkDownDetector' started.
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |START OF feed(NetworkDownDetector(id:7f521a0d6710))
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |start feeding(NetworkDownDetector(id:7f521a0d6710))
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |feed subscribing for data NetworkDownDetector(id:7f521a0d6710, using ThreadedMolerConnection(id:7f521a0e2780)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2978>>])
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |feeding(NetworkDownDetector(id:7f521a0d6710)) started
15:28:23 |moler.NetworkDownDetector(id:7f521a0e27f0) | Thread-5 |Observer 'network_toggle_observers.NetworkDownDetector' started.
15:28:23 |moler.net_1 | Thread-5 |Observer 'network_toggle_observers.NetworkDownDetector' started.
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |START OF feed(NetworkDownDetector(id:7f521a0e27f0))
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |start feeding(NetworkDownDetector(id:7f521a0e27f0))
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |feed subscribing for data NetworkDownDetector(id:7f521a0e27f0, using ThreadedMolerConnection(id:7f521a0e2470)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2630>>])
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |feeding(NetworkDownDetector(id:7f521a0e27f0)) started
15:28:23 |threaded.ping.tcp-server(5672 -> 43373) | Thread-6 |connection accepted - client at tcp://127.0.0.1:43373
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-4 |go foreground: NetworkDownDetector(id:7f521a0d6710, using ThreadedMolerConnection(id:7f521a0e2780)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2978>>]) - await max. 10 [sec]
15:28:23 |moler.net_2 | Thread-7 |
15:28:23 |threaded.ping.tcp-server(5671 -> 49571) | Thread-8 |connection accepted - client at tcp://127.0.0.1:49571
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-3 |go foreground: NetworkDownDetector(id:7f521a0e27f0, using ThreadedMolerConnection(id:7f521a0e2470)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2630>>]) - await max. 10 [sec]
15:28:23 |moler.net_1 | Thread-9 |
15:28:24 |moler.net_2 | Thread-7 |greg@debian:~$ ping 10.0.2.16
15:28:24 |moler.net_1 | Thread-9 |greg@debian:~$ ping 10.0.2.15
15:28:25 |moler.net_2 | Thread-7 |PING 10.0.2.16 (10.0.2.16) 56(84) bytes of data.
15:28:25 |moler.net_1 | Thread-9 |PING 10.0.2.15 (10.0.2.15) 56(84) bytes of data.
15:28:26 |moler.net_2 | Thread-7 |64 bytes from 10.0.2.16: icmp_req=1 ttl=64 time=0.080 ms
15:28:26 |moler.net_1 | Thread-9 |64 bytes from 10.0.2.15: icmp_req=1 ttl=64 time=0.080 ms
15:28:27 |moler.net_2 | Thread-7 |64 bytes from 10.0.2.16: icmp_req=2 ttl=64 time=0.037 ms
15:28:27 |moler.net_1 | Thread-9 |64 bytes from 10.0.2.15: icmp_req=2 ttl=64 time=0.037 ms
15:28:28 |moler.net_2 | Thread-7 |64 bytes from 10.0.2.16: icmp_req=3 ttl=64 time=0.045 ms
15:28:28 |moler.net_1 | Thread-9 |64 bytes from 10.0.2.15: icmp_req=3 ttl=64 time=0.045 ms
15:28:29 |moler.net_2 | Thread-7 |ping: sendmsg: Network is unreachable
15:28:29 |moler.NetworkDownDetector(id:7f521a0d6710) | Thread-7 |Network 10.0.2.16 is down!
15:28:29 |moler.net_1 | Thread-9 |ping: sendmsg: Network is unreachable
15:28:29 |moler.NetworkDownDetector(id:7f521a0e27f0) | Thread-9 |Network 10.0.2.15 is down!
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |feed done & unsubscribing NetworkDownDetector(id:7f521a0d6710, using ThreadedMolerConnection(id:7f521a0e2780)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2978>>])
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |END OF feed(NetworkDownDetector(id:7f521a0d6710))
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |feed returning result: 1541514509.3102295
15:28:29 |moler.NetworkDownDetector(id:7f521a0d6710) | Thread-5 |Observer 'network_toggle_observers.NetworkDownDetector' finished.
15:28:29 |moler.net_2 | Thread-5 |Observer 'network_toggle_observers.NetworkDownDetector' finished.
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |feed done & unsubscribing NetworkDownDetector(id:7f521a0e27f0, using ThreadedMolerConnection(id:7f521a0e2470)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2630>>])
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |END OF feed(NetworkDownDetector(id:7f521a0e27f0))
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |feed returning result: 1541514509.311799
15:28:29 |moler.NetworkDownDetector(id:7f521a0e27f0) | Thread-5 |Observer 'network_toggle_observers.NetworkDownDetector' finished.
15:28:29 |moler.net_1 | Thread-5 |Observer 'network_toggle_observers.NetworkDownDetector' finished.
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-3 |NetworkDownDetector(id:7f521a0e27f0) returned 1541514509.311799
15:28:29 |moler.user.app-code | Thread-3 |Network 10.0.2.15 is down from 15:28:29
15:28:29 |moler.user.app-code | Thread-3 |observe 10.0.2.15 on tcp://localhost:5671 using NetworkUpDetector(id:7f521a0e2ba8)
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0e2ba8 | Thread-3 |go background: NetworkUpDetector(id:7f521a0e2ba8, using ThreadedMolerConnection(id:7f521a0e2470)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2630>>])
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0e2ba8 | Thread-4 |NetworkDownDetector(id:7f521a0d6710) returned 1541514509.3102295
15:28:29 |moler.user.app-code | Thread-4 |Network 10.0.2.16 is down from 15:28:29
15:28:29 |moler.user.app-code | Thread-4 |observe 10.0.2.16 on tcp://localhost:5672 using NetworkUpDetector(id:7f521a0d6860)
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-4 |go background: NetworkUpDetector(id:7f521a0d6860, using ThreadedMolerConnection(id:7f521a0e2780)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2978>>])
15:28:29 |asyncio | Thread-5 |poll took 1.560 ms: 1 events
15:28:29 |moler.NetworkUpDetector(id:7f521a0e2ba8) | Thread-5 |Observer 'network_toggle_observers.NetworkUpDetector' started.
15:28:29 |moler.net_1 | Thread-5 |Observer 'network_toggle_observers.NetworkUpDetector' started.
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-5 |START OF feed(NetworkUpDetector(id:7f521a0e2ba8))
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-5 |start feeding(NetworkUpDetector(id:7f521a0e2ba8))
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-5 |feed subscribing for data NetworkUpDetector(id:7f521a0e2ba8, using ThreadedMolerConnection(id:7f521a0e2470)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2630>>])
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-5 |feeding(NetworkUpDetector(id:7f521a0e2ba8)) started
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-3 |go foreground: NetworkUpDetector(id:7f521a0e2ba8, using ThreadedMolerConnection(id:7f521a0e2470)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2630>>]) - await max. None [sec]
15:28:29 |moler.NetworkUpDetector(id:7f521a0d6860) | Thread-5 |Observer 'network_toggle_observers.NetworkUpDetector' started.
15:28:29 |moler.net_2 | Thread-5 |Observer 'network_toggle_observers.NetworkUpDetector' started.
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-5 |START OF feed(NetworkUpDetector(id:7f521a0d6860))
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-5 |start feeding(NetworkUpDetector(id:7f521a0d6860))
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-5 |feed subscribing for data NetworkUpDetector(id:7f521a0d6860, using ThreadedMolerConnection(id:7f521a0e2780)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2978>>])
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-5 |feeding(NetworkUpDetector(id:7f521a0d6860)) started
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-4 |go foreground: NetworkUpDetector(id:7f521a0d6860, using ThreadedMolerConnection(id:7f521a0e2780)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2978>>]) - await max. None [sec]
15:28:30 |moler.net_2 | Thread-7 |ping: sendmsg: Network is unreachable
15:28:30 |moler.net_1 | Thread-9 |ping: sendmsg: Network is unreachable
15:28:31 | |
    # NOTE(review): the methods below are ANTLR-generated listener stubs
    # (one enter/exit pair per grammar rule); they are intentionally empty
    # and should be overridden in a subclass rather than edited here.
    # Enter a parse tree produced by SystemVerilogParser#specparam_declaration.
    def enterSpecparam_declaration(self, ctx:SystemVerilogParser.Specparam_declarationContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#specparam_declaration.
    def exitSpecparam_declaration(self, ctx:SystemVerilogParser.Specparam_declarationContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#inout_declaration.
    def enterInout_declaration(self, ctx:SystemVerilogParser.Inout_declarationContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#inout_declaration.
    def exitInout_declaration(self, ctx:SystemVerilogParser.Inout_declarationContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#input_declaration.
    def enterInput_declaration(self, ctx:SystemVerilogParser.Input_declarationContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#input_declaration.
    def exitInput_declaration(self, ctx:SystemVerilogParser.Input_declarationContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#output_declaration.
    def enterOutput_declaration(self, ctx:SystemVerilogParser.Output_declarationContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#output_declaration.
    def exitOutput_declaration(self, ctx:SystemVerilogParser.Output_declarationContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#interface_port_declaration.
    def enterInterface_port_declaration(self, ctx:SystemVerilogParser.Interface_port_declarationContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#interface_port_declaration.
    def exitInterface_port_declaration(self, ctx:SystemVerilogParser.Interface_port_declarationContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#ref_declaration.
    def enterRef_declaration(self, ctx:SystemVerilogParser.Ref_declarationContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#ref_declaration.
    def exitRef_declaration(self, ctx:SystemVerilogParser.Ref_declarationContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#data_declaration.
    def enterData_declaration(self, ctx:SystemVerilogParser.Data_declarationContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#data_declaration.
    def exitData_declaration(self, ctx:SystemVerilogParser.Data_declarationContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#package_import_declaration.
    def enterPackage_import_declaration(self, ctx:SystemVerilogParser.Package_import_declarationContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#package_import_declaration.
    def exitPackage_import_declaration(self, ctx:SystemVerilogParser.Package_import_declarationContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#package_import_item.
    def enterPackage_import_item(self, ctx:SystemVerilogParser.Package_import_itemContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#package_import_item.
    def exitPackage_import_item(self, ctx:SystemVerilogParser.Package_import_itemContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#package_export_declaration.
    def enterPackage_export_declaration(self, ctx:SystemVerilogParser.Package_export_declarationContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#package_export_declaration.
    def exitPackage_export_declaration(self, ctx:SystemVerilogParser.Package_export_declarationContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#genvar_declaration.
    def enterGenvar_declaration(self, ctx:SystemVerilogParser.Genvar_declarationContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#genvar_declaration.
    def exitGenvar_declaration(self, ctx:SystemVerilogParser.Genvar_declarationContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#net_declaration.
    def enterNet_declaration(self, ctx:SystemVerilogParser.Net_declarationContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#net_declaration.
    def exitNet_declaration(self, ctx:SystemVerilogParser.Net_declarationContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#type_declaration.
    def enterType_declaration(self, ctx:SystemVerilogParser.Type_declarationContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#type_declaration.
    def exitType_declaration(self, ctx:SystemVerilogParser.Type_declarationContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#net_type_declaration.
    def enterNet_type_declaration(self, ctx:SystemVerilogParser.Net_type_declarationContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#net_type_declaration.
    def exitNet_type_declaration(self, ctx:SystemVerilogParser.Net_type_declarationContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#lifetime.
    def enterLifetime(self, ctx:SystemVerilogParser.LifetimeContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#lifetime.
    def exitLifetime(self, ctx:SystemVerilogParser.LifetimeContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#data_type.
    def enterData_type(self, ctx:SystemVerilogParser.Data_typeContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#data_type.
    def exitData_type(self, ctx:SystemVerilogParser.Data_typeContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#data_type_or_implicit.
    def enterData_type_or_implicit(self, ctx:SystemVerilogParser.Data_type_or_implicitContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#data_type_or_implicit.
    def exitData_type_or_implicit(self, ctx:SystemVerilogParser.Data_type_or_implicitContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#implicit_data_type.
    def enterImplicit_data_type(self, ctx:SystemVerilogParser.Implicit_data_typeContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#implicit_data_type.
    def exitImplicit_data_type(self, ctx:SystemVerilogParser.Implicit_data_typeContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#enum_base_type.
    def enterEnum_base_type(self, ctx:SystemVerilogParser.Enum_base_typeContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#enum_base_type.
    def exitEnum_base_type(self, ctx:SystemVerilogParser.Enum_base_typeContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#enum_name_declaration.
    def enterEnum_name_declaration(self, ctx:SystemVerilogParser.Enum_name_declarationContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#enum_name_declaration.
    def exitEnum_name_declaration(self, ctx:SystemVerilogParser.Enum_name_declarationContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#class_scope.
    def enterClass_scope(self, ctx:SystemVerilogParser.Class_scopeContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#class_scope.
    def exitClass_scope(self, ctx:SystemVerilogParser.Class_scopeContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#class_type.
    def enterClass_type(self, ctx:SystemVerilogParser.Class_typeContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#class_type.
    def exitClass_type(self, ctx:SystemVerilogParser.Class_typeContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#integer_type.
    def enterInteger_type(self, ctx:SystemVerilogParser.Integer_typeContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#integer_type.
    def exitInteger_type(self, ctx:SystemVerilogParser.Integer_typeContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#integer_atom_type.
    def enterInteger_atom_type(self, ctx:SystemVerilogParser.Integer_atom_typeContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#integer_atom_type.
    def exitInteger_atom_type(self, ctx:SystemVerilogParser.Integer_atom_typeContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#integer_vector_type.
    def enterInteger_vector_type(self, ctx:SystemVerilogParser.Integer_vector_typeContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#integer_vector_type.
    def exitInteger_vector_type(self, ctx:SystemVerilogParser.Integer_vector_typeContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#non_integer_type.
    def enterNon_integer_type(self, ctx:SystemVerilogParser.Non_integer_typeContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#non_integer_type.
    def exitNon_integer_type(self, ctx:SystemVerilogParser.Non_integer_typeContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#net_type.
    def enterNet_type(self, ctx:SystemVerilogParser.Net_typeContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#net_type.
    def exitNet_type(self, ctx:SystemVerilogParser.Net_typeContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#net_port_type.
    def enterNet_port_type(self, ctx:SystemVerilogParser.Net_port_typeContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#net_port_type.
    def exitNet_port_type(self, ctx:SystemVerilogParser.Net_port_typeContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#variable_port_type.
    def enterVariable_port_type(self, ctx:SystemVerilogParser.Variable_port_typeContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#variable_port_type.
    def exitVariable_port_type(self, ctx:SystemVerilogParser.Variable_port_typeContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#var_data_type.
    def enterVar_data_type(self, ctx:SystemVerilogParser.Var_data_typeContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#var_data_type.
    def exitVar_data_type(self, ctx:SystemVerilogParser.Var_data_typeContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#signing.
    def enterSigning(self, ctx:SystemVerilogParser.SigningContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#signing.
    def exitSigning(self, ctx:SystemVerilogParser.SigningContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#simple_type.
    def enterSimple_type(self, ctx:SystemVerilogParser.Simple_typeContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#simple_type.
    def exitSimple_type(self, ctx:SystemVerilogParser.Simple_typeContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#struct_union_member.
    def enterStruct_union_member(self, ctx:SystemVerilogParser.Struct_union_memberContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#struct_union_member.
    def exitStruct_union_member(self, ctx:SystemVerilogParser.Struct_union_memberContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#data_type_or_void.
    def enterData_type_or_void(self, ctx:SystemVerilogParser.Data_type_or_voidContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#data_type_or_void.
    def exitData_type_or_void(self, ctx:SystemVerilogParser.Data_type_or_voidContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#struct_union.
    def enterStruct_union(self, ctx:SystemVerilogParser.Struct_unionContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#struct_union.
    def exitStruct_union(self, ctx:SystemVerilogParser.Struct_unionContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#type_reference.
    def enterType_reference(self, ctx:SystemVerilogParser.Type_referenceContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#type_reference.
    def exitType_reference(self, ctx:SystemVerilogParser.Type_referenceContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#drive_strength.
    def enterDrive_strength(self, ctx:SystemVerilogParser.Drive_strengthContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#drive_strength.
    def exitDrive_strength(self, ctx:SystemVerilogParser.Drive_strengthContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#strength0.
    def enterStrength0(self, ctx:SystemVerilogParser.Strength0Context):
        pass
    # Exit a parse tree produced by SystemVerilogParser#strength0.
    def exitStrength0(self, ctx:SystemVerilogParser.Strength0Context):
        pass
    # Enter a parse tree produced by SystemVerilogParser#strength1.
    def enterStrength1(self, ctx:SystemVerilogParser.Strength1Context):
        pass
    # Exit a parse tree produced by SystemVerilogParser#strength1.
    def exitStrength1(self, ctx:SystemVerilogParser.Strength1Context):
        pass
    # Enter a parse tree produced by SystemVerilogParser#charge_strength.
    def enterCharge_strength(self, ctx:SystemVerilogParser.Charge_strengthContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#charge_strength.
    def exitCharge_strength(self, ctx:SystemVerilogParser.Charge_strengthContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#delay3.
    def enterDelay3(self, ctx:SystemVerilogParser.Delay3Context):
        pass
    # Exit a parse tree produced by SystemVerilogParser#delay3.
    def exitDelay3(self, ctx:SystemVerilogParser.Delay3Context):
        pass
    # Enter a parse tree produced by SystemVerilogParser#delay2.
    def enterDelay2(self, ctx:SystemVerilogParser.Delay2Context):
        pass
    # Exit a parse tree produced by SystemVerilogParser#delay2.
    def exitDelay2(self, ctx:SystemVerilogParser.Delay2Context):
        pass
    # Enter a parse tree produced by SystemVerilogParser#delay_value.
    def enterDelay_value(self, ctx:SystemVerilogParser.Delay_valueContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#delay_value.
    def exitDelay_value(self, ctx:SystemVerilogParser.Delay_valueContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#list_of_defparam_assignments.
    def enterList_of_defparam_assignments(self, ctx:SystemVerilogParser.List_of_defparam_assignmentsContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#list_of_defparam_assignments.
    def exitList_of_defparam_assignments(self, ctx:SystemVerilogParser.List_of_defparam_assignmentsContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#list_of_genvar_identifiers.
    def enterList_of_genvar_identifiers(self, ctx:SystemVerilogParser.List_of_genvar_identifiersContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#list_of_genvar_identifiers.
    def exitList_of_genvar_identifiers(self, ctx:SystemVerilogParser.List_of_genvar_identifiersContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#list_of_interface_identifiers.
    def enterList_of_interface_identifiers(self, ctx:SystemVerilogParser.List_of_interface_identifiersContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#list_of_interface_identifiers.
    def exitList_of_interface_identifiers(self, ctx:SystemVerilogParser.List_of_interface_identifiersContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#list_of_net_decl_assignments.
    def enterList_of_net_decl_assignments(self, ctx:SystemVerilogParser.List_of_net_decl_assignmentsContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#list_of_net_decl_assignments.
    def exitList_of_net_decl_assignments(self, ctx:SystemVerilogParser.List_of_net_decl_assignmentsContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#list_of_param_assignments.
    def enterList_of_param_assignments(self, ctx:SystemVerilogParser.List_of_param_assignmentsContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#list_of_param_assignments.
    def exitList_of_param_assignments(self, ctx:SystemVerilogParser.List_of_param_assignmentsContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#list_of_port_identifiers.
    def enterList_of_port_identifiers(self, ctx:SystemVerilogParser.List_of_port_identifiersContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#list_of_port_identifiers.
    def exitList_of_port_identifiers(self, ctx:SystemVerilogParser.List_of_port_identifiersContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#list_of_udp_port_identifiers.
    def enterList_of_udp_port_identifiers(self, ctx:SystemVerilogParser.List_of_udp_port_identifiersContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#list_of_udp_port_identifiers.
    def exitList_of_udp_port_identifiers(self, ctx:SystemVerilogParser.List_of_udp_port_identifiersContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#list_of_specparam_assignments.
    def enterList_of_specparam_assignments(self, ctx:SystemVerilogParser.List_of_specparam_assignmentsContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#list_of_specparam_assignments.
    def exitList_of_specparam_assignments(self, ctx:SystemVerilogParser.List_of_specparam_assignmentsContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#list_of_tf_variable_identifiers.
    def enterList_of_tf_variable_identifiers(self, ctx:SystemVerilogParser.List_of_tf_variable_identifiersContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#list_of_tf_variable_identifiers.
    def exitList_of_tf_variable_identifiers(self, ctx:SystemVerilogParser.List_of_tf_variable_identifiersContext):
        pass
    # Enter a parse tree produced by SystemVerilogParser#list_of_type_assignments.
    def enterList_of_type_assignments(self, ctx:SystemVerilogParser.List_of_type_assignmentsContext):
        pass
    # Exit a parse tree produced by SystemVerilogParser#list_of_type_assignments.
    def exitList_of_type_assignments(self, ctx:SystemVerilogParser.List_of_type_assignmentsContext):
        pass
# Enter a parse tree produced by SystemVerilogParser#list_of_variable_decl_assignments.
| |
<reponame>newbooks/SAMPL7<gh_stars>10-100
#!/usr/bin/env python
# Credit:
# This was adapted by <NAME> from <NAME>'s file of the same name, which he wrote for SAMPL6
# at https://github.com/samplchallenges/SAMPL6/blob/master/host_guest/Analysis/ExperimentalMeasurements/generate_tables.py
# He gets credit for anything good about it; I deserve blame for any problems.
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
import os
import math
import csv
import json
from collections import OrderedDict
import numpy as np
from simtk import unit as u
# =============================================================================
# CONSTANTS
# =============================================================================
T = 298 * u.kelvin  # experimental temperature; presumably used for Kd <-> DG conversion -- confirm downstream
R = u.MOLAR_GAS_CONSTANT_R  # molar gas constant (simtk.unit)
RELATIVE_TITRANT_CONC_ERROR = 0.03  # fractional (3%) uncertainty assumed for titrant concentrations
# Input files mapping guest IDs to SMILES strings, one file per host system.
CLIP_GUESTS_SMILES_PATH = '../../Isaacs_clip/guest_files/trimertrip_guest_smiles.txt'
GDCC_GUESTS_SMILES_PATH = '../../GDCC_and_guests/guest_files/GDCC_guest_smiles.txt'
CD_GUESTS_SMILES_PATH = '../../cyclodextrin_derivatives/guest_files/cyclodextrin_guest_smiles.txt'
# Input files mapping guest IDs to human-readable names, one file per host system.
CLIP_GUESTS_NAMES_PATH = '../../Isaacs_clip/guest_files/trimertrip_guest_names.txt'
GDCC_GUESTS_NAMES_PATH = '../../GDCC_and_guests/guest_files/GDCC_guest_names.txt'
CD_GUESTS_NAMES_PATH = '../../cyclodextrin_derivatives/guest_files/cyclodextrin_guest_names.txt'
# Cyclodextrin-derivative host identifiers (appear as prefixes of EXPERIMENTAL_DATA keys).
CD_HOST_NAMES = ['bCD', 'MGLab_8', 'MGLab_9','MGLab_19', 'MGLab_23', 'MGLab_24', 'MGLab_34', 'MGLab_35', 'MGLab_36']
# Experimental results as provided by the Gibb, Isaacs and Gilson groups.
# The error is relative. None means that the error is <1%.
EXPERIMENTAL_DATA = OrderedDict([
('clip-g1', OrderedDict([
('Kd_1', 34.2e-6 * u.molar), ('dKd_1', 3.29e-6 * u.molar),
('DH_1', -6.03 * u.kilocalories_per_mole), ('dDH_1', 0.260 * u.kilocalories_per_mole),
('Kd_2', 29.6e-6 * u.molar), ('dKd_2', 7.70e-6 * u.molar),
('DH_2', -6.14 * u.kilocalories_per_mole), ('dDH_2', 0.696 * u.kilocalories_per_mole),
('TDS', None), ('dTDS', None),
('n', np.mean([0.899, 0.825]))
])),
('clip-g2', OrderedDict([
('Kd_1', 749e-9 * u.molar), ('dKd_1', 17.7e-9 * u.molar),
('DH_1', -8.58 * u.kilocalories_per_mole), ('dDH_1', 0.021 * u.kilocalories_per_mole),
('Kd_2', 829e-9 * u.molar), ('dKd_2', 40.8e-9 * u.molar),
('DH_2', -8.95 * u.kilocalories_per_mole), ('dDH_2', 0.053 * u.kilocalories_per_mole),
('TDS', None), ('dTDS', None),
('n', np.mean([0.891, 1.11]))
])),
('clip-g3', OrderedDict([
('Kd_1', 43.5e-9 * u.molar), ('dKd_1', 3.39e-9 * u.molar),
('DH_1', -10.8 * u.kilocalories_per_mole), ('dDH_1', 0.044 * u.kilocalories_per_mole),
('Kd_2', 41.3e-9 * u.molar), ('dKd_2', 3.24e-9 * u.molar),
('DH_2', -10.9 * u.kilocalories_per_mole), ('dDH_2', 0.043 * u.kilocalories_per_mole),
('TDS', None), ('dTDS', None),
('n', np.mean([0.974, 0.831]))
])),
('clip-g15', OrderedDict([
('Kd_1', 18.3e-9 * u.molar), ('dKd_1', 1.00e-9 * u.molar),
('DH_1', -12.8 * u.kilocalories_per_mole), ('dDH_1', 0.033 * u.kilocalories_per_mole),
('Kd_2', 20.0e-9 * u.molar), ('dKd_2', 8.74e-10 * u.molar),
('DH_2', -12.7 * u.kilocalories_per_mole), ('dDH_2', 0.028 * u.kilocalories_per_mole),
('TDS', None), ('dTDS', None),
('n', np.mean([0.917, 1.02]))
])),
('clip-g12', OrderedDict([
('Kd_1', 830e-9 * u.molar), ('dKd_1', 23.3e-9 * u.molar),
('DH_1', -8.54 * u.kilocalories_per_mole), ('dDH_1', 0.027 * u.kilocalories_per_mole),
('Kd_2', 827e-9 * u.molar), ('dKd_2', 31.0e-9 * u.molar),
('DH_2', -8.25 * u.kilocalories_per_mole), ('dDH_2', 0.034 * u.kilocalories_per_mole),
('TDS', None), ('dTDS', None),
('n', np.mean([0.970, 0.907]))
])),
('clip-g5', OrderedDict([
('Kd_1', 7.07e-9 * u.molar), ('dKd_1', 1.13e-9 * u.molar),
('DH_1', -11.5 * u.kilocalories_per_mole), ('dDH_1', 0.094 * u.kilocalories_per_mole),
('Kd_2', 6.63e-9 * u.molar), ('dKd_2', 8.99e-10 * u.molar),
('DH_2', -11.3 * u.kilocalories_per_mole), ('dDH_2', 0.07 * u.kilocalories_per_mole),
('TDS', None), ('dTDS', None),
('n', np.mean([0.950, 0.837]))
])),
('clip-g16', OrderedDict([
('Kd_1', 3.55e-9 * u.molar), ('dKd_1', 7.80e-10 * u.molar),
('DH_1', -11.3 * u.kilocalories_per_mole), ('dDH_1', 0.068 * u.kilocalories_per_mole),
('Kd_2', 3.27e-9 * u.molar), ('dKd_2', 8.48e-10 * u.molar),
('DH_2', -11.2 * u.kilocalories_per_mole), ('dDH_2', 0.072 * u.kilocalories_per_mole),
('TDS', None), ('dTDS', None),
('n', np.mean([0.925, 0.856]))
])),
('clip-g17', OrderedDict([
('Kd_1', 1.97e-9 * u.molar), ('dKd_1', 1.06e-9 * u.molar),
('DH_1', -10.4 * u.kilocalories_per_mole), ('dDH_1', 0.123 * u.kilocalories_per_mole),
('Kd_2', 2.20e-9 * u.molar), ('dKd_2', 5.76e-10 * u.molar),
('DH_2', -10.4 * u.kilocalories_per_mole), ('dDH_2', 0.064 * u.kilocalories_per_mole),
('TDS', None), ('dTDS', None),
('n', np.mean([0.968, 0.979]))
])),
('clip-g9', OrderedDict([
('Kd_1', 2.80e-6 * u.molar), ('dKd_1', 113e-9 * u.molar),
('DH_1', -4.83 * u.kilocalories_per_mole), ('dDH_1', 0.036 * u.kilocalories_per_mole),
('Kd_2', 2.79e-6 * u.molar), ('dKd_2', 162e-9 * u.molar),
('DH_2', -4.72 * u.kilocalories_per_mole), ('dDH_2', 0.046 * u.kilocalories_per_mole),
('TDS', None), ('dTDS', None),
('n', np.mean([1.07, 0.829]))
])),
('clip-g6', OrderedDict([
('Kd_1', 88.3e-9 * u.molar), ('dKd_1', 9.44e-9 * u.molar),
('DH_1', -10.1 * u.kilocalories_per_mole), ('dDH_1', 0.119 * u.kilocalories_per_mole),
('Kd_2', 97.0e-9 * u.molar), ('dKd_2', 13.6e-9 * u.molar),
('DH_2', -10.3 * u.kilocalories_per_mole), ('dDH_2', 0.165 * u.kilocalories_per_mole),
('TDS', None), ('dTDS', None),
('n', np.mean([0.848, 0.814]))
])),
('clip-g11', OrderedDict([
('Kd_1', 245e-9 * u.molar), ('dKd_1', 22.3e-9 * u.molar),
('DH_1', -7.41 * u.kilocalories_per_mole), ('dDH_1', 0.084 * u.kilocalories_per_mole),
('Kd_2', 238e-9 * u.molar), ('dKd_2', 22.2e-9 * u.molar),
('DH_2', -7.35 * u.kilocalories_per_mole), ('dDH_2', 0.084 * u.kilocalories_per_mole),
('TDS', None), ('dTDS', None),
('n', np.mean([0.848, 0.846]))
])),
('clip-g10', OrderedDict([
('Kd_1', 902e-9 * u.molar), ('dKd_1', 64.8e-9 * u.molar),
('DH_1', -5.88 * u.kilocalories_per_mole), ('dDH_1', 0.049 * u.kilocalories_per_mole),
('Kd_2', 1.17e-6 * u.molar), ('dKd_2', 72.6e-9 * u.molar),
('DH_2', -5.80 * u.kilocalories_per_mole), ('dDH_2', 0.047 * u.kilocalories_per_mole),
('TDS', None), ('dTDS', None),
('n', np.mean([0.960, 1.02]))
])),
('clip-g8', OrderedDict([
('Kd_1', 114e-9 * u.molar), ('dKd_1', 6.79e-9 * u.molar),
('DH_1', -10.5 * u.kilocalories_per_mole), ('dDH_1', 0.044 * u.kilocalories_per_mole),
('Kd_2', 120e-9 * u.molar), ('dKd_2', 5.34e-9 * u.molar),
('DH_2', -10.6 * u.kilocalories_per_mole), ('dDH_2', 0.033 * u.kilocalories_per_mole),
('TDS', None), ('dTDS', None),
('n', np.mean([0.910, 0.894]))
])),
('clip-g18', OrderedDict([
('Kd_1', 17.2e-9 * u.molar), ('dKd_1', 1.42e-9 * u.molar),
('DH_1', -12.4 * u.kilocalories_per_mole), ('dDH_1', 0.045 * u.kilocalories_per_mole),
('Kd_2', 19.8e-9 * u.molar), ('dKd_2', 2.34e-9 * u.molar),
('DH_2', -12.3 * u.kilocalories_per_mole), ('dDH_2', 0.069 * u.kilocalories_per_mole),
('TDS', None), ('dTDS', None),
('n', np.mean([0.896, 1.00]))
])),
('clip-g19', OrderedDict([
('Kd_1', 2.80e-9 * u.molar), ('dKd_1', 1.53e-10 * u.molar),
('DH_1', -13.7 * u.kilocalories_per_mole), ('dDH_1', 0.039 * u.kilocalories_per_mole),
('Kd_2', 2.74e-9 * u.molar), ('dKd_2', 6.04e-10 * u.molar),
('DH_2', -13.6 * u.kilocalories_per_mole), ('dDH_2', 0.144 * u.kilocalories_per_mole),
('TDS', None), ('dTDS', None),
('n', np.mean([0.755, 0.828]))
])),
('clip-g7', OrderedDict([
('Kd_1', 16.8e-6 * u.molar), ('dKd_1', 652e-9 * u.molar),
('DH_1', -6.61 * u.kilocalories_per_mole), ('dDH_1', 0.088 * u.kilocalories_per_mole),
('Kd_2', 17.2e-6 * u.molar), ('dKd_2', 1.24e-6 * u.molar),
('DH_2', -6.80 * u.kilocalories_per_mole), ('dDH_2', 0.170 * u.kilocalories_per_mole),
('TDS', None), ('dTDS', None),
('n', np.mean([0.839, 0.813]))
])),
('OA-g1', OrderedDict([
('DG', -20.8 * u.kilojoules_per_mole), ('dDG', 0.1 * u.kilojoules_per_mole),
('DH', -23.2 * u.kilojoules_per_mole), ('dDH', 0.4 * u.kilojoules_per_mole),
('TDS', -2.4 * u.kilojoules_per_mole), ('dTDS', 0.3 * u.kilojoule_per_mole),
('n', 1)
])),
('OA-g2', OrderedDict([
('DG', -28.9 * u.kilojoules_per_mole), ('dDG', 0.1 * u.kilojoules_per_mole),
('DH', -40.2 * u.kilojoules_per_mole), ('dDH', 1.1 * u.kilojoules_per_mole),
('TDS', -11.0 * u.kilojoules_per_mole), ('dTDS', 1.0 * u.kilojoule_per_mole),
('n', 1)
])),
('OA-g3', OrderedDict([
('DG', -33.9 * u.kilojoules_per_mole), ('dDG', 0.1 * u.kilojoules_per_mole),
('DH', -50.2 * u.kilojoules_per_mole), ('dDH', 0.1 * u.kilojoules_per_mole),
('TDS', -16.3 * u.kilojoules_per_mole), ('dTDS', 0.1 * u.kilojoule_per_mole),
('n', 1)
])),
('OA-g4', OrderedDict([
('DG', -28.3 * u.kilojoules_per_mole), ('dDG', 0.2 * u.kilojoules_per_mole),
('DH', -28.0 * u.kilojoules_per_mole), ('dDH', 0.7 * u.kilojoules_per_mole),
('TDS', 0.3 * u.kilojoules_per_mole), ('dTDS', 0.5 * u.kilojoule_per_mole),
('n', 1)
])),
('OA-g5', OrderedDict([
('DG', -19.8 * u.kilojoules_per_mole), ('dDG', 0.1 * u.kilojoules_per_mole),
('DH', -31.3 * u.kilojoules_per_mole), ('dDH', 0.2 * u.kilojoules_per_mole),
('TDS', -11.5 * u.kilojoules_per_mole), ('dTDS', 0.2 * u.kilojoule_per_mole),
('n', 1)
])),
('OA-g6', OrderedDict([
('DG', -20.8 * u.kilojoules_per_mole), ('dDG', 0.1 * u.kilojoules_per_mole),
('DH', -30.5 * u.kilojoules_per_mole), ('dDH', 1.4 * u.kilojoules_per_mole),
('TDS', -9.6 * u.kilojoules_per_mole), ('dTDS', 1.4 * u.kilojoule_per_mole),
('n', 1)
])),
('OA-g7', OrderedDict([
('DG', -25.4 * u.kilojoules_per_mole), ('dDG', 0.2 * u.kilojoules_per_mole),
('DH', -24.0 * u.kilojoules_per_mole), ('dDH', 0.7 * u.kilojoules_per_mole),
('TDS', 1.4 * u.kilojoules_per_mole), ('dTDS', 0.5 * u.kilojoule_per_mole),
('n', 1)
])),
('OA-g8', OrderedDict([
('DG', -34.5 * u.kilojoules_per_mole), ('dDG', 0.1 * u.kilojoules_per_mole),
('DH', -32.7 * u.kilojoules_per_mole), ('dDH', 0.8 * u.kilojoules_per_mole),
('TDS', 1.7 * u.kilojoules_per_mole), ('dTDS', 0.6 * u.kilojoule_per_mole),
('n', 1)
])),
('exoOA-g1', OrderedDict([
('DG', 'NaN'), ('dDG', 'NaN'),
('DH', 'NaN'), ('dDH', 'NaN'),
('TDS', 'NaN'), ('dTDS', 'NaN'),
('n', 1)
])),
('exoOA-g2', OrderedDict([
('DG', -5.5 * u.kilojoules_per_mole), ('dDG', 1.2 * u.kilojoules_per_mole),
('DH', 'NaN'), ('dDH', 'NaN'),
('TDS', 'NaN'), ('dTDS', 'NaN'),
('n', 1)
])),
('exoOA-g3', OrderedDict([
('DG', -14.1 * u.kilojoules_per_mole), ('dDG', 0.3 * u.kilojoules_per_mole),
('DH', -25.2 * u.kilojoules_per_mole), ('dDH', 0.6 * u.kilojoules_per_mole),
('TDS', -11.1 * u.kilojoules_per_mole), ('dTDS', 0.3 * u.kilojoules_per_mole),
('n', 1)
])),
('exoOA-g4', OrderedDict([
('DG', -15.1 * u.kilojoules_per_mole), ('dDG', 0.1 * u.kilojoules_per_mole),
('DH', -30.5 * u.kilojoules_per_mole), ('dDH', 2.9 * u.kilojoules_per_mole),
('TDS', -15.4 * u.kilojoules_per_mole), ('dTDS', 2.8 * u.kilojoules_per_mole),
('n', 1)
])),
('exoOA-g5', OrderedDict([
('DG', -23.3 * u.kilojoules_per_mole), ('dDG', 0.1 * u.kilojoules_per_mole),
('DH', -25.8 * u.kilojoules_per_mole), ('dDH', 0.1 * u.kilojoules_per_mole),
('TDS', -2.5 * u.kilojoules_per_mole), ('dTDS', 0.1 * u.kilojoules_per_mole),
('n', 1)
])),
('exoOA-g6', OrderedDict([
('DG', -24.4 * u.kilojoules_per_mole), ('dDG', 0.1 * u.kilojoules_per_mole),
('DH', -13.6 * u.kilojoules_per_mole), ('dDH', 0.1 * u.kilojoules_per_mole),
('TDS', 10.8 * u.kilojoules_per_mole), ('dTDS', 0.1 * u.kilojoules_per_mole),
('n', 1)
])),
('exoOA-g7', OrderedDict([
('DG', -29.2 * u.kilojoules_per_mole), ('dDG', 0.4 * u.kilojoules_per_mole),
('DH', -20.8 * u.kilojoules_per_mole), ('dDH', 0.3 * u.kilojoules_per_mole),
('TDS', 8.4 * u.kilojoules_per_mole), ('dTDS', 0.2 * u.kilojoules_per_mole),
('n', 1)
])),
('exoOA-g8', OrderedDict([
('DG', -32.1 * u.kilojoules_per_mole), ('dDG', 0.1 * u.kilojoules_per_mole),
('DH', -21.1 * u.kilojoules_per_mole), ('dDH', 0.2 * u.kilojoules_per_mole),
('TDS', 11.0 * u.kilojoules_per_mole), ('dTDS', 0.1 * u.kilojoules_per_mole),
('n', 1)
])),
('bCD-g1', OrderedDict([
('Ka_1', 2025.31 / u.molar), ('dKa_1', 68.11 / u.molar),
('Ka_2', 2098.64 / u.molar), ('dKa_2', 66.95 / u.molar),
('DH_1', -10.90 * u.kilojoules_per_mole), ('dDH_1', 0.44 * u.kilojoules_per_mole),
('DH_2', -10.53 * u.kilojoules_per_mole), ('dDH_2', 0.47 * u.kilojoules_per_mole),
#('TDS_1', 8.09 * u.kilojoules_per_mole), ('dTDS_1', 0.45 * u.kilojoules_per_mole),
#('TDS_2', 8.56 * u.kilojoules_per_mole), ('dTDS_2', 0.47 * u.kilojoules_per_mole),
('TDS', None), ('dTDS', None),
| |
'''
'''
import os
import sys
import h5py
import numpy as np
from scipy.stats import chi2
np.seterr(divide='ignore', invalid='ignore')
# -- abcpmc --
import abcpmc
from abcpmc import mpi_util
# -- galpopfm --
from . import dustfm as dustFM
from . import measure_obs as measureObs
dat_dir = os.environ['GALPOPFM_DIR']
def distance_metric(x_obs, x_model, method='chi2', x_err=None):
    ''' Distance between observed and forward-modeled summary statistics.

    :param x_obs: list of observed summary-statistic arrays
    :param x_model: list of model summary-statistic arrays (same shapes)
    :param method: 'chi2' (error-weighted squared), 'L2', or 'L1'
    :param x_err: per-statistic errors; defaults to 1 for every statistic
    :return: list with one scalar distance per summary statistic
    :raises NotImplementedError: for an unrecognized `method`
    '''
    if method not in ('chi2', 'L2', 'L1'):
        raise NotImplementedError
    if x_err is None:
        x_err = [1. for _ in x_obs]
    rho = []
    for obs_i, mod_i, err_i in zip(x_obs, x_model, x_err):
        resid = obs_i - mod_i
        if method == 'chi2':
            # error-weighted sum of squared residuals
            rho.append(np.sum(resid**2 / err_i**2))
        elif method == 'L2':
            # unweighted sum of squared residuals
            rho.append(np.sum(resid**2))
        else:
            # L1 norm: sum of absolute residuals
            rho.append(np.sum(np.abs(resid)))
    return rho
def sumstat_obs(statistic='2d', return_bins=False):
    ''' summary statistics for SDSS observations: the histogram of
    [M_r, G-R, FUV-NUV] marginalized according to `statistic`.

    :param statistic:
        '1d', '2d', or '3d' -- which marginalization of the histogram to load.
    :param return_bins:
        if True, also return the bin edges (r_edges, gr_edges, fn_edges).
    :raises ValueError:
        if `statistic` is not one of '1d', '2d', '3d'.
    notes
    -----
    * 09/22/2020: observation summary statistics updated to Jeremy's SDSS
      catalog (centrals *and* satellites) with NSA absolute magnitudes
    * see `nb/observables.ipynb` to see exactly how the summary statistic is
      calculated.
    '''
    if statistic == '1d':
        r_edges, gr_edges, fn_edges, x_gr, x_fn, _, _ = np.load(
            os.path.join(dat_dir, 'obs', 'tinker.Mr_20.Mr.GR.FUVNUV.npy'),
            allow_pickle=True)
        dgr = gr_edges[1] - gr_edges[0]
        nbar = dgr * np.sum(x_gr)  # number density from the G-R histogram
        x_obs = [nbar, x_gr, x_fn]
    elif statistic == '2d':
        r_edges, gr_edges, fn_edges, x_gr, x_fn, _, _ = np.load(
            os.path.join(dat_dir, 'obs', 'tinker.Mr_20.Mr_GR.Mr_FUVNUV.npy'),
            allow_pickle=True)
        dr = r_edges[1] - r_edges[0]
        dgr = gr_edges[1] - gr_edges[0]
        # BUG FIX: a stray trailing comma previously made nbar a 1-tuple here,
        # which breaks the elementwise arithmetic in distance_metric.
        nbar = dr * dgr * np.sum(x_gr)
        x_obs = [nbar, x_gr, x_fn]
    elif statistic == '3d':
        r_edges, gr_edges, fn_edges, _x_obs, _ = np.load(
            os.path.join(dat_dir, 'obs', 'tinker.Mr_20.Mr_GR_FUVNUV.npy'),
            allow_pickle=True)
        dr = r_edges[1] - r_edges[0]
        dgr = gr_edges[1] - gr_edges[0]
        dfn = fn_edges[1] - fn_edges[0]
        nbar = dr * dgr * dfn * np.sum(_x_obs)
        x_obs = [nbar, _x_obs]
    else:
        # previously fell through to a NameError on x_obs; fail loudly instead
        raise ValueError("statistic must be one of '1d', '2d', '3d'")
    if return_bins:
        return r_edges, gr_edges, fn_edges, x_obs
    return x_obs
def sumstat_model(theta, sed=None, dem='slab_calzetti', f_downsample=1.,
        statistic='2d', noise=True, seed=None, return_datavector=False,
        sfr0_prescription='adhoc'):
    ''' calculate summary statistics for forward model m(theta)
    :param theta:
        array of input parameters
    :param sed:
        dictionary with SEDs of **central** galaxies
    :param dem:
        string specifying the dust empirical model
    :param f_downsample:
        if f_downsample > 1., then the SED dictionary is downsampled.
    :param statistic:
        '1d', '2d' or '3d' marginalization of the histogram (see returns).
    :param noise:
        if True, apply the simple magnitude/color noise model below.
    :param seed:
        optional RNG seed; only used when `noise` is True.
    :param return_datavector:
        if True, return the raw per-galaxy data vector and the UV cut mask
        instead of the histogram summary statistic.
    :param sfr0_prescription:
        prescription for dealing with SFR=0 galaxies
    notes
    -----
    * 09/22/2020: simple noise model implemented
    * 4/22/2020: extra_data kwarg added. This is to pass pre-sampled
        observables for SFR = 0 galaxies
    '''
    # don't touch these values! they are set to agree with the binning of
    # observable
    nbins = [8, 400, 200]
    ranges = [(20, 24), (-5., 20.), (-5, 45.)]
    dRmag = 0.5  # bin widths implied by nbins/ranges above
    dGR = 0.0625
    dfuvnuv = 0.25
    # SFR=0 galaxies are sentinel-coded with logsfr.inst == -999
    sfr0 = (sed['logsfr.inst'] == -999)
    if sfr0_prescription == 'adhoc':
        # NOTE(review): 'adhoc' is the *default* yet raises immediately; the
        # concatenation branch further down is therefore unreachable and
        # references undefined R_mag_sfr0/G_R_sfr0/FUV_NUV_sfr0 -- confirm intent.
        raise ValueError
        #R_mag_sfr0, G_R_sfr0, FUV_NUV_sfr0 = _observable_zeroSFR(
        #    sed['wave'],
        #    sed['sed_noneb'][sfr0,:])
    elif sfr0_prescription == 'sfrmin':
        # replace SFR=0 sentinels with the minimum non-sentinel SFR in the sample
        logsfr_min = sed['logsfr.inst'][~sfr0].min() # minimum SFR
        print(logsfr_min)
        sed['logsfr.inst'][sfr0] = logsfr_min
    else:
        raise NotImplementedError
    # attenuate the SEDs with the dust empirical model parameterized by theta
    sed_dusty = dustFM.Attenuate(
        theta,
        sed['wave'],
        sed['sed_noneb'],
        sed['sed_onlyneb'],
        sed['logmstar'],
        sed['logsfr.inst'],
        dem=dem)
    # observational measurements
    F_mag = measureObs.AbsMag_sed(sed['wave'], sed_dusty, band='galex_fuv')
    N_mag = measureObs.AbsMag_sed(sed['wave'], sed_dusty, band='galex_nuv')
    G_mag = measureObs.AbsMag_sed(sed['wave'], sed_dusty, band='g_sdss')
    R_mag = measureObs.AbsMag_sed(sed['wave'], sed_dusty, band='r_sdss')
    # apply FUV and NUV cut
    uv_cut = (F_mag < -13.5) & (N_mag < -14)
    F_mag = F_mag[uv_cut]
    N_mag = N_mag[uv_cut]
    G_mag = G_mag[uv_cut]
    R_mag = R_mag[uv_cut]
    # calculate color
    FUV_NUV = F_mag - N_mag
    G_R = G_mag - R_mag
    if sfr0_prescription == 'adhoc':
        # append sampled SFR=0 observables to data vector
        # NOTE(review): unreachable -- 'adhoc' raises ValueError above, and
        # R_mag_sfr0 et al. are never defined in this scope.
        R_mag = np.concatenate([R_mag, R_mag_sfr0])
        G_R = np.concatenate([G_R, G_R_sfr0])
        FUV_NUV = np.concatenate([FUV_NUV, FUV_NUV_sfr0])
    n_gal = len(R_mag)
    if noise:
        if seed is not None:
            np.random.seed(seed)
        # noise model (simplest model): per-galaxy sigmas drawn from chi2
        # distributions; the G-R scatter grows with R-band faintness
        sig_R = chi2.rvs(3, loc=0.02, scale=0.00003, size=n_gal)
        sig_FN = chi2.rvs(2, loc=0.05, scale=0.05, size=n_gal)
        sig_GR = chi2.rvs(3, size=n_gal) * (0.00001 * (R_mag + 20.1) + 0.00005)\
                + (0.000025 * (R_mag + 20.1) + 0.02835)
        R_mag += np.random.normal(size=n_gal) * sig_R
        FUV_NUV += np.random.normal(size=n_gal) * sig_FN
        G_R += np.random.normal(size=n_gal) * sig_GR
    # flip the sign of R_mag so values land inside the (20, 24) histogram range
    data_vector = np.array([-1.*R_mag, G_R, FUV_NUV]).T
    if return_datavector:
        return data_vector.T, uv_cut
    Nbins, _ = np.histogramdd(data_vector, bins=nbins, range=ranges)
    # volume of simulation (units not stated here -- confirm against the sims)
    vol = {'simba': 100.**3, 'tng': 75.**3, 'eagle': 67.77**3}[sed['sim']]
    # number density per bin, corrected for any downsampling of the SED dict
    x_model = Nbins.astype(float) / vol / dRmag / dGR / dfuvnuv / f_downsample
    nbar = dRmag * dGR * dfuvnuv * np.sum(x_model)
    if statistic == '3d':
        return [nbar, x_model]
    elif statistic == '2d':
        # marginalize over FUV-NUV and over G-R respectively
        x_r_gr = dfuvnuv * np.sum(x_model, axis=2)
        x_r_fn = dGR * np.sum(x_model, axis=1)
        return [nbar, x_r_gr, x_r_fn]
    elif statistic == '1d':
        x_gr = dRmag * np.sum(dfuvnuv * np.sum(x_model, axis=2), axis=0)
        x_fn = dRmag * np.sum(dGR * np.sum(x_model, axis=1), axis=0)
        return [nbar, x_gr, x_fn]
def _observable_zeroSFR(wave, sed):
    ''' For SFR = 0 galaxies, sample G-R and FUV-NUV color directly from the
    G-R and FUV-NUV distributions of quiescent SDSS galaxies; this removes
    these galaxies from consideration in the inference.
    See `nb/sdss_quiescent_sumstat.ipynb` for details.
    notes
    -----
    * 09/22/2020: updated the quiescent distributions since the observational
        dataset has been updated.
    * in principle the G-R and FUV-NUV sampling could be done per R bin, but
        at the moment it is not.
    * this only runs once so it is not optimized in any way
    '''
    n_gal = sed.shape[0]
    # G-R and FUV-NUV distributions of SDSS quiescent galaxies
    gr_edges, gr_nbins = np.load(os.path.join(dat_dir, 'obs',
        'tinker.Mr_20.quiescent.G_R_dist.npy'), allow_pickle=True)
    fn_edges, fn_nbins = np.load(os.path.join(dat_dir, 'obs',
        'tinker.Mr_20.quiescent.FUV_NUV_dist.npy'), allow_pickle=True)
    # Mr from the SEDs
    R_mag = measureObs.AbsMag_sed(wave, sed, band='r_sdss')
    # inverse-transform sampling from the SDSS distributions: draw uniforms,
    # pick the bin center whose CDF value is nearest each draw
    gr_cdf = np.cumsum(gr_nbins) / np.sum(gr_nbins)
    fn_cdf = np.cumsum(fn_nbins) / np.sum(fn_nbins)
    gr_mid = 0.5 * (gr_edges[:-1] + gr_edges[1:])
    fn_mid = 0.5 * (fn_edges[:-1] + fn_edges[1:])
    draws = np.random.rand(n_gal)
    G_R = np.array([gr_mid[np.abs(u - gr_cdf).argmin()] for u in draws])
    FUV_NUV = np.array([fn_mid[np.abs(u - fn_cdf).argmin()] for u in draws])
    return [R_mag, G_R, FUV_NUV]
def median_alongr(rmag, values, rmin=-20., rmax=-24., nbins=16):
    ''' Median of `values` in bins of r-band magnitude.

    Bins run from `rmin` toward `rmax` in nbins-1 steps of width
    (rmin - rmax)/nbins; returns (bin centers, per-bin medians).
    '''
    width = (rmin - rmax) / float(nbins)
    finite = np.isfinite(values)
    meds = []
    for i in range(nbins - 1):
        upper = rmin - width * i       # brighter edge (exclusive)
        lower = rmin - width * (i + 1) # fainter edge (inclusive)
        in_bin = (rmag < upper) & (rmag >= lower) & finite
        meds.append(np.median(values[in_bin]))
    centers = rmin - width * (np.arange(nbins - 1).astype(int) + 0.5)
    return centers, np.array(meds)
def _read_sed(name, seed=0):
    ''' Read the SED HDF5 file for simulation `name` into a dict.

    :param name: one of 'simba', 'tng', 'eagle'
    :param seed: currently unused; kept for backward compatibility with the
        disabled SFR-resolution resampling noted below.
    :raises NotImplementedError: for an unrecognized simulation name
    :return: dict with wave, SED arrays, logmstar, SFRs (sentinel -999 for
        non-finite values) and censat flags
    '''
    if name not in ['simba', 'tng', 'eagle']: raise NotImplementedError
    fhdf5 = os.path.join(dat_dir, 'sed', '%s.hdf5' % name)
    sed = {}
    # context manager guarantees the file is closed even on error
    with h5py.File(fhdf5, 'r') as f:
        sed['wave'] = f['wave'][...]
        sed['sed_neb'] = f['sed_neb'][...]
        sed['sed_noneb'] = f['sed_noneb'][...]
        sed['logmstar'] = f['logmstar'][...]
        if 'logsfr.100' in f.keys():
            sed['logsfr.100'] = f['logsfr.100'][...]
        sed['logsfr.inst'] = f['logsfr.inst'][...]
        sed['censat'] = f['censat'][...]
    sed['sed_onlyneb'] = sed['sed_neb'] - sed['sed_noneb'] # only nebular emissions
    # NOTE: an earlier version resampled SFRs below the simulation resolution
    # limit here (res_sfr = 0.182 for simba; the tng value was flagged wrong);
    # that code was disabled in favor of the sentinel-coding below.
    # BUG FIX: the original re-checked `f.keys()` *after* f.close(); test
    # membership on the already-populated dict instead.
    if 'logsfr.100' in sed:
        isnan = (~np.isfinite(sed['logsfr.100']))
        sed['logsfr.100'][isnan] = -999.
    # sentinel-code non-finite (log of zero) instantaneous SFRs
    isnan = (~np.isfinite(sed['logsfr.inst']))
    sed['logsfr.inst'][isnan] = -999.
    return sed
def writeABC(type, pool, prior=None, abc_dir=None):
''' Given abcpmc pool object. Writeout specified ABC pool property
'''
if abc_dir is None:
abc_dir = os.path.join(dat_dir, 'abc')
if type == 'init': # initialize
if not os.path.exists(abc_dir):
try:
os.makedirs(abc_dir)
except OSError:
pass
# write specific info of the run
f = open(os.path.join(abc_dir, 'info.md'), 'w')
f.write('# '+run+' run specs \n')
f.write('N_particles = %i \n' % pool.N)
f.write('Distance | |
|= ww
for tt in ff:
vv -= und(tt)
return vv
def fund(ff):
und = transformsUnderlying
vv = set()
for tt in ff:
vv |= und(tt)
for (aa,ww) in ff:
vv -= ww
return vv
def depends(ff,vv):
und = transformsUnderlying
dd = dict([(v,(xx,ww)) for (xx,ww) in ff for v in ww])
yy = set(dd.keys())
def deps(uu,xx):
ff = []
for w in uu & yy - xx:
tt = dd[w]
ff.append(tt)
zz = xx.copy()
zz.add(w)
ff = ff + deps(und(tt),zz)
return ff
return set(deps(vv,set()))
def funion(ff,gg):
return qqff(ffqq(ff) | ffqq(gg))
def buildfftup(uu,vvg,ffg,ff,hh,hhp,hhrr,hhrrp):
return parametersSystemsBuilderTupleLevelNoSumlayerMultiEffectiveRepa_ui(xmax,omax,bmax,mmax,uu,vvg,ffg,ff,hh,hhp,hhrr,hhrrp)
def parter(uu,kk,bb,y1):
return parametersSystemsPartitionerMaxRollByMRepa_ui(mmax,umax,pmax,uu,kk,bb,y1)
def roller(qq):
return parametersRollerMaximumRollExcludedSelfRepa_i(qq)
def buildffdervar(uu,vv,ffg,ff,xx,xxp,xxrr,xxrrp):
(x1,s1) = parametersSystemsBuilderDerivedVarsLevelHighestNoSumlayerRepa_ui(wmax,omax,uu,vv,ffg,ff,xx,xxp,xxrr,xxrrp)
return ([(kk,a) for ((kk,_,_),a) in x1],s1)
def layer(uu,ff,mm,xx,xxp,xxrr,xxrrp,l):
if l > lmax:
return (uu,ff,mm)
layerer_log.info(">>> layer\tfud: %d\tlevel node: %d\tlayer: %d" % (f,g,l))
t1 = timer()
tupler_log.info(">>> tupler")
tupler_log.info("level substrate cardinality: %d" % len(vvg))
tupler_log.info("level fud derived cardinality: %d" % len(fder(ffg)))
tupler_log.info("fud cardinality: %d" % len(ffqq(ff)))
tupler_log.info("level excluded fud cardinality: %d" % len(ffqq(ff)-ffqq(ffg)))
stdout.flush()
(x2,s2) = buildfftup(uu,vvg,ffg,ff,xx,xxp,xxrr,xxrrp)
if len(x2) > 0:
tupler_log.info("tuple cardinality: %d" % len(x2))
tupler_log.info("max tuple algn: %.2f" % max([b for (a,b) in x2]))
else:
tupler_log.info("no tuples")
t2 = timer()
tupler_log.info("tupler\tsearched: %d\trate: %.2f" % (s2,s2/(t2-t1)))
tupler_log.info("<<< tupler %.3fs" % (t2-t1))
parter_log.info(">>> parter")
stdout.flush()
y3 = [parter(uu,kk,bb,y1) for ((kk,bb),y1) in x2]
x3 = [x for (ll,_) in y3 for x in ll]
s3 = sum([s for (_,s) in y3])
if len(x3) > 0:
parter_log.info("partitions cardinality: %d" % len(x3))
else:
parter_log.info("no tuple partitions")
t3 = timer()
parter_log.info("parter\tsearched: %d\trate: %.2f" % (s3,s3/(t3-t2)))
parter_log.info("<<< parter %.3fs" % (t3-t2))
roller_log.info(">>> roller")
stdout.flush()
y4 = [roller(qq) for qq in x3]
x4 = [x for (ll,_) in y4 for x in ll]
s4 = sum([s for (_,s) in y4])
if len(x4) > 0:
roller_log.info("roll cardinality: %d" % len(x4))
else:
roller_log.info("no rolls")
t4 = timer()
roller_log.info("roller\tsearched: %d\trate: %.2f" % (s4,s4/(t4-t3)))
roller_log.info("<<< roller %.3fs" % (t4-t3))
applier_log.info(">>> application")
stdout.flush()
ll0 = []
for (yy,pp) in x4:
for (jj,p) in zip(yy,pp):
if max(p) + 1 < len(p):
ii = list(zip(cart(uu,jj),p))
ll0.append(ii)
ll = []
for (b,ii) in enumerate(ll0):
w = VarPair((VarPair((VarPair((VarInt(f),VarInt(g))),VarInt(l))),VarInt(b+1)))
ww = sset([ValInt(u) for (_,u) in ii])
tt = trans(unit([sunion(ss,ssgl(w,ValInt(u))) for (ss,u) in ii]),sgl(w))
ll.append((tt,(w,ww)))
ll1 = []
for (tt,(w,ww)) in ll:
if all([len(ww) != len(ww1) or und(tt) != und(tt1) or ttpp(tt) != ttpp(tt1) for (tt1,(w1,ww1)) in ll if w > w1]):
ll1.append((tt,(w,ww)))
if len(ll1) > 0:
hh = qqff(sset([tt for (tt,_) in ll1]))
uu1 = uunion(uu,lluu([(w,ww) for (_,(w,ww)) in ll1]))
ffr = [tttr(uu1,tt) for (tt,_) in ll1]
xx1 = apply(xx,ffr)
xxp1 = hrhx(xx1)
xxrr1 = apply(xxrr,ffr)
xxrrp1 = hrhx(xxrr1)
gg = funion(funion(ff,hh),depends(ffg,fund(hh)))
applier_log.info("fud cardinality: %d" % len(ffqq(gg)))
t5 = timer()
applier_log.info("<<< application %.3fs" % (t5-t4))
dervarser_log.info( ">>> dervarser")
stdout.flush()
(mm1,s5) = buildffdervar(uu1,vvg,ffg,gg,xx1,xxp1,xxrr1,xxrrp1)
if len(mm1) > 0:
dervarser_log.info("der vars algn density: %.2f" % maxr(mm1))
else:
dervarser_log.info("no der vars sets")
t6 = timer()
dervarser_log.info("dervarser\tsearched: %d\trate: %.2f" % (s5,s5/(t6-t5)))
dervarser_log.info("<<< dervarser %.3fs" % (t6-t5))
layerer_log.info( "<<< layer %.3fs" % (t6-t1))
stdout.flush()
if l <= lmax and (len(mm) == 0 or maxr(mm1) > maxr(mm) + repaRounding):
(ffr,ll0,ll,ll1) = (None,None,None,None)
(x2,x3,x4) = (None,None,None)
return layer(uu1,gg,mm1,xx1,xxp1,xxrr1,xxrrp1,l+1)
else:
t5 = timer()
applier_log.info("<<< application %.3fs" % (t5-t4))
layerer_log.info( "<<< layer %.3fs" % (t5-t1))
stdout.flush()
return (uu,ff,mm)
layerer_log.info(">>> layerer")
t1 = timer()
x1 = layer(uu,fudEmpty(),[],xx,xxp,xxrr,xxrrp,1)
t2 = timer()
layerer_log.info("<<< layerer %.3fs" % (t2-t1))
stdout.flush()
return x1
# parametersSystemsLayererLevelMaxRollByMExcludedSelfHighestIORepa_u_1 ::
# Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer ->
# System -> Set.Set Variable -> Fud ->
# HistoryRepa -> HistogramRepaRed -> HistoryRepa -> HistogramRepaRed -> Integer -> Integer ->
# IO (System, Fud, [(Set.Set Variable, Double)])
def parametersSystemsLayererLevelMaxRollByMExcludedSelfHighestIORepa_u_1(wmax,lmax,xmax,omax,bmax,mmax,umax,pmax,uu,vvg,ffg,xx,xxp,xxrr,xxrrp,f,g):
    """Single level-node layerer search (Python rendering of the Haskell
    signature in the comment above the def).

    Starting from the empty fud, each layer: builds candidate tuples,
    partitions them, rolls the partitions, applies the resulting transforms
    to the histories, and re-measures derived-variable alignment; it recurses
    while the best alignment improves by more than `repaRounding`, up to
    `lmax` layers.

    Returns a (system, fud, [(derived-variable set, alignment)]) triple.

    NOTE(review): depends on many module-level helpers not visible here
    (sset, timer, stdout, the *_log loggers, VarPair/VarInt/ValInt,
    fudEmpty, and the parametersSystems* searcher functions) -- confirm
    they are in scope before reusing this function in isolation.
    """
    # tolerance for comparing alignments between successive layers
    repaRounding = 1e-6
    def sgl(x):
        # singleton set
        return sset([x])
    def maxr(mm):
        # largest alignment value in mm, or 0 when mm is empty
        if len(mm) > 0:
            return list(sset([b for (_,b) in mm]))[-1:][0]
        return 0
    # short aliases for the module-level algebra helpers
    uvars = systemsSetVar
    cart = systemsSetVarsSetStateCartesian_u
    lluu = listsSystem_u
    uunion = pairSystemsUnion
    sunion = pairStatesUnionLeft
    ssgl = stateSingleton
    llaa = listsHistogram_u
    hhvvr = historyRepasVectorVar
    apvvr = histogramRepaRedsVectorVar
    hrhx = historyRepasRed
    def unit(qq):
        # histogram with unit count for each state in qq
        return llaa([(ss,1) for ss in qq])
    tttr = systemsTransformsTransformRepa_u
    # NOTE: `apply` shadows the (Python 2) builtin name; local alias only
    apply = historyRepasListTransformRepasApply_u
    trans = histogramsSetVarsTransform_u
    ttpp = transformsPartition
    und = transformsUnderlying
    qqff = setTransformsFud_u
    ffqq = fudsSetTransform
    fund = fudsUnderlying
    fder = fudsDerived
    depends = fudsSetVarsDepends
    def funion(ff,gg):
        # union of two fuds via their transform sets
        return qqff(ffqq(ff) | ffqq(gg))
    def buildfftup(uu,vvg,ffg,ff,hh,hhp,hhrr,hhrrp):
        return parametersSystemsBuilderTupleLevelNoSumlayerMultiEffectiveRepa_ui(xmax,omax,bmax,mmax,uu,vvg,ffg,ff,hh,hhp,hhrr,hhrrp)
    def parter(uu,kk,bb,y1):
        return parametersSystemsPartitionerMaxRollByMRepa_ui(mmax,umax,pmax,uu,kk,bb,y1)
    def roller(qq):
        return parametersRollerMaximumRollExcludedSelfRepa_i(qq)
    def buildffdervar(uu,vv,ffg,ff,xx,xxp,xxrr,xxrrp):
        (x1,s1) = parametersSystemsBuilderDerivedVarsLevelHighestNoSumlayerRepa_ui(wmax,omax,uu,vv,ffg,ff,xx,xxp,xxrr,xxrrp)
        return ([(kk,a) for ((kk,_,_),a) in x1],s1)
    def layer(uu,ff,mm,xx,xxp,xxrr,xxrrp,l):
        # one recursive layer: tupler -> parter -> roller -> application -> dervarser
        if l > lmax:
            return (uu,ff,mm)
        layerer_log.info(">>> layer\tfud: %d\tlevel node: %d\tlayer: %d" % (f,g,l))
        t1 = timer()
        tupler_log.info(">>> tupler")
        tupler_log.info("level substrate cardinality: %d" % len(vvg))
        tupler_log.info("level fud derived cardinality: %d" % len(fder(ffg)))
        tupler_log.info("fud cardinality: %d" % len(ffqq(ff)))
        tupler_log.info("level excluded fud cardinality: %d" % len(ffqq(ff)-ffqq(ffg)))
        stdout.flush()
        (x2,s2) = buildfftup(uu,vvg,ffg,ff,xx,xxp,xxrr,xxrrp)
        if len(x2) > 0:
            tupler_log.info("tuple cardinality: %d" % len(x2))
            tupler_log.info("max tuple algn: %.2f" % max([b for (a,b) in x2]))
        else:
            tupler_log.info("no tuples")
        t2 = timer()
        tupler_log.info("tupler\tsearched: %d\trate: %.2f" % (s2,s2/(t2-t1)))
        tupler_log.info("<<< tupler %.3fs" % (t2-t1))
        parter_log.info(">>> parter")
        stdout.flush()
        y3 = [parter(uu,kk,bb,y1) for ((kk,bb),y1) in x2]
        x3 = [x for (ll,_) in y3 for x in ll]
        s3 = sum([s for (_,s) in y3])
        if len(x3) > 0:
            parter_log.info("partitions cardinality: %d" % len(x3))
        else:
            parter_log.info("no tuple partitions")
        t3 = timer()
        parter_log.info("parter\tsearched: %d\trate: %.2f" % (s3,s3/(t3-t2)))
        parter_log.info("<<< parter %.3fs" % (t3-t2))
        roller_log.info(">>> roller")
        stdout.flush()
        y4 = [roller(qq) for qq in x3]
        x4 = [x for (ll,_) in y4 for x in ll]
        s4 = sum([s for (_,s) in y4])
        if len(x4) > 0:
            roller_log.info("roll cardinality: %d" % len(x4))
        else:
            roller_log.info("no rolls")
        t4 = timer()
        roller_log.info("roller\tsearched: %d\trate: %.2f" % (s4,s4/(t4-t3)))
        roller_log.info("<<< roller %.3fs" % (t4-t3))
        applier_log.info(">>> application")
        stdout.flush()
        # keep only rolls that actually merge values (max(p)+1 < len(p))
        ll0 = []
        for (yy,pp) in x4:
            for (jj,p) in zip(yy,pp):
                if max(p) + 1 < len(p):
                    ii = list(zip(cart(uu,jj),p))
                    ll0.append(ii)
        # build one transform per surviving roll, with a fresh derived variable
        ll = []
        for (b,ii) in enumerate(ll0):
            w = VarPair((VarPair((VarPair((VarInt(f),VarInt(g))),VarInt(l))),VarInt(b+1)))
            ww = sset([ValInt(u) for (_,u) in ii])
            tt = trans(unit([sunion(ss,ssgl(w,ValInt(u))) for (ss,u) in ii]),sgl(w))
            ll.append((tt,(w,ww)))
        # de-duplicate transforms that have the same underlying/partition
        ll1 = []
        for (tt,(w,ww)) in ll:
            if all([len(ww) != len(ww1) or und(tt) != und(tt1) or ttpp(tt) != ttpp(tt1) for (tt1,(w1,ww1)) in ll if w > w1]):
                ll1.append((tt,(w,ww)))
        if len(ll1) > 0:
            hh = qqff(sset([tt for (tt,_) in ll1]))
            uu1 = uunion(uu,lluu([(w,ww) for (_,(w,ww)) in ll1]))
            ffr = [tttr(uu1,tt) for (tt,_) in ll1]
            xx1 = apply(xx,ffr)
            xxp1 = hrhx(xx1)
            xxrr1 = apply(xxrr,ffr)
            xxrrp1 = hrhx(xxrr1)
            gg = funion(funion(ff,hh),depends(ffg,fund(hh)))
            applier_log.info("fud cardinality: %d" % len(ffqq(gg)))
            t5 = timer()
            applier_log.info("<<< application %.3fs" % (t5-t4))
            dervarser_log.info( ">>> dervarser")
            stdout.flush()
            (mm1,s5) = buildffdervar(uu1,vvg,ffg,gg,xx1,xxp1,xxrr1,xxrrp1)
            if len(mm1) > 0:
                dervarser_log.info("der vars algn density: %.2f" % maxr(mm1))
            else:
                dervarser_log.info("no der vars sets")
            t6 = timer()
            dervarser_log.info("dervarser\tsearched: %d\trate: %.2f" % (s5,s5/(t6-t5)))
            dervarser_log.info("<<< dervarser %.3fs" % (t6-t5))
            layerer_log.info( "<<< layer %.3fs" % (t6-t1))
            stdout.flush()
            # recurse only while the best alignment improves beyond rounding
            if l <= lmax and (len(mm) == 0 or maxr(mm1) > maxr(mm) + repaRounding):
                (ffr,ll0,ll,ll1) = (None,None,None,None)
                (x2,x3,x4) = (None,None,None)
                return layer(uu1,gg,mm1,xx1,xxp1,xxrr1,xxrrp1,l+1)
        else:
            t5 = timer()
            applier_log.info("<<< application %.3fs" % (t5-t4))
            layerer_log.info( "<<< layer %.3fs" % (t5-t1))
            stdout.flush()
        return (uu,ff,mm)
    layerer_log.info(">>> layerer")
    t1 = timer()
    x1 = layer(uu,fudEmpty(),[],xx,xxp,xxrr,xxrrp,1)
    t2 = timer()
    layerer_log.info("<<< layerer %.3fs" % (t2-t1))
    stdout.flush()
    return x1
# parametersSystemsHistoryRepasDecomperLevelMaxRollByMExcludedSelfHighestFmaxIORepa ::
# Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer -> Integer ->
# Integer -> Integer ->
# System -> Tree (Integer, Set.Set Variable, Fud) -> HistoryRepa ->
# IO (Maybe (System, DecompFud))
def parametersSystemsHistoryRepasDecomperLevelMaxRollByMExcludedSelfHighestFmaxIORepa(wmax,lmax,xmax,omax,bmax,mmax,umax,pmax,fmax,mult,seed,uu,zzg,aa):
repaRounding = 1e-6
dom = relationsDomain
def maxd(mm):
if len(mm) > 0:
return list(sset([(b,a) for (a,b) in mm]))[-1]
return (0,sset())
def tsgl(r):
return sdict([(r,sdict())])
uvars = systemsSetVar
acard = histogramsCardinality
trim = histogramsTrim
aall = histogramsList
def red(aa,vv):
return setVarsHistogramsReduce(vv,aa)
def unit(ss):
return setStatesHistogramUnit(sset([ss]))
qqff = setTransformsFud_u
ffqq = fudsSetTransform
def fder(ff):
und = transformsUnderlying
vv = set()
for (aa,ww) in ff:
vv |= ww
for tt in ff:
vv -= und(tt)
return vv
    def fvars(ff):
        # All variables mentioned by any transform histogram in fud ff.
        # NOTE(review): the local name `vars` shadows the builtin; harmless
        # here but worth renaming in a behavior-changing pass.
        vars = histogramsSetVar
        vv = set()
        for (aa,ww) in ff:
            vv |= vars(aa)
        return vv
def fund(ff):
und = transformsUnderlying
vv = set()
for tt in ff:
vv |= und(tt)
for (aa,ww) in ff:
| |
# coding: utf-8
"""
Jamf Pro API
## Overview This is a sample Jamf Pro server which allows for usage without any authentication. The Jamf Pro environment which supports the Try it Out functionality does not run the current beta version of Jamf Pro, thus any newly added endpoints will result in an error and should be used soley for documentation purposes. # noqa: E501
The version of the OpenAPI document: 10.25.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from jamf.api_client import ApiClient
from jamf.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class PatchPoliciesPreviewApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def patch_patch_policies_get(self, **kwargs): # noqa: E501
"""Return a list of patch policies # noqa: E501
Returns a list of patch policies. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_patch_policies_get(async_req=True)
>>> result = thread.get()
:param on_dashboard: Filters whether or not the patch policies are on the dashboard.
:type on_dashboard: bool
:param enabled: Filters whether or not the patch policies are enabled.
:type enabled: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: list[PatchPolicySummary]
"""
kwargs['_return_http_data_only'] = True
return self.patch_patch_policies_get_with_http_info(**kwargs) # noqa: E501
def patch_patch_policies_get_with_http_info(self, **kwargs): # noqa: E501
"""Return a list of patch policies # noqa: E501
Returns a list of patch policies. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_patch_policies_get_with_http_info(async_req=True)
>>> result = thread.get()
:param on_dashboard: Filters whether or not the patch policies are on the dashboard.
:type on_dashboard: bool
:param enabled: Filters whether or not the patch policies are enabled.
:type enabled: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(list[PatchPolicySummary], status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'on_dashboard',
'enabled'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_patch_policies_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'on_dashboard' in local_var_params and local_var_params['on_dashboard'] is not None: # noqa: E501
query_params.append(('onDashboard', local_var_params['on_dashboard'])) # noqa: E501
if 'enabled' in local_var_params and local_var_params['enabled'] is not None: # noqa: E501
query_params.append(('enabled', local_var_params['enabled'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
response_types_map = {
200: "list[PatchPolicySummary]",
}
return self.api_client.call_api(
'/patch/patch-policies', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def patch_patch_policies_id_dashboard_delete(self, id, **kwargs): # noqa: E501
"""Remove a patch policy from the dashboard # noqa: E501
Removes a patch policy from the dashboard. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_patch_policies_id_dashboard_delete(id, async_req=True)
>>> result = thread.get()
:param id: patch id (required)
:type id: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
return self.patch_patch_policies_id_dashboard_delete_with_http_info(id, **kwargs) # noqa: E501
def patch_patch_policies_id_dashboard_delete_with_http_info(self, id, **kwargs): # noqa: E501
"""Remove a patch policy from the dashboard # noqa: E501
Removes a patch policy from the dashboard. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_patch_policies_id_dashboard_delete_with_http_info(id, async_req=True)
>>> result = thread.get()
:param id: patch id (required)
:type id: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
local_var_params = locals()
all_params = [
'id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_patch_policies_id_dashboard_delete" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `patch_patch_policies_id_dashboard_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
response_types_map = {}
return self.api_client.call_api(
'/patch/patch-policies/{id}/dashboard', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def patch_patch_policies_id_dashboard_get(self, id, **kwargs): # noqa: E501
"""Return whether or not the requested patch policy is on the dashboard # noqa: E501
Returns whether or not the requested patch policy is on the dashboard # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_patch_policies_id_dashboard_get(id, async_req=True)
>>> result = thread.get()
:param id: patch policy id (required)
:type id: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is | |
<gh_stars>0
print("""
▄████▄ ▄▄▄ ██▒ █▓█████ ██▀███ ███▄ █ ▄▄▄
▒██▀ ▀█ ▒████▄▓██░ █▓█ ▀▓██ ▒ ██▒██ ▀█ █▒████▄
▒▓█ ▄▒██ ▀█▓██ █▒▒███ ▓██ ░▄█ ▓██ ▀█ ██▒██ ▀█▄
▒▓▓▄ ▄██░██▄▄▄▄█▒██ █░▒▓█ ▄▒██▀▀█▄ ▓██▒ ▐▌██░██▄▄▄▄██
▒ ▓███▀ ░▓█ ▓██▒▀█░ ░▒████░██▓ ▒██▒██░ ▓██░▓█ ▓██▒
░ ░▒ ▒ ░▒▒ ▓▒█░ ▐░ ░░ ▒░ ░ ▒▓ ░▒▓░ ▒░ ▒ ▒ ▒▒ ▓▒█░
░ ▒ ▒ ▒▒ ░ ░░ ░ ░ ░ ░▒ ░ ▒░ ░░ ░ ▒░ ▒ ▒▒ ░
░ ░ ▒ ░░ ░ ░░ ░ ░ ░ ░ ░ ▒
░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░
▓█████▄ ▒█████ ██████ ███▄ ▄███▓▒█████ ███▄ █ ██████▄▄▄█████▓██▀███ ▒█████ ██████
▒██▀ ██▒██▒ ██▒██ ▒ ▓██▒▀█▀ ██▒██▒ ██▒██ ▀█ █▒██ ▒▓ ██▒ ▓▓██ ▒ ██▒██▒ ██▒██ ▒
░██ █▒██░ ██░ ▓██▄ ▓██ ▓██▒██░ ██▓██ ▀█ ██░ ▓██▄ ▒ ▓██░ ▒▓██ ░▄█ ▒██░ ██░ ▓██▄
░▓█▄ ▒██ ██░ ▒ ██▒ ▒██ ▒██▒██ ██▓██▒ ▐▌██▒ ▒ ██░ ▓██▓ ░▒██▀▀█▄ ▒██ ██░ ▒ ██▒
░▒████▓░ ████▓▒▒██████▒▒ ▒██▒ ░██░ ████▓▒▒██░ ▓██▒██████▒▒ ▒██▒ ░░██▓ ▒██░ ████▓▒▒██████▒▒
▒▒▓ ▒░ ▒░▒░▒░▒ ▒▓▒ ▒ ░ ░ ▒░ ░ ░ ▒░▒░▒░░ ▒░ ▒ ▒▒ ▒▓▒ ▒ ░ ▒ ░░ ░ ▒▓ ░▒▓░ ▒░▒░▒░▒ ▒▓▒ ▒ ░
░ ▒ ▒ ░ ▒ ▒░░ ░▒ ░ ░ ░ ░ ░ ░ ▒ ▒░░ ░░ ░ ▒░ ░▒ ░ ░ ░ ░▒ ░ ▒░ ░ ▒ ▒░░ ░▒ ░ ░
░ ░ ░░ ░ ░ ▒ ░ ░ ░ ░ ░ ░ ░ ░ ▒ ░ ░ ░░ ░ ░ ░ ░░ ░░ ░ ░ ▒ ░ ░ ░
░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░
░
""")
print("featuring Power Puff Girls")
print()
# Prologue.  Bug fixes: `intro = print(...)` bound print()'s None return to an
# unused name (dropped); the third sister's description said "Florzinha" again
# even though Florzinha (vet) and Lindinha (engineer) were already introduced
# -- the artist with the violin is Docinho.
print("Três irmãs, Florzinha, Lindinha e Docinho, decidiram fazer uma viagem de carro juntas.")
print("Florzinha é estudante de veterinária e sempre leva consigo um kit de primeiros socorros e medicamentos para os bichinhos.")
print("Lindinha é estudante de engenharia, bastante metódica e pragmática.")
print("Docinho, por sua vez, é a mais artista do grupo e dedica-se ao estudo da música. Muito sensível, acredita que a música opera mágica.\nPor isso, sempre traz consigo o seu violino.")
print("Distraídas, não perceberam quando os comandos do carro começaram a falhar logo que adentraram em um longo túnel.\nO susto veio quando, sem qualquer ação da motorista, a velocidade subitamente começou a aumentar.\nApavoradas, tentaram retomar o controle do carro, em vão...\nTudo que se lembram é de serem atingidas por uma forte luz que vinha na direção contrária e, então,um profundo silêncio.")
print()
#Capítulo1: Escolhendo o personagem.
jogadoras = ["Florzinha","Lindinha","Docinho"]
h1 = ("******Capítulo 1******Escolhendo o jogador*******************************").upper()
print(h1)
print()
print("[-Meu deus! O que aconteceu? Minha cabeça dói… Onde estou? Cadê todo mundo?]")
print()
print("~~~Escolha uma personagem para jogar e preencha o campo abaixo.~~~")
jogadoraEscolhida = input("Digite o nome da personagem: ").capitalize()
while jogadoraEscolhida not in jogadoras:
print("Erro!!! Você deve escolher uma das seguintes personagens: Florzinha, Lindinha ou Docinho")
print()
jogadoraEscolhida = input("Digite o nome da personagem: ").capitalize()
if jogadoraEscolhida in jogadoras:
print("Você escolheu: ", jogadoraEscolhida)
print('Boa sorte!!!')
#Capítulo2: Escolhendo um item de auxílio.
print()
print()
h2 = ("******Capítulo 2 ******Escolhendo um item de auxílio*********************").upper()
print(h2)
A = "kit_de_primeiros_socorros_para_animais"
B = "smartphone"
C = "violino"
listaItens = ["A", "B", "C"]
print("- Que lugar esquisito! Parece uma caverna…\nO carro está completamente destruído, não vai servir para me tirar daqui …\nTalvez alguma coisa das nossas bagagens possa ser útil …\nQue sorte a minha! Olha o que encontrei: \n um kit de primeiros socorros para animais,\num smartphone e um violino!")
print()
print("~Você deve escolher apenas um dos itens referidos para ajudar", jogadoraEscolhida, " nesta jornada.~")
print("A - kit de primeiros socorros para animais\nB - smartphone\nC - violino")
print()
itemEscolhido = input("Digite a letra correspondente ao item escolhido: ").upper()
while itemEscolhido not in listaItens:
print("ERRO!! Você deve escolher uma das seguintes opções: A, B ou C")
itemEscolhido = input("Digite a letra correspondente ao item escolhido: ")
if itemEscolhido in listaItens:
if itemEscolhido == "A":
print()
print(jogadoraEscolhida, "devidamente equipada com", A, "está pronta para o game!!")
elif itemEscolhido == "B":
print()
print(jogadoraEscolhida, "devidamente equipada com", B, "está pronta para o game!!")
elif itemEscolhido == "C":
print()
print(jogadoraEscolhida, "devidamente equipada com", C, "está pronta para o game!!")
print()
print()
#Capitulo3: Monstros à vista.
h3 = ("******Capítulo 3 ******Monstros à vista!!********************************").upper()
print(h3)
print("[-Olha! Algumas inscrições na parede.. deixe-me ver o que dizem..]")
print()
print("~~~", jogadoraEscolhida, " se aproxima da parede e lê a seguinte mensagem:~~~")
print()
print("[Bem vindo à gruta dos monstros!\nVocês, humanos, se consideram tão espertos…\nProve ou morrerá neste lugar.]\n")
print("~~~", jogadoraEscolhida, " encontrará alguns monstros pelo caminho.\nO monstro não a deixará passar a menos que o enigma proposto seja solucionado…\nou o monstro neutralizado.\nPara neutralizá-lo, tente usar o utensílio escolhido no início do jogo.~~~\n")
print("Vamos para o primeiro desafio!")
print()
#Capítulo4: Primeiro desafio: encontrando a Esfinge.
h4 = ("******Capítulo 4******Primeiro desafio: encontrando a Esfinge do Mal*****").upper()
print(h4)
print()
print("-Perfeito! E eu achando que as provas da facul do final de semestre seriam o meu maior problema...\nVejo uma sombra mais à frente, parecem asas…\nO que pode ser?")
print("""
..................................*VM$$$$$$$$$$$$$$NIIIIV.....
...............................:VM$$$$$$$$$$$$$$$$$NN$NF:.....
.............................:VN$$$$$$$$$$$$$$$$$$MIIIV.......
............................*N$$$$$$$$$$$$$$$$$$$$NN$N*.......
...........................F$$$$$$$$$$$$$$$$$$$$$$MIV:........
..........................:M$$$$$$$$$$$$$$$$$MIIIIMNV.........
...............**::::::....*$$$$$$$$$$$$$$$$$$$$$$MV:.........
...............*$NN$$$NI*...F$$$$$$$$$$$$$$$$$$MIV:...........
...............:FIIN$$$$$V..:M$$$$$$$$$$$$$NIIMNM:............
...............:MMVM$$$$$M:..V$$$$$$$$$$$$$$$$MV:.............
...............*N$VN$$$$$M:..:N$$$$$$$$$MIIIIV................
................VNV$$$$$$M:...F$$$$$$$$$MN$$N*................
................V*V$$$NMI*...*V$$$$$$$$$NMII:.................
...............:MVMMIIIFV:..*VM$$$$$NIIIMMNI:.................
...............*VVIIIIII::*FIF$$$$$$$$$NMIV...................
...............**IIM$$$MVMIIM$$$$$$$$MMM$M*...................
..............:VN$$$$$$NVM$$$$$$$$$$NMMI*:....................
.............*M$$N$$$$$$V$$$$$$$$$NMIMMV......................
.............F$$$VM$$$$$NVN$$$$$$$$$$N*.......................
.............:VN$VN$$$MN$NIIMMNNNMIV*:........................
...............:MVI$$$IF$$$$MMIIMIIFV*:.......................
...............:VFV$$$NV$$$$$$$$$$$$$$M*......****:...........
................VMVV$$$VM$$NIIM$$$$$$$$$V.....V*..............
................*$MV$$$VMNFIN$$$$$$$$$$$M:....:VV:............
................:NFI$$M:*VM$$$$$$$$$$$$FN*......:VV:..........
................:NVN$$*.*V$$$$$$$$$$$N*.VF........*F*.........
................:MV$$V...*N$$$$$$$$NF*..:M*........:I:........
................*FI$I:....*VN$$$$$$NNI:..*I:........I*........
..............::*V$M:......:*VVFIIM$$$I:..*F*......VI:........
""")
print("~~~", jogadoraEscolhida," se depara com a esfinge, uma criatura imponente, com o corpo de leão e a cabeça de um humano.\nA Esfinge lhe propõe um desafio:~~~")
print()
print("- Decifra-me ou te devoro. Que animal anda pela manhã sobre quatro patas, à tarde sobre duas e à noite, sobre três? Se errar, ainda te ofereço três novas chances.")
#Primeiro enigma
solucao1 = "Homem"
resposta1Jogadora = input("O animal é o (a) ").capitalize()
chance = 0
while solucao1 != resposta1Jogadora and chance < 3:
print("Você errou!")
print()
chance = chance +1
print("chance:", chance)
resposta1Jogadora = input("O animal é o (a) ").capitalize()
print()
if resposta1Jogadora == solucao1:
print("Você acertou!")
print("Preparada para o próximo desafio?")
# chances esgotadas. Ativando o item escolhido.
# Out of guesses: offer to neutralize the monster with the chosen item.
# Bug fixes: the answer was stored in `neutralizacao1` but read back as
# `neutralizacao` (NameError); the validation compared against ("S" or "N"),
# which is always just "S"; `print: (...)` was a no-op annotation statement;
# and the item letters were swapped (per the Chapter 2 menu, B is the
# smartphone and C is the violin).
if chance == 3 and resposta1Jogadora != solucao1:
    print("Suas chances acabaram. Deseja tentar neutralizar o monstro com o item escolhido?")
    neutralizacao = input("Digite S/N: ").upper()
    # Re-prompt until a valid answer is given.
    while neutralizacao not in ("S", "N"):
        print("Você deve digitar S ou N")
        neutralizacao = input("Digite S/N: ").upper()
    if neutralizacao == "N":
        print("Você morreu.")
        print("GAME OVER")
        print("**********************************************************************")
    elif neutralizacao == "S":
        if itemEscolhido == "A" and jogadoraEscolhida == "Florzinha":
            print("O item escolhido foi o kit_de_primeiros_socorros_para_animais")
            print(
                "Parabéns, Florzinha! Você foi muito esperta por trazer em seu kit sedativo suficiente para apagar um touro! A Esfinge do Mal sequer percebeu a injeção que você aplicou. Dormirá, agora, o sono dos justos.")
        elif itemEscolhido == "B" and jogadoraEscolhida == "Lindinha":
            print("O item escolhido foi o smartphone")
            print("Um smartphone com sinal e bateria salva a vida, não é, Lindinha! God bless the Google!")
        elif itemEscolhido == "C":
            print("O item escolhido foi o violino.")
            print("A Esfinge do Mal não viu graça alguma no instrumento e o destruiu...junto com você!")
            print("Você morreu. GAME OVER.")
        elif itemEscolhido == "A" and jogadoraEscolhida != "Florzinha":
            print("O item escolhido foi o kit_de_primeiros_socorros_para_animais")
            print(
                "Que sorte encontrar um sedativo para animais tão potente... Pena que você, sem experiência, se atrapalhou e não conseguiu aplicar a injeção.")
            print("Você morreu. GAME OVER.")
print()
print()
#Capítulo5: Segundo desafio: encontrando o Minotauro.
h5 = ("******Capítulo 5******Segundo desafio: encontrando o Minotauro**********").upper()
print(h5)
print()
print("[-Vejo outro monstro… aquilo são ... CHIFRES?!]")
print("""
....::................:::..............
....:*::..............**:..............
....:V*:.............:**:..............
...:*V*:..............*V*..............
...:*VV****VVVVVV*****VV*:.............
.....::*VMNNNNNNNNNMV*::...............
........::M$NNNN$N*:...................
.....::**VNNNNNNN$I*:::................
....:*VVVVINMMMM$MVVV*::...............
....*VVVVVVIN$$NIVVVVVV:..:::***:......
...:VVVVVVVVVVVVVVVVVVV*::**VV*::......
..:*VVVV*VVVVVVVVVV*VVV*:*VVVVV****:...
.::VVVV::*VVVVVVVV*:*VVV:**:**VVVV*::::
.:*VVV*::*MMIIIIMF:::*VVV*:::*VVVVV****
::*MMM*:*IMMMMMMMNI*:*VMNI*VV*::*VVVV*:
:.:IMI**IMMMMMMMMMNM:::VFVVV:::*****::.
:::*VVVMNMMMMMMMMMNN**VFV***:..:::::...
..:**VIN$$NMMMMNN$$NIIV*::.............
...:*M$$$$VVIMIN$$$NV*:................
...:VNN$$V::**::INN$V::................
....*VM$$N*:::.:VN$$I:.................
....:::*VM$M*:.:FN$$N*.................
........*MNV:...*IN$V:.................
.......*M$V:....::I$*:.................
......:VFV*:.....:*$M*:................
......:VV**:......*VMV::...............
......::::........**V*:................
.......................................
""")
print("~~Eis que", jogadoraEscolhida," se depara com o Minotauro, uma criatura bestial, com o corpo de homem e a cabeça de um touro.\nEntre berros e urros, o Minotauro apresenta o enigma:")
print()
print("-Dois pais e dois filhos sentaram-se para comer ovos no café da manhã. Cada um comeu um ovo. Quantos ovos eles comeram no total? Se errar, lhe dou duas outras chances de acerto.")
print("1 ovo")
print("3 ovos")
print("5 ovos")
print("8 ovos")
#Segundo enigma
solucao2 = "3"
resposta2Jogadora = input("A quantidade de ovos é: ")
chance = 0
while solucao2 != resposta2Jogadora and chance < 2:
print("Você errou!")
chance = chance + 1
print("chance", chance)
resposta2Jogadora = input("A quantidade de ovos é: ")
if resposta2Jogadora == solucao2:
print("Você acertou!")
print("Preparada para o último desafio?")
# Chances esgotadas. Ativando o item escolhido.
if chance == 2 and resposta2Jogadora != solucao2:
print("Suas chances acabaram.Deseja tentar neutralizar o monstro com o item escolhido?")
neutralizacao = input("Digite S/N: ").upper()
if neutralizacao != ("S" or "N"):
print: ("Você deve digitar S ou N")
if neutralizacao == "N":
print("Você morreu.")
print("GAME OVER")
print("*********************************************************************")
elif neutralizacao == "S":
if itemEscolhido == "A" and jogadoraEscolhida == "Florzinha":
print("O item escolhido foi o kit_de_primeiros_socorros_para_animais")
print(
"Parabéns, | |
"""
Common utility functions and classes adpated from
https://github.com/matterport/Mask_RCNN/blob/master/mrcnn/utils.py
Mask R-CNN
The MIT License (MIT)
Copyright (c) 2017 Matterport, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
############################################################
# Bounding Boxes
############################################################
def extract_bboxes(mask):
    """Compute bounding boxes from masks.
    mask: [height, width, num_instances]. Mask pixels are either 1 or 0.
    Returns: bbox array [num_instances, (y1, x1, y2, x2)].
    """
    num_instances = mask.shape[-1]
    boxes = np.zeros([num_instances, 4], dtype=np.int32)
    for idx in range(num_instances):
        rows, cols = np.where(mask[:, :, idx])
        if rows.size:
            # (y2, x2) lie outside the box, hence the +1 on the maxima.
            boxes[idx] = [rows.min(), cols.min(), rows.max() + 1, cols.max() + 1]
        # else: instance has no pixels (e.g. lost to resizing/cropping);
        # keep the all-zero box.
    return boxes
def compute_iou(box, boxes, box_area, boxes_area):
    """Calculates IoU of the given box with the array of the given boxes.
    box: 1D vector [y1, x1, y2, x2]
    boxes: [boxes_count, (y1, x1, y2, x2)]
    box_area: float. the area of 'box'
    boxes_area: array of length boxes_count.
    Note: the areas are passed in rather than calculated here for
    efficiency. Calculate once in the caller to avoid duplicate work.
    """
    # Intersection rectangle, clamped to zero extent when disjoint.
    inter_y1 = np.maximum(box[0], boxes[:, 0])
    inter_y2 = np.minimum(box[2], boxes[:, 2])
    inter_x1 = np.maximum(box[1], boxes[:, 1])
    inter_x2 = np.minimum(box[3], boxes[:, 3])
    intersection = (np.maximum(inter_x2 - inter_x1, 0)
                    * np.maximum(inter_y2 - inter_y1, 0))
    return intersection / (box_area + boxes_area - intersection)
def compute_overlaps(boxes1, boxes2):
    """Computes IoU overlaps between two sets of boxes.
    boxes1, boxes2: [N, (y1, x1, y2, x2)].
    For better performance, pass the largest set first and the smaller second.
    """
    # Areas are precomputed once; compute_iou takes them as arguments.
    areas1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    areas2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    # One result column per box in boxes2.
    overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))
    for col, second_box in enumerate(boxes2):
        overlaps[:, col] = compute_iou(second_box, boxes1, areas2[col], areas1)
    return overlaps
def compute_overlaps_masks(masks1, masks2):
    """Computes IoU overlaps between two sets of masks.
    masks1, masks2: [Height, Width, instances]
    """
    # An empty instance axis on either side yields an empty result.
    if masks1.shape[-1] == 0 or masks2.shape[-1] == 0:
        return np.zeros((masks1.shape[-1], masks2.shape[-1]))

    # Binarize at 0.5 and flatten each mask into one column.
    flat1 = np.reshape(masks1 > .5, (-1, masks1.shape[-1])).astype(np.float32)
    flat2 = np.reshape(masks2 > .5, (-1, masks2.shape[-1])).astype(np.float32)
    areas1 = flat1.sum(axis=0)
    areas2 = flat2.sum(axis=0)

    # All pairwise intersections come from a single matrix product.
    intersections = flat1.T @ flat2
    unions = areas1[:, None] + areas2[None, :] - intersections
    return intersections / unions
def non_max_suppression(boxes, scores, threshold):
    """Performs non-maximum suppression and returns indices of kept boxes.
    boxes: [N, (y1, x1, y2, x2)]. Notice that (y2, x2) lays outside the box.
    scores: 1-D array of box scores.
    threshold: Float. IoU threshold to use for filtering.
    """
    assert boxes.shape[0] > 0
    if boxes.dtype.kind != "f":
        boxes = boxes.astype(np.float32)

    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])

    # Candidate indices, best score first.
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        best = order[0]
        keep.append(best)
        rest = order[1:]
        # Retain only candidates that do not overlap the picked box
        # more than the threshold allows.
        iou = compute_iou(boxes[best], boxes[rest], areas[best], areas[rest])
        order = rest[iou <= threshold]
    return np.array(keep, dtype=np.int32)
def apply_box_deltas(boxes, deltas):
    """Applies the given deltas to the given boxes.
    boxes: [N, (y1, x1, y2, x2)]. Note that (y2, x2) is outside the box.
    deltas: [N, (dy, dx, log(dh), log(dw))]
    """
    boxes = boxes.astype(np.float32)

    # Switch to a center/size parameterization.
    heights = boxes[:, 2] - boxes[:, 0]
    widths = boxes[:, 3] - boxes[:, 1]
    ctr_y = boxes[:, 0] + 0.5 * heights
    ctr_x = boxes[:, 1] + 0.5 * widths

    # Shift the centers and rescale the sides (deltas carry log scales).
    ctr_y = ctr_y + deltas[:, 0] * heights
    ctr_x = ctr_x + deltas[:, 1] * widths
    heights = heights * np.exp(deltas[:, 2])
    widths = widths * np.exp(deltas[:, 3])

    # Back to corner coordinates.
    y1 = ctr_y - 0.5 * heights
    x1 = ctr_x - 0.5 * widths
    return np.stack([y1, x1, y1 + heights, x1 + widths], axis=1)
def box_refinement_graph(box, gt_box):
    """Compute refinement needed to transform box to gt_box.
    box and gt_box are [N, (y1, x1, y2, x2)]
    """
    # NOTE(review): this graph-mode variant references `tf`, but this module
    # only imports numpy -- calling it as-is raises NameError.  It also uses
    # tf.log, which exists only in TF1.x (tf.math.log in TF2).  Confirm the
    # intended TensorFlow import before using this function.
    box = tf.cast(box, tf.float32)
    gt_box = tf.cast(gt_box, tf.float32)
    # Center/size parameterization of both boxes.
    height = box[:, 2] - box[:, 0]
    width = box[:, 3] - box[:, 1]
    center_y = box[:, 0] + 0.5 * height
    center_x = box[:, 1] + 0.5 * width
    gt_height = gt_box[:, 2] - gt_box[:, 0]
    gt_width = gt_box[:, 3] - gt_box[:, 1]
    gt_center_y = gt_box[:, 0] + 0.5 * gt_height
    gt_center_x = gt_box[:, 1] + 0.5 * gt_width
    # Deltas: normalized center shift plus log scale change.
    dy = (gt_center_y - center_y) / height
    dx = (gt_center_x - center_x) / width
    dh = tf.log(gt_height / height)
    dw = tf.log(gt_width / width)
    result = tf.stack([dy, dx, dh, dw], axis=1)
    return result
def box_refinement(box, gt_box):
    """Compute refinement needed to transform box to gt_box.
    box and gt_box are [N, (y1, x1, y2, x2)]. (y2, x2) is
    assumed to be outside the box.
    """
    box = box.astype(np.float32)
    gt_box = gt_box.astype(np.float32)

    # Center/size parameterization of both boxes.
    h = box[:, 2] - box[:, 0]
    w = box[:, 3] - box[:, 1]
    cy = box[:, 0] + 0.5 * h
    cx = box[:, 1] + 0.5 * w
    gt_h = gt_box[:, 2] - gt_box[:, 0]
    gt_w = gt_box[:, 3] - gt_box[:, 1]
    gt_cy = gt_box[:, 0] + 0.5 * gt_h
    gt_cx = gt_box[:, 1] + 0.5 * gt_w

    # Deltas: normalized center shift plus log scale change.
    return np.stack([(gt_cy - cy) / h,
                     (gt_cx - cx) / w,
                     np.log(gt_h / h),
                     np.log(gt_w / w)], axis=1)
############################################################
# Miscellaneous
############################################################
def trim_zeros(x):
    """It's common to have tensors larger than the available data and
    pad with zeros. This function removes rows that are all zeros.
    x: [rows, columns].
    """
    assert len(x.shape) == 2
    # Keep every row that has at least one non-zero entry.
    return x[np.any(x != 0, axis=1)]
def compute_matches(gt_boxes,
gt_class_ids,
gt_masks,
pred_boxes,
pred_class_ids,
pred_scores,
pred_masks,
iou_threshold=0.5,
score_threshold=0.0):
"""Finds matches between prediction and ground truth instances.
Returns
-------
gt_match: 1-D array
For each GT box it has the index of the matched predicted box.
pred_match: 1-D array
For each predicted box, it has the index of the matched ground truth box.
overlaps : [pred_boxes, gt_boxes]
IoU overlaps.
"""
# Trim zero padding
# TODO: cleaner to do zero unpadding upstream
gt_boxes = trim_zeros(gt_boxes)
gt_masks = gt_masks[..., :gt_boxes.shape[0]]
pred_boxes = trim_zeros(pred_boxes)
pred_scores = pred_scores[:pred_boxes.shape[0]]
# Sort predictions by score from high to low
indices | |
<filename>crates/iks-to-fks/convert-ik-to-fk.py
# The goal of this script is to convert an IK rig into an FK rig.
# We do this by creating a copy of your mesh and new FK rig for that mesh
#
# We do this by first duplicating our original rig and removing
# all of the IKs and constraints from our duplicate.
#
# We then bake the visual location and rotation into our FK rig
#
# Once this is done, the user can export their newly generated
# mesh and it's newly generated FK rig
#
# To run, install this file as an add on
# Add-on metadata Blender reads to register the add-on; "blender" is the
# minimum supported Blender version (2.80).
bl_info = {
    "name": "Convert IKs to FKs",
    "category": "Rigging",
    "blender": (2, 80, 0)
}
import bpy
class ConvertIKToFK(bpy.types.Operator):
    """Given a selected mesh and armature with IKs and constraints, generate a new mesh and FK armature that has the same animations"""
    # Unique identifier for the add on
    bl_idname = 'rigging.iktofk'
    # Display name in the interface
    bl_label = 'Convert IKs to FKs'
    # Enable undo after executing the script
    bl_options = {'REGISTER', 'UNDO'}
    bl_category = 'Rigging'

    def execute(self, context):
        """Duplicate the selected mesh + IK armature, strip constraints and
        non-deform bones from the duplicate, then bake the IK rig's visual
        transforms into it for every action, leaving a standalone FK rig.

        Returns {'FINISHED'} so Blender records the operator as completed.
        """
        # We intentionally import bpy and math in here so that we can easily copy
        # and paste the contents of this execute function into the Blender python
        # console and have it work
        import bpy
        import sys
        import math
        ikArmature = None
        originalMesh = None
        # We first check if any of the already selected objects is a mesh that has a parent armature.
        # If so we use that mesh and armature
        for obj in bpy.context.selected_objects:
            if obj.type == 'MESH' and obj.parent and obj.parent.type == 'ARMATURE':
                originalMesh = obj
                ikArmature = obj.parent
                break
        # If no mesh is selected, we look for the first object that we can find that has an armature as a parent
        if originalMesh == None:
            for obj in bpy.data.objects:
                if obj.type == 'MESH' and obj.parent and obj.parent.type == 'ARMATURE':
                    originalMesh = obj
                    ikArmature = obj.parent
                    break
        # Deselect all objects and then select ONLY our armature and all of its children
        for obj in bpy.context.selected_objects:
            obj.select_set(state = False)
        if originalMesh != None and ikArmature != None:
            originalMesh.select_set(state=True)
            ikArmature.select_set(state=True)
        if ikArmature != None:
            # An active object is required in order to change into object mode
            bpy.context.view_layer.objects.active = ikArmature
            # If the armature is linked it will always be in object mode and we cannot mode_set.
            if ikArmature.library == None:
                bpy.ops.object.mode_set(mode = 'OBJECT')
            # Select our mesh and armature so that we can duplicate them later
            for mesh in ikArmature.children:
                if mesh.type == 'MESH':
                    mesh.select_set(True)
            # Make sure that we have an armature selected
            if (len(list(bpy.context.selected_objects)) == 0):
                print('Error: File' + bpy.path.basename(bpy.context.blend_data.filepath) + ' does not have any armatures.', file=sys.stderr)
            # Snapshot of the actions that existed before duplication, so the
            # copies created by the duplicate below can be identified later.
            originalActionsList = list(bpy.data.actions)
            # Duplicate the selected armature and mesh so that if anything were to go wrong there is a backup.
            #
            # Our duplicate (the current active armature) will become our new FK armature
            #
            # It's important to use our duplicate because our original armature might have special bones set up such as
            # bone hooks for a bezier curve. Trying to duplicate all of this onto a new armature would be difficult since
            # we'd need to also duplicate these bezier curves / bone hook data.
            bpy.ops.object.duplicate()
            fkArmature = bpy.context.view_layer.objects.active
            # Deselect all objects and then select ONLY our soon-to-be fkArmature
            for obj in bpy.context.selected_objects:
                obj.select_set(state = False)
            fkArmature.select_set(state=True)
            bpy.context.view_layer.objects.active = fkArmature
            # Enable all armature layers. Without this bones on disabled layers wouldn't get keyed.
            bpy.ops.object.mode_set(mode = 'EDIT')
            bpy.ops.armature.armature_layers(layers=(True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True))
            # Loop through all pose bones and make sure they are selected. Some of our commands require that the bones be selected
            bpy.ops.object.mode_set(mode = 'POSE')
            for poseBone in fkArmature.pose.bones:
                poseBone.bone.select = True
            # We iterate through the bones in the FK armature and remove all existing bone constraints.
            # FIX: iterate over a snapshot — removing from the live
            # `bone.constraints` collection while iterating it skips entries.
            for bone in fkArmature.pose.bones:
                for constraint in list(bone.constraints):
                    bone.constraints.remove(constraint)
            # Now we remove all non deform bones from our FK armature.
            # FIX: same mutation-during-iteration issue — snapshot the
            # edit_bones collection before removing from it.
            bpy.ops.object.mode_set(mode = 'EDIT')
            for fkEditBone in list(bpy.data.armatures[fkArmature.data.name].edit_bones):
                if fkEditBone.use_deform == False:
                    bpy.data.armatures[fkArmature.data.name].edit_bones.remove(fkEditBone)
            # Iterate through every action so that we can bake all keyframes across all actions
            for actionInfo in originalActionsList:
                # Next we make our FK bones copy the transforms of their IK rig counterparts
                # So bone1 in FK rig would copy transforms of bone1 in IK rig, and so on
                # We do this for every action since we clear our transforms while baking
                # visual keys below
                bpy.ops.object.mode_set(mode = 'POSE')
                for fkBone in bpy.context.selected_pose_bones:
                    copyTransforms = fkBone.constraints.new('COPY_TRANSFORMS')
                    copyTransforms.target = ikArmature
                    # the name of the bone in our original armature is the same as the name of our
                    # fkArmature bone since the armature was duplicated. Therefore we use `fkBone.name`
                    copyTransforms.subtarget = fkBone.name
                # Now that our FK rig is copying our IK rigs transforms, we insert visual keyframes
                # for every keyframe. This gives our FK rigs the IK rigs transforms, after
                # which we can then delete the IK rig
                bpy.ops.object.mode_set(mode = 'OBJECT')
                # Change to the action that we want to mimic
                ikArmature.animation_data.action = bpy.data.actions.get(actionInfo.name)
                fkArmature.animation_data.action = bpy.data.actions.get(actionInfo.name)
                # Get all of the keyframes that are set for the rigs
                keyframes = []
                for fcurve in bpy.context.active_object.animation_data.action.fcurves:
                    for keyframe in fcurve.keyframe_points:
                        x, y = keyframe.co
                        # Each keyframe shows up once per fcurve that keys it,
                        # so de-duplicate before collecting
                        if x not in keyframes:
                            # convert from float to int and insert into our keyframe list
                            keyframes.append((math.ceil(x)))
                # If this action has no keyframes we skip it
                if keyframes == []:
                    continue
                # Keyframes might not always be in order so we sort them
                keyframes.sort()
                # Now we bake all of our keyframes and remove our copy transform constraints
                bpy.ops.nla.bake(frame_start=keyframes[0], frame_end=keyframes[-1], only_selected=True, visual_keying=True, clear_constraints=True, use_current_action=True, bake_types={'POSE'})
                # Bake adds extra keyframes, so we delete any keyframes that did not previously exist
                bpy.ops.object.mode_set(mode = 'POSE')
                # Delete generated keyframes that did not exist before this script
                #
                # FIXME: Do this on a per bone basis, so that no bone is keyed at frames that it wasn't keyed for
                # previously.
                for frame in range(keyframes[0], keyframes[-1]):
                    if frame not in keyframes:
                        bpy.context.scene.frame_set(frame)
                        # We set up the proper context to override the default for keyframe_delete.
                        # This fixes an issue where the `poll()` function on keyframe_delete was failing when run via blender CLI.
                        # In short.. we're just making sure that `keyframe_delete` uses the correct context
                        # When we run this addon from the command line.
                        screen = bpy.context.window.screen
                        for area in screen.areas:
                            if area.type == 'VIEW_3D':
                                for region in area.regions:
                                    if region.type == 'WINDOW':
                                        override = {'window': bpy.context.window, 'screen': screen, 'area': area, 'region': region, 'scene': bpy.context.scene, 'active_object': bpy.context.active_object, 'active_pose_bone': bpy.context.active_pose_bone, 'selected_pose_bones': bpy.context.selected_pose_bones}
                                        bpy.ops.anim.keyframe_delete(override, type='LocRotScale')
            # Delete all of the actions that were created when we duplicated our mesh and armature
            #
            # Unless the IK armature was linked. In which case we need to keep our new actions
            # because our linked ones were not modified and do not have our new data.
            # FIX: iterate over snapshots of bpy.data.actions — removing from
            # the live collection while iterating it skips entries.
            if ikArmature.library is None:
                for action in list(bpy.data.actions):
                    if action not in originalActionsList:
                        bpy.data.actions.remove(action)
            else:
                for action in list(bpy.data.actions):
                    if action.library != None:
                        bpy.data.actions.remove(action)
            # Go to Object mode so that they can export their new model
            bpy.ops.object.mode_set(mode = 'OBJECT')
        return {'FINISHED'}
def register():
    """Register the operator class with Blender (called on add-on enable)."""
    bpy.utils.register_class(ConvertIKToFK)
def unregister():
    """Unregister the operator class (called on add-on disable)."""
    bpy.utils.unregister_class(ConvertIKToFK)
# This allows you to run the script directly from Blender's text editor
# to test the add-on without having to install it.
# Hit `space` then search for `convert Iks to Fks`
#
# Alternatively, you can paste the contents of the execute script
# into your Blender Python console, just make sure to remove all `return`
# | |
<filename>mindfirl/mindfirl.py
import flask
from flask import Flask, render_template, redirect, url_for, session, jsonify, request, send_from_directory
from flask_login import LoginManager, login_user, logout_user, login_required, current_user
import redis
from wtforms.fields import SelectField, FileField, FloatField, SelectMultipleField, TextAreaField
from wtforms.fields import core, html5, simple
from wtforms import Form, validators, widgets
from urllib.parse import urlparse, urljoin
import os
import time
import json
from user import User, auth_user, register_user
from mutil import r
from flask_pymongo import PyMongo
import storage_model
import data_model as dm
import data_loader as dl
import user_data as ud
import config
# Application wiring: Flask app, MongoDB connection, Flask-Login manager,
# and a couple of seed entries in Redis.
app = Flask(__name__)
# NOTE(review): placeholder secret key — must be replaced with a real random
# value (e.g. loaded from the environment) before any deployment; session
# cookies are forgeable otherwise.
app.secret_key = '<KEY>'
#CORS(app) # very important!
# 'DYNO' is set by the Heroku runtime, so this selects the hosted vs. local
# MongoDB instance.
if 'DYNO' in os.environ:
    # 1. Create new project in heroku.
    # 2. Add Heroku Redis extension
    # 3. Add mLabMongoDB extension
    # 4. Click on the extension link in the project dashboard. It will take you to the mongo DB sandbox page
    # 5. Go to users and create a new user (eg: John) and password (eg: <PASSWORD>)
    # 6. Copy the mongo db uri they provide. It will look something like this:
    #    mongodb://<dbuser>:<dbpassword>@df784663.mlab.com:47668/heroku_xxxx
    # 7. Replace the user and password with what you just created: mongodb://John:Abcd1234>@df784663.mlab.com:47668/heroku_xxxx
    # 8. Use this link as your mongodb uri in the application you push to Heroku.
    # NOTE(review): database credentials are hardcoded in source — move to an
    # environment variable.
    app.config['MONGO_URI'] = 'mongodb://mindfirl:mindfirl123@ds139775.mlab.com:39775/heroku_6dfr3hn9'
else:
    app.config['MONGO_URI'] = 'mongodb://localhost:27017/mindfirl'
mongo = PyMongo(app)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
login_manager.login_message = 'please login!'
login_manager.session_protection = 'strong'
# Seed entries written to Redis at import time — presumably demo/bootstrap
# accounts; TODO(review) confirm these are still needed and remove the
# hardcoded values if so.
r.set('username_sysubo', '<PASSWORD>')
r.set('username_admin', 'admin')
def is_safe_url(target):
    """Return True when *target* redirects to our own host over http/https.

    Used to validate ?next=... parameters and prevent open-redirect attacks.
    """
    base = urlparse(request.host_url)
    candidate = urlparse(urljoin(request.host_url, target))
    same_host = base.netloc == candidate.netloc
    return candidate.scheme in ('http', 'https') and same_host
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: reload a User from Mongo by its session id."""
    return User.get(mongo=mongo, uid=user_id)
class LoginForm(Form):
    """Login form: username (5-20 chars) and password (5-30 chars)."""
    name = simple.StringField(
        label="Username",
        widget=widgets.TextInput(),
        validators=[
            validators.DataRequired(message="username cannot be empty"),
            validators.Length(max=20,min=5,message="The length of username should between 5 and 20")
        ],
        render_kw={"class":"form-control"}
    )
    pwd = simple.PasswordField(
        label="password",
        validators=[
            validators.DataRequired(message="password cannot be empty"),
            validators.Length(max=30,min=5,message="The length of password should between 5 and 30"),
        ],
        widget=widgets.PasswordInput(),
        render_kw={"class":"form-control"}
    )
class SignupForm(Form):
    """Registration form — identical field rules to LoginForm."""
    name = simple.StringField(
        label="Username",
        widget=widgets.TextInput(),
        validators=[
            validators.DataRequired(message="username cannot be empty"),
            validators.Length(max=20,min=5,message="The length of username should between 5 and 20")
        ],
        render_kw={"class":"form-control"}
    )
    pwd = simple.PasswordField(
        label="password",
        validators=[
            validators.DataRequired(message="password cannot be empty"),
            validators.Length(max=30,min=5,message="The length of password should between 5 and 30"),
        ],
        widget=widgets.PasswordInput(),
        render_kw={"class":"form-control"}
    )
class ProjectForm(Form):
    """Project creation form (pair-file variant): name, description, a pair
    CSV, a name-frequency CSV and a free-text assignee list."""
    project_name = simple.StringField(
        label="Project name*",
        widget=widgets.TextInput(),
        validators=[
            validators.DataRequired(message="project name cannot be empty"),
            validators.Length(max=50,min=1,message="The length of username should between 1 and 50"),
            validators.Regexp('^[a-zA-Z0-9_.-]*$', flags=0, message="must be characters or digits only.")
        ],
        render_kw={"class":"form-control"}
    )
    project_des = simple.StringField(
        label="Project description (optional)",
        widget=widgets.TextInput(),
        validators=[
            validators.Length(max=50,min=0,message="The length of username should between 0 and 50")
        ],
        render_kw={"class":"form-control"}
    )
    # File fields are Optional() here because presence is checked against
    # request.files in save_project(), not by the form itself.
    data1 = FileField(u'Pair File (csv)*', render_kw={"class":"custom-file-input"}, validators=[validators.Optional()])
    data2 = FileField(u'Name Frequency (csv)*', render_kw={"class":"custom-file-input"}, validators=[validators.Optional()])
    assignee_area = TextAreaField(u'Assignee', [validators.optional(), validators.length(max=200)], render_kw={"class":"form-control", "id": "assignee_area"})
class ProjectForm2(Form):
    """Project creation form (blocking variant): name, description, two raw
    data CSVs, the blocking attributes and a free-text assignee list."""
    project_name = simple.StringField(
        label="Project name*",
        widget=widgets.TextInput(),
        validators=[
            validators.DataRequired(message="project name cannot be empty"),
            validators.Length(max=50,min=1,message="The length of username should between 1 and 50"),
            validators.Regexp('^[a-zA-Z0-9_.-]*$', flags=0, message="must be characters or digits only.")
        ],
        render_kw={"class":"form-control"}
    )
    project_des = simple.StringField(
        label="Project description (optional)",
        widget=widgets.TextInput(),
        validators=[
            validators.Length(max=50,min=0,message="The length of username should between 0 and 50")
        ],
        render_kw={"class":"form-control"}
    )
    data1 = FileField(u'Data File 1 (csv)*', render_kw={"class":"custom-file-input"}, validators=[validators.Optional()])
    data2 = FileField(u'Data File 2 (csv)*', render_kw={"class":"custom-file-input"}, validators=[validators.Optional()])
    blocking_choices = [('id', 'ID'), ('fn', 'Firstname'), ('ln', 'Lastname'), ('bd', 'DoB'), ('gd', 'Gender'), ('rc', 'Race')]
    blocking = SelectMultipleField('Blocking*', choices=blocking_choices, render_kw={"class":"form-control selectpicker"})
    # NOTE(review): the string below is dead code (retired assignto/kapr
    # fields) kept as a no-op class-level string literal — consider deleting.
    '''
    assignto = SelectField(
        u'Assign to',
        choices=[],
        render_kw={"class":"form-control selectpicker", "data-live-search": "ture"}
    )
    kapr = FloatField('Privacy budget', [validators.NumberRange(min=0, max=100, message="Please enter a valid value.")], render_kw={"class":"form-control"})
    '''
    assignee_area = TextAreaField(u'Assignee', [validators.optional(), validators.length(max=200)], render_kw={"class":"form-control", "id": "assignee_area"})
class BlockForm(Form):
    """Standalone blocking-attributes + assignee form."""
    blocking_choices = [('id', 'ID'), ('fn', 'Firstname'), ('ln', 'Lastname'), ('bd', 'DoB'), ('gd', 'Gender'), ('rc', 'Race')]
    blocking = SelectMultipleField('Blocking', choices=blocking_choices, render_kw={"class":"form-control selectpicker"})
    assignee_area = TextAreaField(u'Assignee', [validators.optional(), validators.length(max=200)], render_kw={"class":"form-control", "id": "assignee_area"})
@app.route("/dashboard")
@login_required
def dashboard():
    """Admin gets the full user list page; everyone else a minimal stub."""
    if current_user.username == 'admin':
        all_users = mongo.db.mindfirl.users.find()
        return render_template('admin_dashboard.html', users=all_users)
    logout_link = '<a href="/logout">log out</a>'
    return "Dashboard. [" + current_user.username + "] " + logout_link
@app.route('/login',methods=["GET","POST"])
def login():
    """Render the login form (GET) or authenticate submitted credentials (POST).

    On success, honours a ?next=... redirect only when it points back at our
    own host (open-redirect protection via is_safe_url).
    """
    if request.method == "GET":
        form = LoginForm()
        return render_template("login.html", form=form)
    form = LoginForm(formdata=request.form)
    if form.validate():
        user = auth_user(mongo=mongo, data=form.data)
        if user:
            login_user(user)
            #flask.flash('Logged in successfully.')
            # Renamed from `next` to avoid shadowing the builtin.
            next_page = flask.request.args.get('next')
            if not is_safe_url(next_page):
                return flask.abort(400)
            return flask.redirect(next_page or flask.url_for('index'))
        else:
            flask.flash('Incorrect username or password. Please try again.', 'alert-danger')
    else:
        print(form.errors, "login error")
    return render_template("login.html", form=form)
@app.route('/logout')
@login_required
def logout():
    """End the current session and bounce back to the login page."""
    logout_user()
    flask.flash('Logged out successfully.', 'alert-success')
    return redirect(url_for('login'))
@app.route('/register',methods=["GET","POST"])
def signup():
    """Render the registration form (GET) or create a new account (POST)."""
    if request.method == "GET":
        form = SignupForm()
        return render_template("register.html", form=form)
    form = SignupForm(formdata=request.form)
    if form.validate():
        # form.data builds a fresh dict on every access — bind it once and
        # reuse it (the original re-read form.data for register_user).
        data = form.data
        # Trivial-credential guard.
        if data['name'] == data['pwd']:
            flask.flash('Cannot use username as password.', 'alert-danger')
            return redirect(url_for('signup'))
        user = register_user(mongo=mongo, data=data)
        if user:
            flask.flash('Register successful. Please login now.', 'alert-success')
            return redirect(url_for('login'))
        else:
            # register_user returns a falsy value when the name is taken.
            print('failed.')
            flask.flash('Username exist.', 'alert-danger')
    else:
        print(form.errors, "signup error")
    return render_template("register.html", form=form)
@app.route("/")
@app.route("/index")
def index():
    """Landing page: redirect straight to the project overview."""
    return redirect(url_for('project'))
@app.route("/faq")
def faq():
    # NOTE(review): /faq intentionally(?) serves the same template as /faq2
    # — confirm faq.html was retired.
    return render_template("faq2.html")
@app.route("/faq2")
def faq2():
    """Serve the FAQ page."""
    return render_template("faq2.html")
@app.route('/project')
@login_required
def project():
    """Overview page: the current user's three most recent owned projects
    and three most recent assignments, each annotated with a progress
    percentage (and, for assignments, the consumed K-APR budget)."""
    user = current_user
    projects = storage_model.get_projects_by_owner(mongo=mongo, owner=user.username)
    projects = list(projects)
    # Overall progress of an owned project: summed pair index over summed
    # pair total across ALL assignees, clamped to 100.
    for p in projects:
        assignee_stat = p['assignee_stat']
        pair_idx, total_pairs = 0, 0
        for assignee in assignee_stat:
            pair_idx += int(assignee['pair_idx'])
            total_pairs += int(assignee['total_pairs'])
        progress = round(100 * float(pair_idx) / total_pairs, 2)
        if progress > 100:
            progress = 100
        p['progress'] = progress
    assignments = storage_model.get_projects_assigned(mongo=mongo, user=user.username)
    assignments = list(assignments)
    for a in assignments:
        assignee_stat = a['assignee_stat']
        # (Removed unused `finished_page, total_page` locals.)
        # Progress of this user's own share of the assignment.
        for assignee in assignee_stat:
            if assignee['assignee'] == user.username:
                pair_idx = int(assignee['pair_idx'])
                total_pairs = int(assignee['total_pairs'])
                progress = round(100 * float(pair_idx) / total_pairs, 2)
                if progress > 100:
                    progress = 100
                a['progress'] = progress
                break
        # NOTE(review): budget is taken from the FIRST assignee entry, while
        # assignment_list() uses the entry matching the current user —
        # confirm which is intended.
        kapr = round(100 * float(assignee_stat[0]['current_kapr']), 1)
        a['budget'] = kapr
    data = {
        'projects': projects[:3],
        'assignments': assignments[:3]
    }
    return render_template("project.html", data=data)
@app.route('/project_list')
@login_required
def project_list():
    """List every project owned by the current user with its progress.

    Progress is the summed pair index over the summed pair total across all
    assignees, as a percentage clamped to 100.
    """
    owner = current_user.username
    projects = list(storage_model.get_projects_by_owner(mongo=mongo, owner=owner))
    for proj in projects:
        stats = proj['assignee_stat']
        done = sum(int(s['pair_idx']) for s in stats)
        total = sum(int(s['total_pairs']) for s in stats)
        pct = round(100 * float(done) / total, 2)
        proj['progress'] = min(pct, 100)
    return render_template("project_list.html", data={'projects': projects})
@app.route('/assignment_list')
@login_required
def assignment_list():
    """List every assignment of the current user with progress and consumed
    K-APR budget."""
    user = current_user
    assignments = storage_model.get_projects_assigned(mongo=mongo, user=user.username)
    assignments = list(assignments)
    for a in assignments:
        # Defaults guard against an assignment record that unexpectedly has
        # no entry for this user; previously `progress`/`kapr` were only
        # bound inside the matching branch, so such a record raised
        # UnboundLocalError below.
        progress = 0
        kapr = 0.0
        for assignee in a['assignee_stat']:
            if assignee['assignee'] == user.username:
                pair_idx = int(assignee['pair_idx'])
                total_pairs = int(assignee['total_pairs'])
                progress = round(100 * float(pair_idx) / total_pairs, 2)
                if progress > 100:
                    progress = 100
                kapr = round(100 * float(assignee['current_kapr']), 1)
                break
        a['progress'] = progress
        a['budget'] = kapr
    data = {
        'assignments': assignments
    }
    return render_template("assignment_list.html", data=data)
@app.route('/createProject')
@login_required
def create_project():
    """Render the pair-file project creation form with a sorted list of all
    usernames for the assignee picker."""
    usernames = sorted(u['username'] for u in storage_model.get_all_users(mongo=mongo))
    return render_template("createProject.html", form=ProjectForm(), data={'users': usernames})
@app.route('/createProject2')
@login_required
def create_project2():
    """Render the blocking-based project creation form with a sorted list of
    all usernames for the assignee picker."""
    usernames = sorted(u['username'] for u in storage_model.get_all_users(mongo=mongo))
    return render_template("createProject2.html", form=ProjectForm2(), data={'users': usernames})
@app.route('/saveProject', methods=["POST"])
@login_required
def save_project():
    """Validate and persist a new pair-file project, then create its
    (initially empty) per-project result CSV."""
    user = current_user
    form = ProjectForm(formdata=request.form)
    # because current software do not delete users, so no worry about different user list before and after
    all_users = storage_model.get_all_users(mongo=mongo)
    user_list = [u['username'] for u in all_users]
    users = {'users': user_list}
    if form.validate():
        if 'data1' not in request.files or 'data2' not in request.files:
            flask.flash('lack data files.', 'alert-danger')
            return render_template("createProject.html", form=form, data=users)
        data = form.data
        data['pair_file'] = request.files['data1']
        data['name_freq_file'] = request.files['data2']
        data['owner'] = user.username
        if storage_model.project_name_existed(mongo=mongo, data=data):
            flask.flash('project name existed. Please use another project name.', 'alert-danger')
            return render_template("createProject.html", form=form, data=users)
        pid = storage_model.save_project(mongo=mongo, data=data)
        # Create (or truncate) the result file. `with` replaces the manual
        # open()/close() pair so the handle cannot leak on an exception.
        filename = os.path.join(config.DATA_DIR, 'result', pid + '.csv')
        with open(filename, 'w+'):
            pass
        return redirect(url_for('project'))
    else:
        print(form.errors, "project creating error")
        return render_template("createProject.html", form=form, data=users)
@app.route('/saveProject2', methods=["POST"])
@login_required
def save_project2():
    """
    creating project by blocking
    """
    user = current_user
    form = ProjectForm2(formdata=request.form)
    # The user list never shrinks (users are not deletable), so fetching it
    # once up front for re-rendering the form is safe.
    everyone = storage_model.get_all_users(mongo=mongo)
    users = {'users': [entry['username'] for entry in everyone]}
    # Guard clauses: bail out early on any invalid submission.
    if not form.validate():
        print(form.errors, "project creating error")
        return render_template("createProject2.html", form=form, data=users)
    if 'data1' not in request.files or 'data2' not in request.files:
        flask.flash('lack data files.', 'alert-danger')
        return render_template("createProject2.html", form=form, data=users)
    data = form.data
    data['file1'] = request.files['data1']
    data['file2'] = request.files['data2']
    data['owner'] = user.username
    if storage_model.project_name_existed(mongo=mongo, data=data):
        flask.flash('project name existed.', 'alert-danger')
        return render_template("createProject2.html", form=form, data=users)
    storage_model.save_project2(mongo=mongo, data=data)
    return redirect(url_for('project'))
@app.route('/project/<pid>')
@login_required
def project_detail(pid):
user = current_user
project = storage_model.get_project_by_pid(mongo=mongo, pid=pid)
if not project:
return page_not_found('page_not_found')
if project['owner'] != user.username:
return forbidden()
assignee_stat = project['assignee_stat']
pair_idx, total_pairs = 0, 0
for assignee in assignee_stat:
pair_idx += int(assignee['pair_idx'])
| |
it out.
self.assertEqual(egress_message_annotations['work'], 'hard')
self.assertEqual(egress_message_annotations['x-opt-qd.trace'], ['0/QDR.1', '0/QDR'])
M1.stop()
M2.stop()
# Dont send any pre-existing ingress or trace annotations. Make sure that there are no outgoing message annotations
# stripAnnotations property is set to "both"
    def test_08a_test_strip_message_annotations_both(self):
        """With stripAnnotations="both" and no annotations sent, the
        delivered message must carry no annotations at all."""
        addr = self.router.addresses[2]+"/strip_message_annotations_both/1"
        M1 = self.messenger()
        M2 = self.messenger()
        M1.start()
        M2.start()
        M2.subscribe(addr)
        ingress_message = Message()
        ingress_message.address = addr
        ingress_message.body = {'message': 'Hello World!'}
        #Put and send the message
        M1.put(ingress_message)
        M1.send()
        # Receive the message
        M2.recv(1)
        egress_message = Message()
        M2.get(egress_message)
        self.assertEqual(egress_message.annotations, None)
        M1.stop()
        M2.stop()
# Dont send any pre-existing ingress or trace annotations. Send in a custom annotation.
# Make sure that the custom annotation comes out and nothing else.
# stripAnnotations property is set to "both"
    def test_08a_test_strip_message_annotations_both_custom(self):
        """With stripAnnotations="both", custom (non x-opt-qd.*) annotations
        must pass through the router untouched."""
        addr = self.router.addresses[2]+"/strip_message_annotations_both/1"
        M1 = self.messenger()
        M2 = self.messenger()
        M1.start()
        M2.start()
        M2.subscribe(addr)
        ingress_message = Message()
        ingress_message.address = addr
        ingress_message.body = {'message': 'Hello World!'}
        # Only annotations with prefix "x-opt-qd." will be stripped
        ingress_message_annotations = {'stay': 'humble', 'x-opt-qd': 'work'}
        ingress_message.annotations = ingress_message_annotations
        #Put and send the message
        M1.put(ingress_message)
        M1.send()
        # Receive the message
        M2.recv(1)
        egress_message = Message()
        M2.get(egress_message)
        self.assertEqual(egress_message.annotations, ingress_message_annotations)
        M1.stop()
        M2.stop()
#Dont send any pre-existing ingress or trace annotations. Make sure that there are no outgoing message annotations
#stripAnnotations property is set to "out"
    def test_08a_test_strip_message_annotations_out(self):
        """With stripAnnotations="out" and none sent, the delivered message
        must carry no annotations (router-added ones are stripped on egress)."""
        addr = self.router.addresses[3]+"/strip_message_annotations_out/1"
        M1 = self.messenger()
        M2 = self.messenger()
        M1.start()
        M2.start()
        M2.subscribe(addr)
        ingress_message = Message()
        ingress_message.address = addr
        ingress_message.body = {'message': 'Hello World!'}
        #Put and send the message
        M1.put(ingress_message)
        M1.send()
        # Receive the message
        M2.recv(1)
        egress_message = Message()
        M2.get(egress_message)
        self.assertEqual(egress_message.annotations, None)
        M1.stop()
        M2.stop()
#Send in pre-existing trace and ingress and annotations and make sure that they are not in the outgoing annotations.
#stripAnnotations property is set to "in"
    def test_08a_test_strip_message_annotations_in(self):
        """With stripAnnotations="in", pre-existing ingress/trace annotations
        are stripped on ingress and replaced by this router's own values."""
        addr = self.router.addresses[4]+"/strip_message_annotations_in/1"
        M1 = self.messenger()
        M2 = self.messenger()
        M1.start()
        M2.start()
        M2.subscribe(addr)
        ingress_message = Message()
        ingress_message.address = addr
        ingress_message.body = {'message': 'Hello World!'}
        ##
        ## Pre-existing ingress and trace
        ##
        ingress_message_annotations = {'x-opt-qd.ingress': 'ingress-router', 'x-opt-qd.trace': ['0/QDR.1']}
        ingress_message.annotations = ingress_message_annotations
        #Put and send the message
        M1.put(ingress_message)
        M1.send()
        # Receive the message
        M2.recv(1)
        egress_message = Message()
        M2.get(egress_message)
        #Make sure 'Hello World!' is in the message body dict
        self.assertEqual('Hello World!', egress_message.body['message'])
        # The forged values must have been replaced by this router's own.
        egress_message_annotations = egress_message.annotations
        self.assertEqual(egress_message_annotations.__class__, dict)
        self.assertEqual(egress_message_annotations['x-opt-qd.ingress'], '0/QDR')
        self.assertEqual(egress_message_annotations['x-opt-qd.trace'], ['0/QDR'])
        M1.stop()
        M2.stop()
    def test_09_management(self):
        """$management GET-MGMT-NODES answers 200 with an empty body and
        echoes the request's correlation id (string or integer)."""
        addr = "amqp:/$management"
        M = self.messenger()
        M.start()
        M.route("amqp:/*", self.address+"/$1")
        sub = M.subscribe("amqp:/#")
        reply = sub.address
        request = Message()
        response = Message()
        # String correlation id must be echoed back.
        request.address = addr
        request.reply_to = reply
        request.correlation_id = "C1"
        request.properties = {u'type':u'org.amqp.management', u'name':u'self', u'operation':u'GET-MGMT-NODES'}
        M.put(request)
        M.send()
        M.recv()
        M.get(response)
        assert response.properties['statusCode'] == 200, response.properties['statusCode']
        self.assertEqual(response.correlation_id, "C1")
        self.assertEqual(response.body, [])
        # Integer correlation id must also round-trip unchanged.
        request.address = addr
        request.reply_to = reply
        request.correlation_id = 135
        request.properties = {u'type':u'org.amqp.management', u'name':u'self', u'operation':u'GET-MGMT-NODES'}
        M.put(request)
        M.send()
        M.recv()
        M.get(response)
        self.assertEqual(response.properties['statusCode'], 200)
        self.assertEqual(response.correlation_id, 135)
        self.assertEqual(response.body, [])
        # Third request: correlation_id is not reset here (still 135 from
        # the previous request); only status and body are asserted.
        request.address = addr
        request.reply_to = reply
        request.properties = {u'type':u'org.amqp.management', u'name':u'self', u'operation':u'GET-MGMT-NODES'}
        M.put(request)
        M.send()
        M.recv()
        M.get(response)
        self.assertEqual(response.properties['statusCode'], 200)
        self.assertEqual(response.body, [])
        M.stop()
    def test_09a_management_no_reply(self):
        """Management requests without a reply_to must be tolerated (sent
        twice; no response expected, no assertion — the test passes if the
        router does not crash)."""
        addr = "amqp:/$management"
        M = self.messenger()
        M.start()
        M.route("amqp:/*", self.address+"/$1")
        request = Message()
        request.address = addr
        request.correlation_id = "C1"
        request.properties = {u'type':u'org.amqp.management', u'name':u'self', u'operation':u'GET-MGMT-NODES'}
        M.put(request)
        M.send()
        M.put(request)
        M.send()
        M.stop()
    def test_09c_management_get_operations(self):
        """GET-OPERATIONS on the local $management node returns a dict of
        entity type -> supported operations including the router entity."""
        addr = "amqp:/_local/$management"
        M = self.messenger()
        M.start()
        M.route("amqp:/*", self.address+"/$1")
        sub = M.subscribe("amqp:/#")
        reply = sub.address
        request = Message()
        response = Message()
        ##
        ## Unrestricted request
        ##
        request.address = addr
        request.reply_to = reply
        request.properties = {u'type':u'org.amqp.management', u'name':u'self', u'operation':u'GET-OPERATIONS'}
        M.put(request)
        M.send()
        M.recv()
        M.get(response)
        self.assertEqual(response.properties['statusCode'], 200)
        self.assertEqual(response.body.__class__, dict)
        self.assertTrue('org.apache.qpid.dispatch.router' in response.body.keys())
        self.assertTrue(len(response.body.keys()) > 2)
        self.assertTrue(response.body['org.apache.qpid.dispatch.router'].__class__, list)
        M.stop()
    def test_09d_management_not_implemented(self):
        """An unknown management operation must be answered with HTTP-style
        status 501 (Not Implemented)."""
        addr = "amqp:/$management"
        M = self.messenger()
        M.start()
        M.route("amqp:/*", self.address+"/$1")
        sub = M.subscribe("amqp:/#")
        reply = sub.address
        request = Message()
        response = Message()
        ##
        ## Request with an invalid operation
        ##
        request.address = addr
        request.reply_to = reply
        request.properties = {u'type':u'org.amqp.management', u'name':u'self', u'operation':u'NOT-IMPL'}
        M.put(request)
        M.send()
        M.recv()
        M.get(response)
        self.assertEqual(response.properties['statusCode'], 501)
        M.stop()
    def test_10_semantics_multicast(self):
        """Multicast address semantics: every one of the 100 messages sent by
        M1 must be delivered, in order, to ALL three subscribers."""
        addr = self.address+"/multicast.10"
        M1 = self.messenger()
        M2 = self.messenger()
        M3 = self.messenger()
        M4 = self.messenger()
        M1.start()
        M2.start()
        M3.start()
        M4.start()
        M2.subscribe(addr)
        M3.subscribe(addr)
        M4.subscribe(addr)
        tm = Message()
        rm = Message()
        tm.address = addr
        for i in range(100):
            tm.body = {'number': i}
            M1.put(tm)
        M1.send()
        for i in range(100):
            M2.recv(1)
            M2.get(rm)
            self.assertEqual(i, rm.body['number'])
            M3.recv(1)
            M3.get(rm)
            self.assertEqual(i, rm.body['number'])
            M4.recv(1)
            M4.get(rm)
            self.assertEqual(i, rm.body['number'])
        M1.stop()
        M2.stop()
        M3.stop()
        M4.stop()
    def test_11_semantics_closest(self):
        """Closest address semantics: 30 messages are shared among three
        equally-close subscribers; together they must receive each message
        exactly once (10 each, given round-robin among equal-cost peers)."""
        addr = self.address+"/closest.1"
        M1 = self.messenger()
        M2 = self.messenger()
        M3 = self.messenger()
        M4 = self.messenger()
        M1.start()
        M2.start()
        M3.start()
        M4.start()
        M2.subscribe(addr)
        M3.subscribe(addr)
        M4.subscribe(addr)
        tm = Message()
        rm = Message()
        tm.address = addr
        for i in range(30):
            tm.body = {'number': i}
            M1.put(tm)
        M1.send()
        i = 0
        rx_set = []
        for i in range(10):
            M2.recv(1)
            M2.get(rm)
            rx_set.append(rm.body['number'])
            M3.recv(1)
            M3.get(rm)
            rx_set.append(rm.body['number'])
            M4.recv(1)
            M4.get(rm)
            rx_set.append(rm.body['number'])
        # The union of the three receivers must be exactly 0..29.
        self.assertEqual(30, len(rx_set))
        rx_set.sort()
        for i in range(30):
            self.assertEqual(i, rx_set[i])
        M1.stop()
        M2.stop()
        M3.stop()
        M4.stop()
def test_12_semantics_spread(self):
addr = self.address+"/spread.1"
M1 = self.messenger()
M2 = self.messenger()
M3 = self.messenger()
M4 = self.messenger()
M2.timeout = 0.1
M3.timeout = 0.1
M4.timeout = 0.1
M1.start()
M2.start()
M3.start()
M4.start()
M2.subscribe(addr)
M3.subscribe(addr)
M4.subscribe(addr)
tm = Message()
rm = Message()
tm.address = addr
for i in range(30):
tm.body = {'number': i}
M1.put(tm)
M1.send()
i = 0
rx_set = []
ca = 0
cb = 0
cc = 0
while len(rx_set) < 30:
try:
M2.recv(1)
M2.get(rm)
rx_set.append(rm.body['number'])
ca += 1
except:
pass
try:
M3.recv(1)
M3.get(rm)
rx_set.append(rm.body['number'])
cb += 1
except:
pass
try:
M4.recv(1)
M4.get(rm)
rx_set.append(rm.body['number'])
cc += 1
except:
pass
self.assertEqual(30, len(rx_set))
self.assertTrue(ca > 0)
self.assertTrue(cb > 0)
self.assertTrue(cc > 0)
rx_set.sort()
for i in range(30):
self.assertEqual(i, rx_set[i])
M1.stop()
M2.stop()
M3.stop()
M4.stop()
def test_13_to_override(self):
    """A message sent with a pre-existing 'x-opt-qd.to' annotation must be
    delivered with that annotation preserved as-is."""
    addr = self.address+"/toov/1"
    M1 = self.messenger()
    M2 = self.messenger()

    M1.start()
    M2.start()
    M2.subscribe(addr)

    tm = Message()
    rm = Message()

    tm.address = addr

    ##
    ## Pre-existing TO
    ##
    tm.annotations = {'x-opt-qd.to': 'toov/1'}
    for i in range(10):
        tm.body = {'number': i}
        M1.put(tm)
    M1.send()

    for i in range(10):
        M2.recv(1)
        M2.get(rm)
        self.assertEqual(i, rm.body['number'])
        ma = rm.annotations
        # Annotations must arrive as a plain dict with the original value intact.
        self.assertEqual(ma.__class__, dict)
        self.assertEqual(ma['x-opt-qd.to'], 'toov/1')

    M1.stop()
    M2.stop()
def test_14_send_settle_mode_settled(self):
    """
    The receiver sets a snd-settle-mode of settled, indicating that it wants to receive settled
    messages from the sender. This test makes sure that the delivery that reaches the receiver
    arrives already settled.
    """
    send_settle_mode_test = SndSettleModeTest(self.address)
    send_settle_mode_test.run()
    self.assertTrue(send_settle_mode_test.message_received)
    self.assertTrue(send_settle_mode_test.delivery_already_settled)
def test_15_excess_deliveries_released(self):
    """
    Message-route a series of deliveries where the receiver provides credit for a subset and
    once received, closes the link. The remaining deliveries should be released back to the sender.
    """
    test = ExcessDeliveriesReleasedTest(self.address)
    test.run()
    # The handler records any failure in .error; None means success.
    self.assertEqual(None, test.error)
def test_16_multicast_unsettled(self):
    """Delegate to MulticastUnsettledTest; pass iff the handler records no error."""
    test = MulticastUnsettledTest(self.address)
    test.run()
    self.assertEqual(None, test.error)
def test_17_multiframe_presettled(self):
    """Delegate to MultiframePresettledTest; pass iff the handler records no error."""
    test = MultiframePresettledTest(self.address)
    test.run()
    self.assertEqual(None, test.error)
def test_18_released_vs_modified(self):
    """Delegate to ReleasedVsModifiedTest; pass iff the handler records no error."""
    test = ReleasedVsModifiedTest(self.address)
    test.run()
    self.assertEqual(None, test.error)
def test_19_appearance_of_balance(self):
    """Delegate to AppearanceOfBalanceTest; pass iff the handler records no error."""
    test = AppearanceOfBalanceTest(self.address)
    test.run()
    self.assertEqual(None, test.error)
def test_20_batched_settlement(self):
    """Delegate to BatchedSettlementTest; pass iff the handler records no error."""
    test = BatchedSettlementTest(self.address)
    test.run()
    self.assertEqual(None, test.error)
def test_21_presettled_overflow(self):
    """Delegate to PresettledOverflowTest; pass iff the handler records no error."""
    test = PresettledOverflowTest(self.address)
    test.run()
    self.assertEqual(None, test.error)
def test_connection_properties(self):
    """Open a client connection advertising CONNECTION_PROPERTIES and query
    the router's management model for per-connection properties.

    The expected result has two rows: one carrying the offered properties
    (presumably the contents of CONNECTION_PROPERTIES -- defined elsewhere;
    verify against its definition) and one empty map, which is likely the
    management connection itself.
    """
    connection = BlockingConnection(self.router.addresses[0],
                                    timeout=60,
                                    properties=CONNECTION_PROPERTIES)
    client = SyncRequestResponse(connection)

    node = Node.connect(self.router.addresses[0])

    results = [[{u'connection': u'properties', u'int_property': 6451}], [{}]]
    self.assertEqual(node.query(type='org.apache.qpid.dispatch.connection', attribute_names=['properties']).results,
                     results)
    # NOTE(review): 'node' is never closed here -- confirm whether Node
    # connections require explicit cleanup.
    client.connection.close()
class Timeout(object):
    """Adapter handed to proton's timer facility.

    When the scheduled timer fires, proton calls on_timer_task(); this
    object simply forwards the firing to the owning object's timeout()
    method.
    """

    def __init__(self, parent):
        # Keep a reference to the owner so the timer callback can reach it.
        self.parent = parent

    def on_timer_task(self, event):
        # 'event' is supplied by proton but nothing in it is needed here.
        self.parent.timeout()
# Message body payload shared by the messaging-handler tests in this module.
HELLO_WORLD = "Hello World!"
class SndSettleModeTest(MessagingHandler):
    """Verify that a receiver requesting at-most-once delivery (snd-settle-mode
    'settled') sees deliveries that arrive already settled."""

    def __init__(self, address):
        super(SndSettleModeTest, self).__init__()
        self.address = address
        self.sender = None
        self.receiver = None
        self.message_received = False          # True iff the body matched HELLO_WORLD
        self.delivery_already_settled = False  # settled flag of the received delivery

    def on_start(self, event):
        conn = event.container.connect(self.address)
        # The receiver sets link.snd_settle_mode = Link.SND_SETTLED. It wants to receive settled messages
        self.receiver = event.container.create_receiver(conn, "org/apache/dev", options=AtMostOnce())
        # With AtLeastOnce, the sender will not settle.
        self.sender = event.container.create_sender(conn, "org/apache/dev", options=AtLeastOnce())

    def on_sendable(self, event):
        msg = Message(body=HELLO_WORLD)
        event.sender.send(msg)
        event.sender.close()

    def on_message(self, event):
        # Record whether the delivery was settled before it reached us.
        self.delivery_already_settled = event.delivery.settled
        if HELLO_WORLD == event.message.body:
            self.message_received = True
        else:
            self.message_received = False
        event.connection.close()

    def run(self):
        Container(self).run()
class ExcessDeliveriesReleasedTest(MessagingHandler):
    """Send 10 deliveries to a receiver that grants credit for only 6 and
    then closes its link; the remaining 4 must be released back to the
    sender (expect exactly 6 accepted + 4 released)."""

    def __init__(self, address):
        super(ExcessDeliveriesReleasedTest, self).__init__(prefetch=0)
        self.address = address
        self.dest = "closest.EDRtest"
        self.error = None  # failure description, or None on success
        self.sender = None
        self.receiver = None
        self.n_sent = 0
        self.n_received = 0
        self.n_accepted = 0
        self.n_released = 0

    def on_start(self, event):
        conn = event.container.connect(self.address)
        self.sender = event.container.create_sender(conn, self.dest)
        self.receiver = event.container.create_receiver(conn, self.dest)
        # Grant credit for only 6 of the 10 messages.
        self.receiver.flow(6)

    def on_sendable(self, event):
        for i in range(10 - self.n_sent):
            msg = Message(body=i)
            event.sender.send(msg)
            self.n_sent += 1

    def on_accepted(self, event):
        self.n_accepted += 1

    def on_released(self, event):
        self.n_released += 1
        if self.n_released == 4:
            # Bug fix: the second check used to overwrite any error recorded
            # by the first, hiding an accepted-count mismatch whenever the
            # received count was also wrong.  Report every failed check.
            problems = []
            if self.n_accepted != 6:
                problems.append("Expected 6 accepted, got %d" % self.n_accepted)
            if self.n_received != 6:
                problems.append("Expected 6 received, got %d" % self.n_received)
            if problems:
                self.error = "; ".join(problems)
            event.connection.close()

    def on_message(self, event):
        self.n_received += 1
        if self.n_received == 6:
            # Close the link after consuming the credited 6 deliveries.
            self.receiver.close()

    def run(self):
        Container(self).run()
class MulticastUnsettledTest(MessagingHandler):
def __init__(self, address):
super(MulticastUnsettledTest, self).__init__(prefetch=0)
self.address | |
"""
s = """<?xml version="1.0"?>
<Map>
<Stylesheet>
Map { map-bgcolor: #fff; }
Layer
{
polygon-fill: #999;
line-color: #fff;
line-width: 1;
outline-color: #000;
outline-width: 1;
}
Layer name
{
text-face-name: 'Comic Sans';
text-size: 14;
text-fill: #f90;
}
</Stylesheet>
<Datasource name="template">
<Parameter name="type">shape</Parameter>
<Parameter name="encoding">latin1</Parameter>
<Parameter name="base">data</Parameter>
</Datasource>
<Layer srs="+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs">
<Datasource base="template">
<Parameter name="file">test.shp</Parameter>
</Datasource>
</Layer>
<Layer srs="+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs">
<Datasource base="template">
<Parameter name="file">test.shp</Parameter>
</Datasource>
</Layer>
</Map>
"""
self.doCompile1(s)
# run the same test with a datasourcesconfig
dscfg = """<?xml version="1.0"?>
<Map>
<Stylesheet>
Map { map-bgcolor: #fff; }
Layer
{
polygon-fill: #999;
line-color: #fff;
line-width: 1;
outline-color: #000;
outline-width: 1;
}
Layer name
{
text-face-name: 'Comic Sans';
text-size: 14;
text-fill: #f90;
}
</Stylesheet>
<DataSourcesConfig>
[DEFAULT]
default_layer_srs = epsg:4326
other_srs = epsg:4326
[template1]
type=shape
layer_srs=%(default_layer_srs)s
encoding=latin1
base=data
[test_shp]
file=test.shp
template=template1
[test_shp_2]
type=shape
encoding=latin1
base=data
layer_srs=%(other_srs)s
</DataSourcesConfig>
<Layer source_name="test_shp" />
<Layer source_name="test_shp_2" />
</Map>
"""
map = self.doCompile1(dscfg)
self.assertEqual(map.layers[1].srs, '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
handle, cfgpath = tempfile.mkstemp()
os.close(handle)
try:
open(cfgpath, 'w').write("[DEFAULT]\nother_srs=epsg:900913")
map = self.doCompile1(dscfg, datasources_cfg=cfgpath)
self.assertEqual(map.layers[1].srs, '+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs')
finally:
os.unlink(cfgpath)
def doCompile1(self, s, **kwargs):
    """Compile stylesheet/map source *s*, sanity-check the resulting
    two-layer map, and return the compiled map object."""
    compiled = compile(s, self.dirs, **kwargs)

    self.assertEqual(2, len(compiled.layers))

    layer = compiled.layers[0]
    self.assertEqual(3, len(layer.styles))

    # Style 0: a single polygon rule.
    poly_rules = layer.styles[0].rules
    self.assertEqual(1, len(poly_rules))
    self.assertEqual(1, len(poly_rules[0].symbolizers))
    self.assertEqual(color(0x99, 0x99, 0x99), poly_rules[0].symbolizers[0].color)
    self.assertEqual(1.0, poly_rules[0].symbolizers[0].opacity)

    # Style 1: one rule with an outline symbolizer plus a line symbolizer.
    line_rules = layer.styles[1].rules
    self.assertEqual(1, len(line_rules))
    self.assertEqual(2, len(line_rules[0].symbolizers))
    self.assertEqual(color(0x00, 0x00, 0x00), line_rules[0].symbolizers[0].color)
    self.assertEqual(color(0xFF, 0xFF, 0xFF), line_rules[0].symbolizers[1].color)
    self.assertEqual(3.0, line_rules[0].symbolizers[0].width)
    self.assertEqual(1.0, line_rules[0].symbolizers[1].width)

    # Style 2: a single text rule.
    text_rules = layer.styles[2].rules
    self.assertEqual(1, len(text_rules))
    self.assertEqual(1, len(text_rules[0].symbolizers))
    self.assertEqual(strings('Comic Sans'), text_rules[0].symbolizers[0].face_name)
    self.assertEqual(14, text_rules[0].symbolizers[0].size)

    # Layer SRS and shapefile datasource parameters.
    self.assertEqual(layer.srs, '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
    self.assertEqual(os.path.basename(layer.datasource.parameters['file']), 'test.shp')
    self.assertEqual(layer.datasource.parameters['encoding'], 'latin1')
    self.assertEqual(layer.datasource.parameters['type'], 'shape')

    return compiled
def testCompile2(self):
    """Compile a map with an embedded stylesheet, serialize it through
    Mapnik, and verify the styles, layer bindings, and datasource
    parameters survive the round trip through mapnik.save_map()."""
    s = """<?xml version="1.0"?>
        <Map>
            <Stylesheet>
                Map { map-bgcolor: #fff; }

                Layer
                {
                    polygon-fill: #999;
                    polygon-opacity: 0.5;
                    line-color: #fff;
                    line-width: 2;
                    outline-color: #000;
                    outline-width: 1;
                }

                Layer name
                {
                    text-face-name: 'Comic Sans';
                    text-size: 14;
                    text-fill: #f90;
                }
            </Stylesheet>
            <Datasource name="template">
                <Parameter name="type">shape</Parameter>
                <Parameter name="encoding">latin1</Parameter>
            </Datasource>
            <Layer>
                <Datasource base="template">
                    <Parameter name="type">shape</Parameter>
                    <Parameter name="file">%(data)s/test.shp</Parameter>
                </Datasource>
            </Layer>
        </Map>
    """ % self.__dict__

    map = compile(s, self.dirs)

    mmap = mapnik.Map(640, 480)
    map.to_mapnik(mmap)

    # Round-trip through Mapnik's XML serializer, then parse it back.
    (handle, path) = tempfile.mkstemp(suffix='.xml', prefix='cascadenik-mapnik-')
    os.close(handle)
    mapnik.save_map(mmap, path)
    doc = xml.etree.ElementTree.parse(path)
    map_el = doc.getroot()
    #print open(path, 'r').read()
    os.unlink(path)

    # One polygon, one line, and one text style, all attached to the layer.
    self.assertEqual(3, len(map_el.findall('Style')))
    self.assertEqual(1, len(map_el.findall('Layer')))
    self.assertEqual(3, len(map_el.find('Layer').findall('StyleName')))

    # Every StyleName on the layer must reference a declared Style.
    for stylename_el in map_el.find('Layer').findall('StyleName'):
        self.assertTrue(stylename_el.text in [style_el.get('name') for style_el in map_el.findall('Style')])

    for style_el in map_el.findall('Style'):
        if style_el.get('name').startswith('polygon style '):
            self.assertEqual(1, len(style_el.find('Rule').findall('PolygonSymbolizer')))

        if style_el.get('name').startswith('line style '):
            # outline-* plus line-* produce two stacked line symbolizers.
            self.assertEqual(2, len(style_el.find('Rule').findall('LineSymbolizer')))

        if style_el.get('name').startswith('text style '):
            self.assertEqual(1, len(style_el.find('Rule').findall('TextSymbolizer')))

    # Datasource parameters must carry through unchanged.
    self.assertEqual(len(map_el.find("Layer").findall('Datasource')), 1)
    params = dict(((p.get('name'), p.text) for p in map_el.find('Layer').find('Datasource').findall('Parameter')))
    self.assertEqual(params['type'], 'shape')
    self.assertTrue(params['file'].endswith('%s/test.shp' % self.data))
    self.assertEqual(params['encoding'], 'latin1')
def testCompile3(self):
    """Build an output.Map object directly (no stylesheet parsing) with two
    layers and scale-bounded, filtered rules, and verify Mapnik's XML
    serialization of the result."""
    map = output.Map(layers=[
        output.Layer('this',
                     output.Datasource(type="shape", file="%s/test.shp" % self.data), [
            output.Style('a style', [
                output.Rule(
                    output.MinScaleDenominator(1),
                    output.MaxScaleDenominator(100),
                    output.Filter("[this] = 'that'"),
                    [
                        output.PolygonSymbolizer(color(0xCC, 0xCC, 0xCC))
                    ])
                ])
            ]),
        output.Layer('that',
                     output.Datasource(type="shape", file="%s/test.shp" % self.data), [
            output.Style('another style', [
                output.Rule(
                    output.MinScaleDenominator(101),
                    output.MaxScaleDenominator(200),
                    output.Filter("[this] = 2"),
                    [
                        output.PolygonSymbolizer(color(0x33, 0x33, 0x33)),
                        output.LineSymbolizer(color(0x66, 0x66, 0x66), 2)
                    ])
                ])
            ])
        ])

    mmap = mapnik.Map(640, 480)
    map.to_mapnik(mmap)

    # Round-trip through Mapnik's XML serializer, then parse it back.
    (handle, path) = tempfile.mkstemp(suffix='.xml', prefix='cascadenik-mapnik-')
    os.close(handle)
    mapnik.save_map(mmap, path)
    doc = xml.etree.ElementTree.parse(path)
    map_el = doc.getroot()
    # print open(path, 'r').read()
    os.unlink(path)

    self.assertEqual(2, len(map_el.findall('Style')))
    self.assertEqual(2, len(map_el.findall('Layer')))

    # Each layer references exactly one of the declared styles.
    for layer_el in map_el.findall('Layer'):
        self.assertEqual(1, len(layer_el.findall('StyleName')))
        self.assertTrue(layer_el.find('StyleName').text in [style_el.get('name') for style_el in map_el.findall('Style')])

    for style_el in map_el.findall('Style'):
        if style_el.get('name') == 'a style':
            # Filter text and scale bounds serialized by Mapnik.
            self.assertEqual("([this]='that')", style_el.find('Rule').find('Filter').text)
            self.assertEqual('1', style_el.find('Rule').find('MinScaleDenominator').text)
            self.assertEqual('100', style_el.find('Rule').find('MaxScaleDenominator').text)
            self.assertEqual(1, len(style_el.find('Rule').findall('PolygonSymbolizer')))

        if style_el.get('name') == 'another style':
            self.assertEqual('([this]=2)', style_el.find('Rule').find('Filter').text)
            self.assertEqual('101', style_el.find('Rule').find('MinScaleDenominator').text)
            self.assertEqual('200', style_el.find('Rule').find('MaxScaleDenominator').text)
            self.assertEqual(1, len(style_el.find('Rule').findall('PolygonSymbolizer')))
            self.assertEqual(1, len(style_el.find('Rule').findall('LineSymbolizer')))
def testCompile4(self):
    """Smoke test: compile a stylesheet exercising point, dashed-line,
    polygon-pattern, line-pattern, text, and shield properties (with
    remote image URLs) and serialize the result through Mapnik."""
    s = """<?xml version="1.0"?>
        <Map>
            <Stylesheet>
                Map {
                    map-bgcolor: #fff;
                }

                Layer {
                    point-file: url('http://cascadenik-sampledata.s3.amazonaws.com/purple-point.png');
                    point-allow-overlap: true;
                }

                Layer {
                    line-color: #0f0;
                    line-width: 3;
                    line-dasharray: 8,100,4,50;
                }

                Layer {
                    polygon-pattern-file: url('http://cascadenik-sampledata.s3.amazonaws.com/purple-point.png');
                }

                Layer {
                    line-pattern-file: url('http://cascadenik-sampledata.s3.amazonaws.com/purple-point.png');
                }

                Layer name {
                    text-face-name: "DejaVu Sans Book";
                    text-size: 10;
                    text-fill: #005;
                    text-halo-radius: 1;
                    text-halo-fill: #f00;
                    text-placement: line;
                    text-allow-overlap: true;
                    text-avoid-edges: true;
                }

                Layer name2 {
                    shield-face-name: 'Helvetica';
                    shield-size: 12;

                    shield-file: url('http://cascadenik-sampledata.s3.amazonaws.com/purple-point.png');
                    shield-width: 16;
                    shield-height: 16;

                    shield-fill: #f00;
                    shield-min-distance: 5;
                    shield-spacing: 7;
                    shield-line-spacing: 3;
                    shield-character-spacing: 18;
                }
            </Stylesheet>
            <Datasource name="template">
                <Parameter name="type">shape</Parameter>
                <Parameter name="encoding">latin1</Parameter>
            </Datasource>
            <Layer>
                <Datasource base="template">
                    <Parameter name="type">shape</Parameter>
                    <Parameter name="file">%(data)s/test.shp</Parameter>
                </Datasource>
            </Layer>
        </Map>
    """ % self.__dict__

    # No assertions: the test passes if compilation and serialization
    # complete without raising.
    mmap = mapnik.Map(640, 480)
    ms = compile(s, self.dirs)
    ms.to_mapnik(mmap, self.dirs)
    mapnik.save_map(mmap, os.path.join(self.tmpdir, 'out.mml'))
def testCompile5(self):
    """Smoke test: a UTF-8-encoded source with a non-ASCII attribute
    selector must compile and serialize without raising."""
    # NOTE(review): '.encode('utf-8') % self.__dict__' applies %-formatting
    # to a byte string -- this is Python 2 behavior; under Python 3 the
    # str-keyed __dict__ would not match bytes format keys.  Confirm the
    # intended interpreter before porting.
    s = u"""<?xml version="1.0" encoding="UTF-8" ?>
        <Map>
            <Stylesheet>
                Layer[name="<NAME>"] { polygon-fill: #000; }
            </Stylesheet>
            <Layer>
                <Datasource>
                    <Parameter name="type">shape</Parameter>
                    <Parameter name="file">%(data)s/test.shp</Parameter>
                </Datasource>
            </Layer>
        </Map>
    """.encode('utf-8') % self.__dict__

    mmap = mapnik.Map(640, 480)
    ms = compile(s, self.dirs)
    ms.to_mapnik(mmap, self.dirs)
    mapnik.save_map(mmap, os.path.join(self.tmpdir, 'out.mml'))
def testCompile6(self):
    """Compile a text-symbolizer stylesheet and verify each text-* property
    lands on the generated Mapnik TextSymbolizer, across the attribute
    layout differences of Mapnik <2.0, 2.0.x, and >=2.1."""
    # NOTE(review): several properties appear twice (text-anchor-dx/dy,
    # text-align, text-vertical-align, text-transform) -- presumably to
    # exercise last-declaration-wins; confirm that is intentional.
    s = u"""
        Layer NAME
        {
            text-anchor-dx: 10;
            text-anchor-dy: 10;
            text-allow-overlap: true;
            text-avoid-edges: true;
            text-align: middle;
            text-character-spacing: 10;
            text-dx: 10;
            text-dy: 15;
            text-face-name: 'Helvetica';
            text-fill: #f00;
            text-halo-fill: #ff0;
            text-halo-radius: 2;
            text-label-position-tolerance: 25;
            text-line-spacing:10;
            text-anchor-dx: 10;
            text-anchor-dy: 10;
            text-align: left;
            text-vertical-align: bottom;
            text-justify-align: left;
            text-transform: uppercase;
            text-size: 12;
            text-spacing: 50;
            text-wrap-width: 100;
            text-transform: uppercase;
            text-max-char-angle-delta: 10;
            text-min-distance: 5;
            text-placement: line;
            text-vertical-align: top;
        }
    """

    declarations = stylesheet_declarations(s, is_merc=True)
    text_rule_groups = get_text_rule_groups(declarations)
    sym = text_rule_groups['NAME'][0].symbolizers[0].to_mapnik()

    # Mapnik >=2.1 nests attributes under .properties/.format; older
    # versions expose them directly on the symbolizer.
    if MAPNIK_VERSION >= 200000:
        self.assertEqual((10, 15), sym.properties.displacement if (MAPNIK_VERSION >= 200100) else sym.displacement)
    else:
        self.assertEqual([10, 15], sym.get_displacement())

    # todo - anchor (does not do anything yet in mapnik, but likely will)
    # and is not set in xml, but accepted in python
    #self.assertEqual([0,5], sym.get_anchor())
    self.assertEqual(True, sym.properties.allow_overlap if (MAPNIK_VERSION >= 200100) else sym.allow_overlap)
    self.assertEqual(True, sym.properties.avoid_edges if (MAPNIK_VERSION >= 200100) else sym.avoid_edges)
    self.assertEqual(10, sym.format.character_spacing if (MAPNIK_VERSION >= 200100) else sym.character_spacing)
    self.assertEqual('Helvetica', sym.format.face_name if (MAPNIK_VERSION >= 200100) else sym.face_name)
    self.assertEqual(mapnik.Color("#f00"), sym.format.fill if (MAPNIK_VERSION >= 200100) else sym.fill)
    self.assertEqual(mapnik.justify_alignment.LEFT, sym.properties.justify_alignment if (MAPNIK_VERSION >= 200100) else sym.justify_alignment)
    self.assertEqual(mapnik.Color("#ff0"), sym.format.halo_fill if (MAPNIK_VERSION >= 200100) else sym.halo_fill)
    self.assertEqual(2, sym.format.halo_radius if (MAPNIK_VERSION >= 200100) else sym.halo_radius)

    if MAPNIK_VERSION >= 200100:
        # TextSymbolizer got a "clip" attribute and we want it to be false.
        self.assertFalse(sym.clip)

    if MAPNIK_VERSION >= 200100:
        # TextSymbolizer lost its "name" attribute in Mapnik 2.1.
        pass
    elif MAPNIK_VERSION >= 200001:
        self.assertEqual('[NAME]', str(sym.name))
    else:
        self.assertEqual('NAME', sym.name)

    self.assertEqual(12, sym.format.text_size if (MAPNIK_VERSION >= 200100) else sym.text_size)
    self.assertEqual(100, sym.properties.wrap_width if (MAPNIK_VERSION >= 200100) else sym.wrap_width)
    self.assertEqual(50, sym.properties.label_spacing if (MAPNIK_VERSION >= 200100) else sym.label_spacing)
    self.assertEqual(25, sym.properties.label_position_tolerance if (MAPNIK_VERSION >= 200100) else sym.label_position_tolerance)

    if MAPNIK_VERSION >= 200100:
        # Seriously?
        self.assertEqual(10, sym.properties.maximum_angle_char_delta if (MAPNIK_VERSION >= 200100) else sym.maximum_angle_char_delta)
    else:
        self.assertEqual(10, sym.max_char_angle_delta)

    self.assertEqual(10, sym.format.line_spacing if (MAPNIK_VERSION >= 200100) else sym.line_spacing)
    self.assertEqual(5, sym.properties.minimum_distance if (MAPNIK_VERSION >= 200100) else sym.minimum_distance)
    self.assertEqual(mapnik.label_placement.LINE_PLACEMENT, sym.properties.label_placement if (MAPNIK_VERSION >= 200100) else sym.label_placement)
def testCompile7(self):
    """A 'display: none' rule must subtract its selector from the visible
    line rules: the surviving rule carries the negated filter."""
    css = """
        #roads
        {
            line-color: #f90;
            line-width: 1 !important;
        }

        #roads[tiny=yes]
        {
            display: none;
        }
    """

    decls = stylesheet_declarations(css, is_merc=True)
    rules = get_line_rules(decls)

    # Exactly one rule remains, filtered to exclude the hidden selector.
    self.assertEqual(1, len(rules))
    self.assertEqual(rules[0].filter.text, "not [tiny] = 'yes'")
def testCompile8(self):
    """With scale=2, output dimensions double (line-width 1 -> 2, text-size
    12 -> 24) and the [zoom=12] selector maps to the scale-denominator
    range asserted below."""
    s = """
        #roads[zoom=12]
        {
            line-color: #f90;
            line-width: 1;
        }

        #roads[zoom=12] name
        {
            text-fill: #f90;
            text-face-name: "Courier New";
            text-size: 12;
        }
    """

    declarations = stylesheet_declarations(s, is_merc=True, scale=2)

    line_rules = get_line_rules(declarations)
    line_rule = line_rules[0]
    self.assertEqual(1, len(line_rules))
    # zoom=12 translated to min/max scale denominators (doubled by scale=2).
    self.assertEqual(51070, line_rule.minscale.value)
    self.assertEqual(102139, line_rule.maxscale.value)
    self.assertEqual(2, line_rule.symbolizers[0].width)

    text_rules = get_text_rule_groups(declarations).get('name', [])
    text_rule = text_rules[0]
    self.assertEqual(1, len(text_rules))
    self.assertEqual(51070, text_rule.minscale.value)
    self.assertEqual(102139, text_rule.maxscale.value)
    self.assertEqual(24, text_rule.symbolizers[0].size)
def testCompile9(self):
    """Multiple comma-separated font face names compile into a Mapnik
    FontSet; verify the remaining text attributes still apply.

    Skipped (early return) on Mapnik < 2.1, which lacks multi-face support.
    """
    s = u"""
        Layer NAME
        {
            text-face-name: 'Helvetica', 'DejaVu Sans Book';
            text-fill: #f00;
            text-size: 12;
        }
    """

    if MAPNIK_VERSION < 200100:
        # Mapnik only supports multiple font face names as of version 2.1
        return

    declarations = stylesheet_declarations(s, is_merc=True)
    text_rule_groups = get_text_rule_groups(declarations)

    symbolizer = text_rule_groups['NAME'][0].symbolizers[0]
    fontsets = {symbolizer.get_fontset_name(): output.FontSet(symbolizer.face_name.values).to_mapnik()}
    sym = text_rule_groups['NAME'][0].symbolizers[0].to_mapnik(fontsets)

    self.assertEqual(mapnik.Color("#f00"), sym.format.fill if (MAPNIK_VERSION >= 200100) else sym.fill)
    self.assertEqual(12, sym.format.text_size if (MAPNIK_VERSION >= 200100) else sym.text_size)
    # TODO: test for output of FontSet in text symbolizer when Mapnik
    # adds support. See also https://github.com/mapnik/mapnik/issues/1483
def testCompile10(self):
"""
"""
s = """<?xml version="1.0"?>
<Map>
<Stylesheet>
Map { map-bgcolor: #fff; }
Layer name
{
text-face-name: 'Comic Sans', 'Papyrus';
text-size: 14;
text-fill: #f90;
}
</Stylesheet>
<Datasource name="template">
<Parameter name="type">shape</Parameter>
<Parameter name="encoding">latin1</Parameter>
</Datasource>
<Layer>
<Datasource base="template">
<Parameter name="type">shape</Parameter>
<Parameter name="file">%(data)s/test.shp</Parameter>
</Datasource>
</Layer>
</Map>
""" % self.__dict__
map = compile(s, self.dirs)
mmap = mapnik.Map(640, 480)
map.to_mapnik(mmap)
(handle, path) = tempfile.mkstemp(suffix='.xml', prefix='cascadenik-mapnik-')
os.close(handle)
mapnik.save_map(mmap, path)
doc = xml.etree.ElementTree.parse(path)
map_el = doc.getroot()
self.assertEqual(len(map_el.find("Layer").findall('Datasource')), 1)
params = dict(((p.get('name'), p.text) for p in map_el.find('Layer').find('Datasource').findall('Parameter')))
self.assertEqual(params['type'], 'shape')
self.assertTrue(params['file'].endswith('%s/test.shp' % self.data))
self.assertEqual(params['encoding'], 'latin1')
textsym_el = map_el.find('Style').find('Rule').find('TextSymbolizer')
if | |
the second dimension of logits.
Returns:
Routing probabilities for each pair of capsules. Same shape as logits.
"""
# leak is a zero matrix with same shape as logits except dim(2) = 1 because
# of the reduce_sum.
leak = tf.zeros_like(logits, optimize=True)
leak = tf.reduce_sum(leak, axis=2, keep_dims=True)
leaky_logits = tf.concat([leak, logits], axis=2)
leaky_routing = tf.nn.softmax(leaky_logits, dim=2)
return tf.split(leaky_routing, [1, output_dim], 2)[1]
def _dynamic_routing(self,
                     votes,
                     biases,
                     logit_shape,
                     num_dims,
                     input_dim,
                     output_dim,
                     act_fn,
                     num_routing,
                     leaky=False):
    """Sums over scaled votes and applies squash to compute the activations.

    Iteratively updates routing logits (scales) based on the similarity between
    the activation of this layer and the votes of the layer below.

    Args:
      votes: tensor, The transformed outputs of the layer below.
      biases: tensor, Bias tf_variable.
      logit_shape: tensor, shape of the logit to be initialized.
      num_dims: scalar, number of dimensions in votes. For fully connected
        capsule it is 4, for convolution 6.
      input_dim: scalar, number of capsules in the input layer.
      output_dim: scalar, number of capsules in the output layer.
      act_fn: activation function.
      num_routing: scalar, Number of routing iterations.
      leaky: boolean, if set use leaky routing.

    Returns:
      The activation tensor of the output layer after num_routing iterations.
    """
    # votes shape: [batch, input_dim, output_dim, output_atoms]
    # Permutation that moves the atoms axis to the front; the extra trailing
    # axes (spatial dims in the conv case) are appended unchanged.
    votes_t_shape = [3, 0, 1, 2]
    for i in range(num_dims - 4):
        votes_t_shape += [i + 4]  # CONV: votes_t_shape - [3, 0, 1, 2, 4, 5]
    r_t_shape = [1, 2, 3, 0]
    for i in range(num_dims - 4):
        r_t_shape += [i + 4]  # CONV: r_t_shape - [1, 2, 3, 0, 4, 5]

    # votes_trans: [output_atoms, batch, input_dim, output_dim]
    votes_trans = tf.transpose(votes, votes_t_shape)

    # Capture as a local so the while_loop body closure does not reference
    # self during graph construction.
    use_bias = self.use_bias

    def _body(i_route, logits_, activations_):
        """Routing while loop."""
        # logits_: [batch, input_dim, output_dim]
        if leaky:
            route = self._leaky_routing(logits_, output_dim)
        else:
            # NOTE(review): 'dim=' is the pre-TF-1.8 name of the softmax
            # 'axis=' argument -- this module targets an older TF 1.x API.
            route = tf.nn.softmax(logits_, dim=2)
        # route: [batch, input_dim, output_dim]
        # pre_act_unrolled: [output_atoms, batch, input_dim, output_dim]
        pre_act_unrolled = route * votes_trans
        # pre_act_trans: [batch, input_dim, output_dim, output_atoms]
        pre_act_trans = tf.transpose(pre_act_unrolled, r_t_shape)

        # pre_act: [batch, output_dim, output_atoms]
        if use_bias:
            pre_act = tf.reduce_sum(pre_act_trans, axis=1) + biases
        else:
            pre_act = tf.reduce_sum(pre_act_trans, axis=1)

        # activated: [batch, output_dim, output_atoms]
        activated = act_fn(pre_act)
        activations_ = activations_.write(i_route, activated)

        # Agreement update: add the dot product between each vote and the
        # (tiled) activation back into the routing logits.
        # act_3d: [batch, 1, output_dim, output_atoms]
        act_3d = tf.expand_dims(activated, 1)
        # act_tiled: [batch, input_dim, output_dim, output_atoms]
        tile_shape = list(np.ones(num_dims, dtype=np.int32))
        tile_shape[1] = input_dim
        act_tiled = tf.tile(act_3d, tile_shape)
        # distances: [batch, input_dim, output_dim]
        distances = tf.reduce_sum(votes * act_tiled, axis=3)
        logits_ += distances

        return i_route + 1, logits_, activations_

    # clear_after_read=False: only the final iteration's entry is read, but
    # every write must stay valid for the duration of the loop.
    activations = tf.TensorArray(
        dtype=tf.float32, size=num_routing, clear_after_read=False)
    logits = tf.fill(logit_shape, 0.0)

    i = tf.constant(0, dtype=tf.int32)
    _, logits, activations = tf.while_loop(
        lambda i_route, logits_, activations_: i_route < num_routing,
        _body,
        loop_vars=[i, logits, activations],
        swap_memory=True)

    return activations.read(num_routing - 1)
def __call__(self, inputs):
    """Single full-connected layer

    Args:
      inputs: tensor, activation output of the layer below of shape
        `[batch, input_dim, input_atoms]`.

    Returns:
      Tensor of activations for this layer of shape
        `[batch, output_dim, output_atoms]`.
    """
    with tf.variable_scope('caps_{}'.format(self.idx)):

        # NOTE(review): batch_size may be None for a dynamically-shaped
        # placeholder; it is only used inside tf.stack below -- confirm the
        # graph is always built with a static batch dimension.
        batch_size, input_dim, input_atoms = inputs.get_shape().as_list()

        # Alternative initializers kept for reference:
        # weights_initializer = tf.truncated_normal_initializer(
        #     stddev=0.01, dtype=tf.float32)
        # biases_initializer = tf.zeros_initializer()
        weights_initializer = tf.truncated_normal_initializer(
            stddev=0.1, dtype=tf.float32)
        biases_initializer = tf.constant_initializer(0.1)

        weights, biases = self._get_variables(
            use_bias=self.use_bias,
            weights_shape=[input_dim, input_atoms,
                           self.output_dim * self.output_atoms],
            biases_shape=[self.output_dim, self.output_atoms],
            weights_initializer=weights_initializer,
            biases_initializer=biases_initializer,
            store_on_cpu=self.cfg.VAR_ON_CPU
        )

        with tf.name_scope('Wx_plus_b'):
            # Depth-wise matmul: [b, d, c] ** [d, c, o_c] = [b, d, o_c]
            # To do this: tile input, do element-wise multiplication and reduce
            # sum over input_atoms dimension.
            # [batch, input_dim, input_atoms, output_dim *output_atoms]
            input_tiled = tf.tile(
                tf.expand_dims(inputs, -1),
                [1, 1, 1, self.output_dim * self.output_atoms])
            # [batch, input_dim, output_dim * output_atoms]
            votes = tf.reduce_sum(input_tiled * weights, axis=2)
            # [batch, input_dim, output_dim, output_atoms]
            votes_reshaped = tf.reshape(
                votes, [-1, input_dim, self.output_dim, self.output_atoms])

        with tf.name_scope('routing'):
            # Dynamic routing refines the votes into output activations.
            logit_shape = tf.stack([batch_size, input_dim, self.output_dim])
            self.output = self._dynamic_routing(
                votes=votes_reshaped,
                biases=biases,
                logit_shape=logit_shape,
                num_dims=4,
                input_dim=input_dim,
                output_dim=self.output_dim,
                act_fn=self._get_act_fn(self.act_fn),
                num_routing=self.num_routing,
                leaky=self.leaky)

        return self.output
class ConvSlimCapsuleV2(CapsuleV2):
    # Convolutional capsule layer: shared conv kernel produces the votes,
    # then dynamic routing (inherited from CapsuleV2) refines them.

    def __init__(self,
                 cfg,
                 output_dim,
                 output_atoms,
                 num_routing=3,
                 leaky=False,
                 kernel_size=5,
                 stride=2,
                 padding='SAME',
                 conv_act_fn=None,
                 caps_act_fn='squash_v2',
                 use_bias=True,
                 idx=0):
        """Builds a slim convolutional capsule layer.

        This layer performs 2D convolution given 5D input tensor of shape
        `[batch, input_dim, input_atoms, input_height, input_width]`. Then refines
        the votes with routing and applies Squash non linearity for each capsule.

        Each capsule in this layer is a convolutional unit and shares its kernel
        over the position grid and different capsules of layer below. Therefore,
        number of trainable variables in this layer is:

          kernel: [kernel_size, kernel_size, input_atoms, output_dim * output_atoms]
          bias: [output_dim, output_atoms]

        Output of a conv2d layer is a single capsule with channel number of atoms.
        Therefore conv_slim_capsule is suitable to be added on top of a conv2d
        layer with num_routing=1, input_dim=1 and input_atoms=conv_channels.

        Args:
          cfg: configuration
          output_dim: scalar, number of capsules in this layer.
          output_atoms: scalar, number of units in each capsule of output layer.
          num_routing: scalar, Number of routing iterations.
          leaky: boolean, if set use leaky routing.
          kernel_size: scalar, convolutional kernels are [kernel_size, kernel_size].
          stride: scalar, stride of the convolutional kernel.
          padding: 'SAME' or 'VALID', padding mechanism for convolutional kernels.
          conv_act_fn: activation function of convolution.
          caps_act_fn: activation function of capsule.
          use_bias: bool, if add biases.
          idx: int, index of layer.
        """
        super(ConvSlimCapsuleV2, self).__init__(
            cfg=cfg,
            output_dim=output_dim,
            output_atoms=output_atoms,
            num_routing=num_routing,
            leaky=leaky,
            act_fn=caps_act_fn,
            use_bias=use_bias,
            idx=idx
        )
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.conv_act_fn = conv_act_fn
        self.caps_act_fn = caps_act_fn

    def _depthwise_conv3d(self, input_tensor, kernel):
        """Performs 2D convolution given a 5D input tensor.

        This layer given an input tensor of shape
        `[batch, input_dim, input_atoms, input_height, input_width]` squeezes the
        first two dimensions to get a 4D tensor as the input of tf.nn.conv2d. Then
        splits the first dimension and the last dimension and returns the 6D
        convolution output.

        Args:
          input_tensor: tensor, of rank 5. Last two dimensions representing height
            and width position grid.
            - shape: [batch, 1, 256, height, width]
          kernel: Tensor, convolutional kernel variables.

        Returns:
          6D Tensor output of a 2D convolution with shape
            `[batch, input_dim, output_dim, output_atoms, out_height, out_width]`,
            the convolution output shape and the input shape.
            If padding is 'SAME', out_height = in_height and out_width = in_width.
            Otherwise, height and width is adjusted with same rules as 'VALID' in
            tf.nn.conv2d.
        """
        with tf.name_scope('conv'):
            batch_size, input_dim, input_atoms, \
                input_height, input_width = input_tensor.get_shape().as_list()

            # Reshape input_tensor to 4D by merging first two dimensions.
            # tf.nn.conv2d only accepts 4D tensors.
            input_tensor_reshaped = tf.reshape(input_tensor, [
                batch_size * input_dim, input_atoms, input_height, input_width
            ])
            input_tensor_reshaped.set_shape(
                (None, input_atoms, input_height, input_width))

            # Channels-first convolution over the merged batch*input_dim axis.
            conv = tf.nn.conv2d(
                input_tensor_reshaped,
                kernel,
                [1, 1, self.stride, self.stride],
                padding=self.padding,
                data_format='NCHW')
            # Dynamic shape, needed because spatial dims may be unknown statically.
            conv_shape = tf.shape(conv)

            if self.conv_act_fn is not None:
                act_fn_conv = self._get_act_fn(self.conv_act_fn)
                conv = act_fn_conv(conv)

            _, _, conv_height, conv_width = conv.get_shape().as_list()

            # Reshape back to 6D by splitting first dimension to batch and input_dim
            # and splitting second dimension to output_dim and output_atoms.
            conv_reshaped = tf.reshape(conv, [
                batch_size, input_dim, self.output_dim,
                self.output_atoms, conv_shape[2], conv_shape[3]
            ])
            conv_reshaped.set_shape((
                None, input_dim, self.output_dim,
                self.output_atoms, conv_height, conv_width
            ))
            return conv_reshaped, conv_shape

    def __call__(self, inputs):
        """Single full-connected layer

        Args:
          inputs: tensor, 5D input tensor of shape
            `[batch, input_dim, input_atoms, input_height, input_width]`. Then refines
            the votes with routing and applies Squash non linearity for each capsule.

        Returns:
          Tensor of activations for this layer of shape
            `[batch, output_dim, output_atoms, out_height, out_width]`. If padding is
            'SAME', out_height = in_height and out_width = in_width. Otherwise, height
            and width is adjusted with same rules as 'VALID' in tf.nn.conv2d.
        """
        with tf.variable_scope('caps_{}'.format(self.idx)):

            batch_size, input_dim, input_atoms, _, _ = inputs.get_shape().as_list()

            # Alternative initializers kept for reference:
            # weights_initializer = tf.contrib.layers.xavier_initializer()
            # biases_initializer = tf.zeros_initializer()
            weights_initializer = tf.truncated_normal_initializer(
                stddev=0.1, dtype=tf.float32)
            biases_initializer = tf.constant_initializer(0.1)

            weights, biases = self._get_variables(
                use_bias=self.use_bias,
                weights_shape=[self.kernel_size, self.kernel_size,
                               input_atoms, self.output_dim * self.output_atoms],
                biases_shape=[self.output_dim, self.output_atoms, 1, 1],
                weights_initializer=weights_initializer,
                biases_initializer=biases_initializer,
                store_on_cpu=self.cfg.VAR_ON_CPU
            )

            votes, votes_shape = self._depthwise_conv3d(inputs, weights)

            with tf.name_scope('routing'):
                logit_shape = tf.stack([
                    batch_size, input_dim, self.output_dim, votes_shape[2], votes_shape[3]
                ])
                if self.use_bias:
                    # Broadcast the per-capsule biases over the spatial grid.
                    biases = tf.tile(biases, [1, 1, votes_shape[2], votes_shape[3]])
                self.output = self._dynamic_routing(
                    votes=votes,
                    biases=biases,
                    logit_shape=logit_shape,
                    num_dims=6,
                    input_dim=input_dim,
                    output_dim=self.output_dim,
                    act_fn=self._get_act_fn(self.caps_act_fn),
                    num_routing=self.num_routing,
                    leaky=self.leaky)

            return self.output
class Mask(ModelBase):
    """Select the capsule vector indicated by the ground-truth labels.

    Multiplies the capsule tensor by a one-hot-style label mask and sums
    over the capsule dimension, leaving only the labelled capsule's atoms.
    """

    def __init__(self, labels):
        """Store the label tensor used to build the mask.

        Args:
          labels: labels of inputs tensor
        """
        super(Mask, self).__init__()
        self.labels = labels

    def __call__(self, inputs):
        """Mask the input capsules by the stored labels.

        Args:
          inputs: input tensor of shape [batch, input_dim, input_atoms]
        Returns:
          masked tensor of shape [batch, input_atoms]
        """
        label_mask = tf.expand_dims(self.labels, axis=-1)
        self.output = tf.reduce_sum(inputs * label_mask, axis=1)
        return self.output
class Capsule5Dto3D(ModelBase):
def __init__(self):
"""Convert | |
# coding: utf-8
"""
Galaxy 3.2 API (wip)
Galaxy 3.2 API (wip) # noqa: E501
The version of the OpenAPI document: 1.2.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class Namespace(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
# Maps each model attribute to its declared OpenAPI type string.
openapi_types = {
    'id': 'int',
    'url': 'str',
    'related': 'dict(str, str)',
    'summary_fields': 'dict(str, object)',
    'active': 'str',
    'modified': 'str',
    'created': 'str',
    'avatar_url': 'str',
    'company': 'str',
    'description': 'str',
    'email': 'str',
    'html_url': 'str',
    'is_vendor': 'bool',
    'location': 'str',
    'name': 'str'
}
# Maps each Python attribute name to the JSON key used on the wire
# (identical here, but the generator always emits the table).
attribute_map = {
    'id': 'id',
    'url': 'url',
    'related': 'related',
    'summary_fields': 'summary_fields',
    'active': 'active',
    'modified': 'modified',
    'created': 'created',
    'avatar_url': 'avatar_url',
    'company': 'company',
    'description': 'description',
    'email': 'email',
    'html_url': 'html_url',
    'is_vendor': 'is_vendor',
    'location': 'location',
    'name': 'name'
}
def __init__(self, id=None, url=None, related=None, summary_fields=None, active=None, modified=None, created=None, avatar_url=None, company=None, description=None, email=None, html_url=None, is_vendor=None, location=None, name=None):  # noqa: E501
    """Namespace - a model defined in OpenAPI"""  # noqa: E501
    # Private backing fields for the property accessors below.
    self._id = None
    self._url = None
    self._related = None
    self._summary_fields = None
    self._active = None
    self._modified = None
    self._created = None
    self._avatar_url = None
    self._company = None
    self._description = None
    self._email = None
    self._html_url = None
    self._is_vendor = None
    self._location = None
    self._name = None
    self.discriminator = None
    # `id` is assigned unconditionally; its setter raises ValueError on None.
    self.id = id
    # Optional fields: assigned only when a value was supplied.
    if url is not None:
        self.url = url
    if related is not None:
        self.related = related
    if summary_fields is not None:
        self.summary_fields = summary_fields
    if active is not None:
        self.active = active
    if modified is not None:
        self.modified = modified
    if created is not None:
        self.created = created
    # The following are assigned even when None — presumably marked
    # required/nullable in the OpenAPI spec (TODO confirm against spec).
    self.avatar_url = avatar_url
    self.company = company
    if description is not None:
        self.description = description
    self.email = email
    self.html_url = html_url
    if is_vendor is not None:
        self.is_vendor = is_vendor
    self.location = location
    self.name = name
# id: required database key — the only setter here that rejects None.
@property
def id(self):
    """Gets the id of this Namespace.  # noqa: E501

    Database ID for this object.  # noqa: E501

    :return: The id of this Namespace.  # noqa: E501
    :rtype: int
    """
    return self._id

@id.setter
def id(self, id):
    """Sets the id of this Namespace.

    Database ID for this object.  # noqa: E501

    :param id: The id of this Namespace.  # noqa: E501
    :type: int
    """
    if id is None:
        raise ValueError("Invalid value for `id`, must not be `None`")  # noqa: E501

    self._id = id
# url / related / summary_fields: plain pass-through accessors, no validation.
@property
def url(self):
    """Gets the url of this Namespace.  # noqa: E501

    URL for this resource.  # noqa: E501

    :return: The url of this Namespace.  # noqa: E501
    :rtype: str
    """
    return self._url

@url.setter
def url(self, url):
    """Sets the url of this Namespace.

    URL for this resource.  # noqa: E501

    :param url: The url of this Namespace.  # noqa: E501
    :type: str
    """
    self._url = url

@property
def related(self):
    """Gets the related of this Namespace.  # noqa: E501

    Data structure with URLs of related resources.  # noqa: E501

    :return: The related of this Namespace.  # noqa: E501
    :rtype: dict(str, str)
    """
    return self._related

@related.setter
def related(self, related):
    """Sets the related of this Namespace.

    Data structure with URLs of related resources.  # noqa: E501

    :param related: The related of this Namespace.  # noqa: E501
    :type: dict(str, str)
    """
    self._related = related

@property
def summary_fields(self):
    """Gets the summary_fields of this Namespace.  # noqa: E501

    Data structure with name/description for related resources.  # noqa: E501

    :return: The summary_fields of this Namespace.  # noqa: E501
    :rtype: dict(str, object)
    """
    return self._summary_fields

@summary_fields.setter
def summary_fields(self, summary_fields):
    """Sets the summary_fields of this Namespace.

    Data structure with name/description for related resources.  # noqa: E501

    :param summary_fields: The summary_fields of this Namespace.  # noqa: E501
    :type: dict(str, object)
    """
    self._summary_fields = summary_fields
# active / modified / created: server-managed strings, stored verbatim.
@property
def active(self):
    """Gets the active of this Namespace.  # noqa: E501

    :return: The active of this Namespace.  # noqa: E501
    :rtype: str
    """
    return self._active

@active.setter
def active(self, active):
    """Sets the active of this Namespace.

    :param active: The active of this Namespace.  # noqa: E501
    :type: str
    """
    self._active = active

@property
def modified(self):
    """Gets the modified of this Namespace.  # noqa: E501

    Timestamp when this object was last modified.  # noqa: E501

    :return: The modified of this Namespace.  # noqa: E501
    :rtype: str
    """
    return self._modified

@modified.setter
def modified(self, modified):
    """Sets the modified of this Namespace.

    Timestamp when this object was last modified.  # noqa: E501

    :param modified: The modified of this Namespace.  # noqa: E501
    :type: str
    """
    self._modified = modified

@property
def created(self):
    """Gets the created of this Namespace.  # noqa: E501

    Timestamp when this object was created.  # noqa: E501

    :return: The created of this Namespace.  # noqa: E501
    :rtype: str
    """
    return self._created

@created.setter
def created(self, created):
    """Sets the created of this Namespace.

    Timestamp when this object was created.  # noqa: E501

    :param created: The created of this Namespace.  # noqa: E501
    :type: str
    """
    self._created = created
# avatar_url / company: setters enforce the spec's 256-char maximum.
@property
def avatar_url(self):
    """Gets the avatar_url of this Namespace.  # noqa: E501

    :return: The avatar_url of this Namespace.  # noqa: E501
    :rtype: str
    """
    return self._avatar_url

@avatar_url.setter
def avatar_url(self, avatar_url):
    """Sets the avatar_url of this Namespace.

    :param avatar_url: The avatar_url of this Namespace.  # noqa: E501
    :type: str
    """
    if avatar_url is not None and len(avatar_url) > 256:
        raise ValueError("Invalid value for `avatar_url`, length must be less than or equal to `256`")  # noqa: E501

    self._avatar_url = avatar_url

@property
def company(self):
    """Gets the company of this Namespace.  # noqa: E501

    :return: The company of this Namespace.  # noqa: E501
    :rtype: str
    """
    return self._company

@company.setter
def company(self, company):
    """Sets the company of this Namespace.

    :param company: The company of this Namespace.  # noqa: E501
    :type: str
    """
    if company is not None and len(company) > 256:
        raise ValueError("Invalid value for `company`, length must be less than or equal to `256`")  # noqa: E501

    self._company = company
# description: note the limit is 255 (unlike the 256 used by the other
# string fields) — this mirrors the generated spec, not a typo here.
@property
def description(self):
    """Gets the description of this Namespace.  # noqa: E501

    :return: The description of this Namespace.  # noqa: E501
    :rtype: str
    """
    return self._description

@description.setter
def description(self, description):
    """Sets the description of this Namespace.

    :param description: The description of this Namespace.  # noqa: E501
    :type: str
    """
    if description is not None and len(description) > 255:
        raise ValueError("Invalid value for `description`, length must be less than or equal to `255`")  # noqa: E501

    self._description = description
# email / html_url: length-limited (256) string fields.
@property
def email(self):
    """Gets the email of this Namespace.  # noqa: E501

    :return: The email of this Namespace.  # noqa: E501
    :rtype: str
    """
    return self._email

@email.setter
def email(self, email):
    """Sets the email of this Namespace.

    :param email: The email of this Namespace.  # noqa: E501
    :type: str
    """
    if email is not None and len(email) > 256:
        raise ValueError("Invalid value for `email`, length must be less than or equal to `256`")  # noqa: E501

    self._email = email

@property
def html_url(self):
    """Gets the html_url of this Namespace.  # noqa: E501

    :return: The html_url of this Namespace.  # noqa: E501
    :rtype: str
    """
    return self._html_url

@html_url.setter
def html_url(self, html_url):
    """Sets the html_url of this Namespace.

    :param html_url: The html_url of this Namespace.  # noqa: E501
    :type: str
    """
    if html_url is not None and len(html_url) > 256:
        raise ValueError("Invalid value for `html_url`, length must be less than or equal to `256`")  # noqa: E501

    self._html_url = html_url
# is_vendor: plain boolean pass-through, no validation.
@property
def is_vendor(self):
    """Gets the is_vendor of this Namespace.  # noqa: E501

    :return: The is_vendor of this Namespace.  # noqa: E501
    :rtype: bool
    """
    return self._is_vendor

@is_vendor.setter
def is_vendor(self, is_vendor):
    """Sets the is_vendor of this Namespace.

    :param is_vendor: The is_vendor of this Namespace.  # noqa: E501
    :type: bool
    """
    self._is_vendor = is_vendor
# location getter (the matching setter continues past this excerpt).
@property
def location(self):
    """Gets the location of this Namespace.  # noqa: E501

    :return: The location of this Namespace.  # noqa: E501
    :rtype: str
    """
    return self._location
@location.setter
def location(self, location):
"""Sets the location | |
<filename>paraschut/executor.py
# -*- coding: utf-8 -*-
"""
PARASCHUT: parallel job scheduling utils.
see also: README.md, example.ipynb
this submodule handles via a unified API the execution of jobs
on different systems, such as: PBS cluster, local multi-CPU
machine, or submission to a script file .
@author: <NAME>, <NAME>
Created on Wed Mar 18 22:45:50 2015
"""
from collections import OrderedDict
from concurrent.futures import ThreadPoolExecutor
from io import BytesIO
import os
import re
from subprocess import run, check_output, call
import time
from warnings import warn
import pandas as pd
from .config import DefQueue, DefResource, ServerHost, Hostname
from . import dal
from . import utils
class JobExecutor(object):
    """Template base class defining the executor API.

    Concrete executors (PBS, SGE, Slurm, local pool, script file) override
    these methods; the base implementations are inert placeholders.
    """

    def __init__(self):
        pass

    def submit(self, JobInfo, Spawn=False):
        """Submit the job to some executor and update the fields:
        submit_id, subtime, state.

        Must return the submit_id, or 'failed'.
        """
        return 'failed'

    def delete(self, JobInfo):
        """Cancel a submitted job (no-op in the template)."""
        pass

    def qstat(self):
        """Return a dict keyed by ClusterID with values
        [name, state in {'R', 'Q'}]."""
        return {}

    def get_job_id(self):
        """Return the job ID assigned by the cluster (None here)."""
        return None

    def get_job_summary(self, ClusterID=None):
        """Return a dict of fields describing the job state
        (time, resources, etc.)."""
        return {}

    def isconnected(self):
        """Whether we're connected to the cluster and can submit."""
        return None

    def shutdown(self):
        """Release executor resources (no-op in the template)."""
        pass
class ClusterJobExecutor(JobExecutor):
    """Marker base class for executors backed by a server-based cluster."""
class PBSJobExecutor(ClusterJobExecutor):
    """PBS/Torque-backed executor.

    Initiated with default queue/resources that individual jobs may
    override through their JobInfo dict.
    """
    def __init__(self, queue=DefQueue, resources=DefResource):
        self.queue = queue
        self.resources = resources
        # inside a PBS job, PBS_JOBID is of the form '<numeric id>.<server>'
        if 'PBS_JOBID' in os.environ:
            self.job_id = os.environ['PBS_JOBID'].split('.')[0]
        else:
            self.job_id = None
        # submission is possible from a cluster node or from the server host
        if (self.job_id is not None) or (ServerHost in Hostname):
            self.connected_to_cluster = True
        else:
            self.connected_to_cluster = False

    def submit(self, JobInfo, Spawn=False):
        """Build and run a qsub command for JobInfo.

        Returns the numeric part of the qsub-assigned job id (str).
        """
        OutFile, ErrFile = utils.get_log_paths(JobInfo)

        # build command
        Qsub = ['qsub']
        if 'queue' in JobInfo and JobInfo['queue'] is not None:
            Qsub += ['-q', JobInfo['queue']]
        elif self.queue is not None:
            Qsub += ['-q', self.queue]
        if ErrFile is not None:
            Qsub += ['-e', ErrFile]
        if OutFile is not None:
            Qsub += ['-o', OutFile]
        # job-level resources (even an explicit None) override the defaults
        if 'resources' in JobInfo:
            this_res = JobInfo['resources']
        else:
            this_res = self.resources
        if this_res is not None:
            Qsub += ['-l'] + [','.join(['{}={}'.format(k, v)
                                        for k, v in sorted(this_res.items())])]
        if 'vars' in JobInfo:
            Qsub += ['-v'] + [','.join(['{}={}'.format(k, repr(v))
                                        for k, v in sorted(JobInfo['vars'].items())])]

        # qsub prints '<id>.<server>\n'; keep only the numeric id
        submit_id = check_output(Qsub + [JobInfo['script']])\
            .decode('UTF-8').replace('\n', '').split('.')[0]
        update_fields(JobInfo, submit_id, Spawn, OutFile, ErrFile)
        return submit_id

    def delete(self, JobInfo):
        """qdel every cluster id of the job, then reset its state to 'init'."""
        for jid in dal.get_internal_ids(JobInfo):
            if not call(['qdel', jid]):
                dal.remove_internal_id(JobInfo, jid)
        JobInfo['state'] = 'init'
        dal.update_job(JobInfo)

    def qstat(self):
        """Return {ClusterID: [name, state]} for the current user's jobs."""
        Q = {}
        if not self.isconnected():
            print('qstat: not running on PBS cluster.')
            return Q
        data = check_output(['qstat', '-u', os.environ['USER']],
                            universal_newlines=True)
        data = data.split('\n')
        # BUG FIX: pattern previously used an unescaped '.', which matched
        # any character after the digits; PBS ids are '<digits>.<server>',
        # so anchor on the literal dot.
        job_parse = re.compile(r'(\d+)\.')
        line_parse = re.compile(r'\s+')
        for line in data:
            job = job_parse.match(line)
            if job:
                line = line_parse.split(line)
                # qstat columns: [3] = user/name field, [9] = state
                Q[job.group(1)] = [line[3], line[9]]
        return Q

    def get_job_id(self):
        return self.job_id

    def get_job_summary(self, ClusterID=None):
        """Return parsed `qstat -f` fields for ClusterID (default: this job)."""
        if ClusterID is None:
            ClusterID = self.job_id
        if ClusterID is None:
            print('get_job_summary: not running on a cluster node.')
            return {}
        try:
            return self.__parse_qstat(check_output(['qstat', '-f', ClusterID]))
        except Exception as e:
            # sometimes this fails on cluster, not clear why
            # (cluster does not recognize the BatchID)
            print(e)
            return None

    def isconnected(self):
        return self.connected_to_cluster

    def __parse_qstat(self, text):
        """Parse 'key = value' lines of `qstat -f` output into a dict."""
        JobInfo = {}
        text = text.decode('utf-8')
        line_parse = re.compile(r'([\w.]*) = ([\w\s:_\-/]*)')
        for line in text.splitlines():
            hit = line_parse.match(line.strip())
            if hit is not None:
                JobInfo[hit.group(1)] = hit.group(2)
        return JobInfo
class SGEJobExecutor(ClusterJobExecutor):
    """Sun Grid Engine-backed executor.

    Initiated with default queue/resources that individual jobs may
    override through their JobInfo dict.
    """
    def __init__(self, queue=DefQueue, resources=DefResource):
        self.queue = queue
        self.resources = resources
        # inside an SGE job, JOB_ID identifies the running task
        if 'JOB_ID' in os.environ:
            self.job_id = os.environ['JOB_ID'].split('.')[0]
        else:
            self.job_id = None
        # submission is possible from a cluster node or from the server host
        if (self.job_id is not None) or (ServerHost in Hostname):
            self.connected_to_cluster = True
        else:
            self.connected_to_cluster = False

    def submit(self, JobInfo, Spawn=False):
        """Build and run a qsub command for JobInfo.

        Returns the numeric part of the qsub-assigned job id (str).
        """
        OutFile, ErrFile = utils.get_log_paths(JobInfo)

        # build command
        Qsub = ['qsub']
        if 'queue' in JobInfo and JobInfo['queue'] is not None:
            Qsub += ['-q', JobInfo['queue']]
        elif self.queue is not None:
            Qsub += ['-q', self.queue]
        if 'name' in JobInfo and JobInfo['name'] is not None:
            Qsub += ['-N', '_'.join(utils.make_iter(JobInfo['name']))]
        if ErrFile is not None:
            Qsub += ['-e', ErrFile]
        if OutFile is not None:
            Qsub += ['-o', OutFile]
        if 'resources' in JobInfo:
            this_res = JobInfo['resources']
        else:
            this_res = self.resources
        if this_res is not None:
            # BUG FIX: work on a copy — popping 'smp' below used to mutate
            # the caller's JobInfo['resources'] (or the executor defaults),
            # silently dropping the 'smp' request from all later submits.
            this_res = dict(this_res)
            if 'smp' in this_res:
                # parallel environments use -pe, not -l
                Qsub += ['-pe', f'smp {this_res.pop("smp")}']
            Qsub += ['-l'] + [','.join(['{}={}'.format(k, v)
                                        for k, v in sorted(this_res.items())])]
        if 'vars' in JobInfo:
            Qsub += ['-v'] + [','.join(['{}={}'.format(k, repr(v))
                                        for k, v in sorted(JobInfo['vars'].items())])]

        # qsub prints 'Your job <id> ("<name>") has been submitted'
        submit_id_raw = check_output(Qsub + [JobInfo['script']])\
            .decode('UTF-8').replace('\n', '')
        submit_id = submit_id_raw.split(' ')[2].split('.')[0]
        update_fields(JobInfo, submit_id, Spawn, OutFile, ErrFile)
        return submit_id

    def delete(self, JobInfo):
        """qdel every cluster id of the job, then reset its state to 'init'."""
        for jid in dal.get_internal_ids(JobInfo):
            if not call(['qdel', jid]):
                dal.remove_internal_id(JobInfo, jid)
        JobInfo['state'] = 'init'
        dal.update_job(JobInfo)

    def qstat(self):
        """Return {ClusterID: [name, state]} for the current user's jobs."""
        Q = {}
        if not self.isconnected():
            print('qstat: not running on SGE cluster.')
            return Q
        data = check_output(['qstat', '-u', os.environ['USER']],
                            universal_newlines=True)
        data = data.split('\n')
        line_parse = re.compile(r'\s+')
        for line in data:
            line = line_parse.split(line)
            if len(line) > 5 and line[1].isnumeric():
                # normalize SGE's lower-case states to the common 'R'/'Q'
                Q[line[1]] = [line[3], line[5].replace('r', 'R').replace('q', 'Q')]
        return Q

    def get_job_id(self):
        return self.job_id

    def get_job_summary(self, ClusterID=None):
        """Return parsed `qstat -j` fields for ClusterID (default: this job)."""
        if ClusterID is None:
            ClusterID = self.job_id
        if ClusterID is None:
            print('get_job_summary: not running on a cluster node.')
            return {}
        try:
            return self.__parse_qstat(check_output(['qstat', '-j', ClusterID]))
        except Exception as e:
            # sometimes this fails on cluster, not clear why
            # (cluster does not recognize the BatchID)
            print(e)
            return None

    def isconnected(self):
        return self.connected_to_cluster

    def __parse_qstat(self, text):
        """Parse 'key: value' lines of `qstat -j` output into a dict."""
        JobInfo = {}
        text = text.decode('utf-8')
        line_parse = re.compile(r'([\w.]*):(\s*)([\w\s:_\-/]*)')
        for line in text.splitlines():
            hit = line_parse.match(line.strip())
            if hit is not None:
                JobInfo[hit.group(1)] = hit.group(3)
        return JobInfo
class SlurmJobExecutor(ClusterJobExecutor):
    """Slurm-backed executor.

    Initiated with default queue/resources that individual jobs may
    override through their JobInfo dict.
    """
    def __init__(self, queue=DefQueue, resources=DefResource):
        self.queue = queue
        self.resources = resources
        # inside a Slurm job, SLURM_JOBID identifies the running task
        if 'SLURM_JOBID' in os.environ:
            self.job_id = os.environ['SLURM_JOBID'].split('.')[0]
        else:
            self.job_id = None
        # submission is possible from a cluster node or from the server host
        if (self.job_id is not None) or (ServerHost in Hostname):
            self.connected_to_cluster = True
        else:
            self.connected_to_cluster = False

    def submit(self, JobInfo, Spawn=False):
        """Build and run an sbatch command for JobInfo.

        Returns the numeric part of the sbatch-assigned job id (str).
        """
        OutFile, ErrFile = utils.get_log_paths(JobInfo)

        # build command
        Qsub = ['sbatch']
        if 'queue' in JobInfo and JobInfo['queue'] is not None:
            Qsub += ['-p', JobInfo['queue']]
        elif self.queue is not None:
            Qsub += ['-p', self.queue]
        if 'name' in JobInfo and JobInfo['name'] is not None:
            Qsub += ['--job-name=' + '_'.join(utils.make_iter(JobInfo['name']))]
        if ErrFile is not None:
            Qsub += ['-e', ErrFile]
        if OutFile is not None:
            Qsub += ['-o', OutFile]
        if 'resources' in JobInfo:
            this_res = JobInfo['resources']
        else:
            this_res = self.resources
        if this_res is not None:
            # work on a copy: popping 'walltime' below must not mutate the
            # caller's JobInfo['resources'] or the executor's defaults
            this_res = dict(this_res)
            if 'walltime' in this_res:
                Qsub += ['--time', this_res.pop('walltime')]
            if this_res:
                # BUG FIX: the original concatenated a str with a dict
                # ('--gres=' + this_res), raising TypeError. Format the
                # remaining resources as a '--gres=name:count,...' list.
                Qsub += ['--gres=' + ','.join('{}:{}'.format(k, v)
                                              for k, v in sorted(this_res.items()))]
        if 'vars' in JobInfo:
            warn('environment variables cannot be set on Slurm clusters.')

        # sbatch prints 'Submitted batch job <id>'
        submit_id_raw = check_output(Qsub + [JobInfo['script']])\
            .decode('UTF-8').replace('\n', '')
        submit_id = submit_id_raw.split(' ')[3].split('.')[0]
        update_fields(JobInfo, submit_id, Spawn, OutFile, ErrFile)
        return submit_id

    def delete(self, JobInfo):
        """scancel every cluster id of the job, then reset state to 'init'."""
        for jid in dal.get_internal_ids(JobInfo):
            if not call(['scancel', jid]):
                dal.remove_internal_id(JobInfo, jid)
        JobInfo['state'] = 'init'
        dal.update_job(JobInfo)

    def qstat(self):
        """Return {ClusterID: [name, state]} for the current user's jobs."""
        Q = {}
        if not self.isconnected():
            print('qstat: not running on Slurm cluster.')
            return Q
        data = check_output(['squeue', '-u', os.environ['USER']],
                            universal_newlines=True)
        data = data.split('\n')
        line_parse = re.compile(r'\s+')
        for line in data:
            line = line_parse.split(line)
            if len(line) > 5 and line[1].isnumeric():
                # normalize lower-case states to the common 'R'/'Q'
                Q[line[1]] = [line[3], line[5].replace('r', 'R').replace('q', 'Q')]
        return Q

    def get_job_id(self):
        return self.job_id

    def get_job_summary(self, ClusterID=None):
        """Return parsed `sstat` fields for ClusterID (default: this job)."""
        if ClusterID is None:
            ClusterID = self.job_id
        if ClusterID is None:
            print('get_job_summary: not running on a cluster node.')
            return {}
        try:
            return self.__parse_qstat(check_output(['sstat', ClusterID]))
        except Exception as e:
            # sometimes this fails on cluster, not clear why
            # (cluster does not recognize the BatchID)
            print(e)
            return None

    def isconnected(self):
        return self.connected_to_cluster

    def __parse_qstat(self, text):
        """Parse whitespace-delimited `sstat` output; row 1 holds the data."""
        return pd.read_csv(BytesIO(text), sep=r'\s+').to_dict(orient='index')[1]
class LocalJobExecutor(JobExecutor):
""" returns a pool executer with a submit method.
currently using ThreadPoolExecutor to start new subprocesses. """
def __init__(self, max_workers=os.cpu_count(), submitter=False, verbose=2):
    """ verbose level 1 is for muted exceptions, 2 is for warnings,
        3 is for debugging logs. """
    # NOTE(review): max_workers default is evaluated once at import time.
    self._pool = ThreadPoolExecutor(max_workers=max_workers)
    # submit_id -> job record (presumably [name, future, ...] — see submit)
    self._queue = OrderedDict()
    self.verbose = verbose
    # inside a PBS job (and not acting as the submitter) adopt the PBS id;
    # otherwise the sentinel 'paraschut' marks the submitting process
    if 'PBS_JOBID' in os.environ and not submitter:
        self.job_id = os.environ['PBS_JOBID'].split('.')[0]
    else:
        self.job_id = 'paraschut'
    self.connected_to_cluster = True
def submit(self, JobInfo, Spawn=False):
if self.job_id != 'paraschut':
self.__print('cannot submit from a subprocess. ClusterID must be set to "paraschut".', 2)
return 'failed'
OutFile, ErrFile = utils.get_log_paths(JobInfo)
submit_id = str(int(10**3*time.time() % 10**10))
update_fields(JobInfo, submit_id, Spawn, OutFile, ErrFile)
self._queue[submit_id] = [f"{JobInfo['BatchID']}-{JobInfo['JobIndex']}", | |
= _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
self._read()
def _read(self):
    # Sequentially decode three big-endian u16 fields, recording the
    # stream offset before/after each read in self._debug for tooling.
    self._debug['sound_id']['start'] = self._io.pos()
    self.sound_id = self._io.read_u2be()
    self._debug['sound_id']['end'] = self._io.pos()
    self._debug['volume']['start'] = self._io.pos()
    self.volume = self._io.read_u2be()
    self._debug['volume']['end'] = self._io.pos()
    self._debug['pitch']['start'] = self._io.pos()
    self.pitch = self._io.read_u2be()
    self._debug['pitch']['end'] = self._io.pos()
class PuzzleCondC(KaitaiStruct):
    """Puzzle condition variant C: six big-endian u32 fields."""
    SEQ_FIELDS = ["i_0x00", "i_0x04", "i_0x08", "i_0x0c", "i_0x10", "i_0x14"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
        self._read()

    def _read(self):
        # decode fields in stream order, logging offsets for debug tooling
        for name in self.SEQ_FIELDS:
            self._debug[name]['start'] = self._io.pos()
            setattr(self, name, self._io.read_u4be())
            self._debug[name]['end'] = self._io.pos()
class EnemyInstructionTurn(KaitaiStruct):
    """Enemy 'turn' instruction: an f4 look-at point plus a u32 flag."""
    SEQ_FIELDS = ["lookat_x", "lookat_y", "lookat_z", "choose_random_direction"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
        self._read()

    def _read(self):
        # decode fields in stream order, logging offsets for debug tooling
        f4, u4 = self._io.read_f4be, self._io.read_u4be
        for name, rd in (("lookat_x", f4), ("lookat_y", f4),
                         ("lookat_z", f4), ("choose_random_direction", u4)):
            self._debug[name]['start'] = self._io.pos()
            setattr(self, name, rd())
            self._debug[name]['end'] = self._io.pos()
class PuzzleAction0x4a(KaitaiStruct):
    """Puzzle action 0x4a: two u32 fields followed by one u16."""
    SEQ_FIELDS = ["u32_0x24", "u32_0x24_0x0c", "u16_0x0a"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
        self._read()

    def _read(self):
        # decode fields in stream order, logging offsets for debug tooling
        for name, rd in (("u32_0x24", self._io.read_u4be),
                         ("u32_0x24_0x0c", self._io.read_u4be),
                         ("u16_0x0a", self._io.read_u2be)):
            self._debug[name]['start'] = self._io.pos()
            setattr(self, name, rd())
            self._debug[name]['end'] = self._io.pos()
class EnemyConditionalInstruction(KaitaiStruct):
    """Wrapper record holding a single nested EnemyInstruction."""
    SEQ_FIELDS = ["instr"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
        self._read()

    def _read(self):
        # the nested struct advances the shared stream; log its extent
        pos = self._io.pos
        self._debug['instr']['start'] = pos()
        self.instr = GloverLevel.EnemyInstruction(self._io, self, self._root)
        self._debug['instr']['end'] = pos()
class PlatSetTag(KaitaiStruct):
    """Platform command: a single big-endian u16 `tag`."""
    SEQ_FIELDS = ["tag"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
        self._read()

    def _read(self):
        # decode the lone field, logging offsets for debug tooling
        self._debug['tag']['start'] = self._io.pos()
        self.tag = self._io.read_u2be()
        self._debug['tag']['end'] = self._io.pos()
class PlatCopySpinFromParent(KaitaiStruct):
    """Zero-payload platform command; presence alone carries the meaning."""
    SEQ_FIELDS = []

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
        self._read()

    def _read(self):
        """Nothing to decode: the record has no fields."""
class Vent(KaitaiStruct):
    """Vent record: three u16 header fields, then origin and
    particle-velocity vectors as big-endian f4 triples."""
    SEQ_FIELDS = ["type", "u16_0x0a", "parent_tag", "origin_x", "origin_y",
                  "origin_z", "particle_velocity_x", "particle_velocity_y",
                  "particle_velocity_z"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
        self._read()

    def _read(self):
        # decode fields in stream order, logging offsets for debug tooling
        u2, f4 = self._io.read_u2be, self._io.read_f4be
        for name, rd in (("type", u2), ("u16_0x0a", u2), ("parent_tag", u2),
                         ("origin_x", f4), ("origin_y", f4), ("origin_z", f4),
                         ("particle_velocity_x", f4),
                         ("particle_velocity_y", f4),
                         ("particle_velocity_z", f4)):
            self._debug[name]['start'] = self._io.pos()
            setattr(self, name, rd())
            self._debug[name]['end'] = self._io.pos()
class PuzzleCond(KaitaiStruct):
    """Tagged puzzle condition: a u16 `cond_type` selects which
    PuzzleCond{A..E} variant follows in the stream."""
    SEQ_FIELDS = ["cond_type", "body"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
        self._read()

    def _read(self):
        self._debug['cond_type']['start'] = self._io.pos()
        self.cond_type = self._io.read_u2be()
        self._debug['cond_type']['end'] = self._io.pos()
        self._debug['body']['start'] = self._io.pos()
        # dispatch table replacing the generated if/elif chain;
        # unknown types fall back to PuzzleCondA
        body_types = {
            34: GloverLevel.PuzzleCondB,
            35: GloverLevel.PuzzleCondC,
            36: GloverLevel.PuzzleCondD,
            37: GloverLevel.PuzzleCondC,
            38: GloverLevel.PuzzleCondD,
            39: GloverLevel.PuzzleCondC,
            40: GloverLevel.PuzzleCondD,
            41: GloverLevel.PuzzleCondE,
        }
        body_cls = body_types.get(self.cond_type, GloverLevel.PuzzleCondA)
        self.body = body_cls(self._io, self, self._root)
        self._debug['body']['end'] = self._io.pos()
class PlatMvspn0x73(KaitaiStruct):
    """Platform move/spin record 0x73: one u16 followed by three u32s."""
    SEQ_FIELDS = ["u16_0x0c", "u32_0x34", "u32_0x38", "u32_0x3c"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
        self._read()

    def _read(self):
        # decode fields in stream order, logging offsets for debug tooling
        u2, u4 = self._io.read_u2be, self._io.read_u4be
        for name, rd in (("u16_0x0c", u2), ("u32_0x34", u4),
                         ("u32_0x38", u4), ("u32_0x3c", u4)):
            self._debug[name]['start'] = self._io.pos()
            setattr(self, name, rd())
            self._debug[name]['end'] = self._io.pos()
class EnemyInstructionAttack(KaitaiStruct):
    """Enemy 'attack' instruction: two u32 fields (named unused here)."""
    SEQ_FIELDS = ["unused_1", "unused_2"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
        self._read()

    def _read(self):
        # decode fields in stream order, logging offsets for debug tooling
        for name in self.SEQ_FIELDS:
            self._debug[name]['start'] = self._io.pos()
            setattr(self, name, self._io.read_u4be())
            self._debug[name]['end'] = self._io.pos()
class EnemyInstructionRest(KaitaiStruct):
    """Enemy 'rest' instruction: u32 flags and a u32 animation field."""
    SEQ_FIELDS = ["flags", "anim_start_playing"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
        self._read()

    def _read(self):
        # decode fields in stream order, logging offsets for debug tooling
        for name in self.SEQ_FIELDS:
            self._debug[name]['start'] = self._io.pos()
            setattr(self, name, self._io.read_u4be())
            self._debug[name]['end'] = self._io.pos()
class LookAtBall0x61(KaitaiStruct):
    """Camera command 0x61 (look at ball): two u32 fields."""
    SEQ_FIELDS = ["u32_0x6c", "u32_0x1c"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
        self._read()

    def _read(self):
        # decode fields in stream order, logging offsets for debug tooling
        for name in self.SEQ_FIELDS:
            self._debug[name]['start'] = self._io.pos()
            setattr(self, name, self._io.read_u4be())
            self._debug[name]['end'] = self._io.pos()
class LookAtHand0x60(KaitaiStruct):
    """Camera command 0x60 (look at hand): two u32 fields."""
    SEQ_FIELDS = ["u32_0x6c", "u32_0x1c"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
        self._read()

    def _read(self):
        # decode fields in stream order, logging offsets for debug tooling
        for name in self.SEQ_FIELDS:
            self._debug[name]['start'] = self._io.pos()
            setattr(self, name, self._io.read_u4be())
            self._debug[name]['end'] = self._io.pos()
class CameoInst2(KaitaiStruct):
    """Cameo instruction variant 2: u16, four u32s, then two u16s."""
    SEQ_FIELDS = ["h_0x00", "i_0x02", "i_0x06", "i_0x0a", "i_0x0e",
                  "h_0x12", "h_0x14"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
        self._read()

    def _read(self):
        # decode fields in stream order, logging offsets for debug tooling
        u2, u4 = self._io.read_u2be, self._io.read_u4be
        for name, rd in (("h_0x00", u2), ("i_0x02", u4), ("i_0x06", u4),
                         ("i_0x0a", u4), ("i_0x0e", u4),
                         ("h_0x12", u2), ("h_0x14", u2)):
            self._debug[name]['start'] = self._io.pos()
            setattr(self, name, rd())
            self._debug[name]['end'] = self._io.pos()
class Unknown0xa9(KaitaiStruct):
    """Unidentified record 0xa9: a single big-endian u32."""
    SEQ_FIELDS = ["i_0x00"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
        self._read()

    def _read(self):
        # decode the lone field, logging offsets for debug tooling
        self._debug['i_0x00']['start'] = self._io.pos()
        self.i_0x00 = self._io.read_u4be()
        self._debug['i_0x00']['end'] = self._io.pos()
class PlatVentAdvanceFrames(KaitaiStruct):
    """Opcode payload: u16 frame count for advancing a vent animation."""
    SEQ_FIELDS = ["num_frames"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        # When no root is supplied this instance acts as its own root.
        self._root = _root or self
        self._debug = collections.defaultdict(dict)
        self._read()

    def _read(self):
        # Record start/end stream offsets around the single field.
        dbg = self._debug['num_frames']
        dbg['start'] = self._io.pos()
        self.num_frames = self._io.read_u2be()
        dbg['end'] = self._io.pos()
class SetExit(KaitaiStruct):
    """Opcode payload: exit descriptor — u16 type and u16 visibility flag."""
    SEQ_FIELDS = ["type", "visible"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        # When no root is supplied this instance acts as its own root.
        self._root = _root or self
        self._debug = collections.defaultdict(dict)
        self._read()

    def _read(self):
        # Both fields are u2be; record start/end stream offsets for each.
        for field in self.SEQ_FIELDS:
            self._debug[field]['start'] = self._io.pos()
            setattr(self, field, self._io.read_u2be())
            self._debug[field]['end'] = self._io.pos()
class PlatSound0xc1(KaitaiStruct):
    """Opcode payload: sound trigger — u16 sound id, volume and pitch."""
    SEQ_FIELDS = ["sound_id", "volume", "pitch"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        # When no root is supplied this instance acts as its own root.
        self._root = _root or self
        self._debug = collections.defaultdict(dict)
        self._read()

    def _read(self):
        # All three fields are u2be; record start/end stream offsets for each.
        for field in self.SEQ_FIELDS:
            self._debug[field]['start'] = self._io.pos()
            setattr(self, field, self._io.read_u2be())
            self._debug[field]['end'] = self._io.pos()
class PlatActorEnableWaterAnimation(KaitaiStruct):
    """Marker opcode with an empty payload: nothing is read from the stream."""
    SEQ_FIELDS = []

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        # When no root is supplied this instance acts as its own root.
        self._root = _root or self
        self._debug = collections.defaultdict(dict)
        self._read()

    def _read(self):
        # No sequence fields to parse.
        pass
class EnemyInstructionC(KaitaiStruct):
    """Opcode payload: two big-endian u32 fields (offsets 0x02 and 0x0e)."""
    SEQ_FIELDS = ["u32_0x02", "u32_0x0e"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        # When no root is supplied this instance acts as its own root.
        self._root = _root or self
        self._debug = collections.defaultdict(dict)
        self._read()

    def _read(self):
        # Both fields are u4be; record start/end stream offsets for each.
        for field in self.SEQ_FIELDS:
            self._debug[field]['start'] = self._io.pos()
            setattr(self, field, self._io.read_u4be())
            self._debug[field]['end'] = self._io.pos()
class PuzzleAnd(KaitaiStruct):
    """Marker opcode with an empty payload: nothing is read from the stream."""
    SEQ_FIELDS = []

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        # When no root is supplied this instance acts as its own root.
        self._root = _root or self
        self._debug = collections.defaultdict(dict)
        self._read()

    def _read(self):
        # No sequence fields to parse.
        pass
class Plat0x66(KaitaiStruct):
SEQ_FIELDS = []
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root | |
import numpy as np
import random
import json
import h5py
from patch_library import PatchLibrary
from glob import glob
import matplotlib.pyplot as plt
from skimage import io, color, img_as_float
from skimage.exposure import adjust_gamma
from skimage.segmentation import mark_boundaries
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.metrics import classification_report
from keras.models import Sequential, Graph, model_from_json
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.core import Dense, Dropout, Activation, Flatten, Merge, Reshape, MaxoutDense
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l1l2
from keras.optimizers import SGD
from keras.constraints import maxnorm
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.utils import np_utils
class SegmentationModel(object):
def __init__(self, n_epoch=10, n_chan=4, batch_size=128, loaded_model=False, architecture='single', w_reg=0.01, n_filters=[64,128,128,128], k_dims = [7,5,5,3], activation = 'relu'):
    '''
    A class for compiling/loading, fitting and saving various models, viewing segmented images and analyzing results
    INPUT   (1) int 'n_epoch': number of epochs to train on. defaults to 10
            (2) int 'n_chan': number of channels being assessed. defaults to 4
            (3) int 'batch_size': number of images to train on for each batch. defaults to 128
            (4) bool 'loaded_model': True if loading a pre-existing model. defaults to False
            (5) str 'architecture': type of model to use, options = single, dual, or two_path. defaults to single (only currently optimized version)
            (6) float 'w_reg': value for l1 and l2 regularization. defaults to 0.01
            (7) list 'n_filters': number of filters for each convolutional layer (4 total)
            (8) list 'k_dims': dimension of kernel at each layer (will be a k_dim[n] x k_dim[n] square). Four total.
            (9) string 'activation': activation to use at each convolutional layer. defaults to relu.
    '''
    # NOTE(review): 'n_filters' and 'k_dims' use mutable default arguments,
    # shared across instances; callers should not mutate them in place.
    self.n_epoch = n_epoch
    self.n_chan = n_chan
    self.batch_size = batch_size
    self.architecture = architecture
    self.loaded_model = loaded_model
    self.w_reg = w_reg
    self.n_filters = n_filters
    self.k_dims = k_dims
    self.activation = activation
    # Either compile a fresh model for the chosen architecture, or
    # interactively prompt for a saved model's path prefix and load it.
    if not self.loaded_model:
        if self.architecture == 'two_path':
            self.model_comp = self.comp_two_path()
        elif self.architecture == 'dual':
            self.model_comp = self.comp_double()
        else:
            self.model_comp = self.compile_model()
    else:
        model = str(raw_input('Which model should I load? '))
        self.model_comp = self.load_model_weights(model)
def compile_model(self):
'''
compiles standard single model with 4 convolitional/max-pooling layers.
'''
print 'Compiling single model...'
single = Sequential()
single.add(Convolution2D(self.n_filters[0], self.k_dims[0], self.k_dims[0], border_mode='valid', W_regularizer=l1l2(l1=self.w_reg, l2=self.w_reg), input_shape=(self.n_chan,33,33)))
single.add(Activation(self.activation))
single.add(BatchNormalization(mode=0, axis=1))
single.add(MaxPooling2D(pool_size=(2,2), strides=(1,1)))
single.add(Dropout(0.5))
single.add(Convolution2D(self.n_filters[1], self.k_dims[1], self.k_dims[1], activation=self.activation, border_mode='valid', W_regularizer=l1l2(l1=self.w_reg, l2=self.w_reg)))
single.add(BatchNormalization(mode=0, axis=1))
single.add(MaxPooling2D(pool_size=(2,2), strides=(1,1)))
single.add(Dropout(0.5))
single.add(Convolution2D(self.n_filters[2], self.k_dims[2], self.k_dims[2], activation=self.activation, border_mode='valid', W_regularizer=l1l2(l1=self.w_reg, l2=self.w_reg)))
single.add(BatchNormalization(mode=0, axis=1))
single.add(MaxPooling2D(pool_size=(2,2), strides=(1,1)))
single.add(Dropout(0.5))
single.add(Convolution2D(self.n_filters[3], self.k_dims[3], self.k_dims[3], activation=self.activation, border_mode='valid', W_regularizer=l1l2(l1=self.w_reg, l2=self.w_reg)))
single.add(Dropout(0.25))
single.add(Flatten())
single.add(Dense(5))
single.add(Activation('softmax'))
sgd = SGD(lr=0.001, decay=0.01, momentum=0.9)
single.compile(loss='categorical_crossentropy', optimizer='sgd')
print 'Done.'
return single
def comp_two_path(self):
'''
compiles two-path model, takes in a 4x33x33 patch and assesses global and local paths, then merges the results.
'''
print 'Compiling two-path model...'
model = Graph()
model.add_input(name='input', input_shape=(self.n_chan, 33, 33))
# local pathway, first convolution/pooling
model.add_node(Convolution2D(64, 7, 7, border_mode='valid', activation='relu', W_regularizer=l1l2(l1=0.01, l2=0.01)), name='local_c1', input= 'input')
model.add_node(MaxPooling2D(pool_size=(4,4), strides=(1,1), border_mode='valid'), name='local_p1', input='local_c1')
# local pathway, second convolution/pooling
model.add_node(Dropout(0.5), name='drop_lp1', input='local_p1')
model.add_node(Convolution2D(64, 3, 3, border_mode='valid', activation='relu', W_regularizer=l1l2(l1=0.01, l2=0.01)), name='local_c2', input='drop_lp1')
model.add_node(MaxPooling2D(pool_size=(2,2), strides=(1,1), border_mode='valid'), name='local_p2', input='local_c2')
# global pathway
model.add_node(Convolution2D(160, 13, 13, border_mode='valid', activation='relu', W_regularizer=l1l2(l1=0.01, l2=0.01)), name='global', input='input')
# merge local and global pathways
model.add_node(Dropout(0.5), name='drop_lp2', input='local_p2')
model.add_node(Dropout(0.5), name='drop_g', input='global')
model.add_node(Convolution2D(5, 21, 21, border_mode='valid', activation='relu', W_regularizer=l1l2(l1=0.01, l2=0.01)), name='merge', inputs=['drop_lp2', 'drop_g'], merge_mode='concat', concat_axis=1)
# Flatten output of 5x1x1 to 1x5, perform softmax
model.add_node(Flatten(), name='flatten', input='merge')
model.add_node(Dense(5, activation='softmax'), name='dense_output', input='flatten')
model.add_output(name='output', input='dense_output')
sgd = SGD(lr=0.005, decay=0.1, momentum=0.9)
model.compile('sgd', loss={'output':'categorical_crossentropy'})
print 'Done.'
return model
def comp_double(self):
'''
double model. Simialar to two-pathway, except takes in a 4x33x33 patch and it's center 4x5x5 patch. merges paths at flatten layer.
'''
print 'Compiling double model...'
single = Sequential()
single.add(Convolution2D(64, 7, 7, border_mode='valid', W_regularizer=l1l2(l1=0.01, l2=0.01), input_shape=(4,33,33)))
single.add(Activation('relu'))
single.add(BatchNormalization(mode=0, axis=1))
single.add(MaxPooling2D(pool_size=(2,2), strides=(1,1)))
single.add(Dropout(0.5))
single.add(Convolution2D(nb_filter=128, nb_row=5, nb_col=5, activation='relu', border_mode='valid', W_regularizer=l1l2(l1=0.01, l2=0.01)))
single.add(BatchNormalization(mode=0, axis=1))
single.add(MaxPooling2D(pool_size=(2,2), strides=(1,1)))
single.add(Dropout(0.5))
single.add(Convolution2D(nb_filter=256, nb_row=5, nb_col=5, activation='relu', border_mode='valid', W_regularizer=l1l2(l1=0.01, l2=0.01)))
single.add(BatchNormalization(mode=0, axis=1))
single.add(MaxPooling2D(pool_size=(2,2), strides=(1,1)))
single.add(Dropout(0.5))
single.add(Convolution2D(nb_filter=128, nb_row=3, nb_col=3, activation='relu', border_mode='valid', W_regularizer=l1l2(l1=0.01, l2=0.01)))
single.add(Dropout(0.25))
single.add(Flatten())
# add small patch to train on
five = Sequential()
five.add(Reshape((100,1), input_shape = (4,5,5)))
five.add(Flatten())
five.add(MaxoutDense(128, nb_feature=5))
five.add(Dropout(0.5))
model = Sequential()
# merge both paths
model.add(Merge([five, single], mode='concat', concat_axis=1))
model.add(Dense(5))
model.add(Activation('softmax'))
sgd = SGD(lr=0.001, decay=0.01, momentum=0.9)
model.compile(loss='categorical_crossentropy', optimizer='sgd')
print 'Done.'
return model
def load_model_weights(self, model_name):
    '''
    Load a previously saved model architecture and its weights.
    INPUT   (1) string 'model_name': filepath to model and weights, not including extension
    OUTPUT: Model with loaded weights. can fit on model using loaded_model=True in fit_model method
    '''
    print 'Loading model {}'.format(model_name)
    model = '{}.json'.format(model_name)
    weights = '{}.hdf5'.format(model_name)
    with open(model) as f:
        # save_model() json.dump()s the to_json() string, so the file is a
        # single line containing a JSON-encoded string; f.next() reads that
        # line and json.loads() decodes it back to the architecture string.
        m = f.next()
    model_comp = model_from_json(json.loads(m))
    model_comp.load_weights(weights)
    print 'Done.'
    return model_comp
def fit_model(self, X_train, y_train, X5_train = None, save=True):
    '''
    Fits the compiled model, checkpointing weights after every epoch.
    INPUT   (1) numpy array 'X_train': list of patches to train on in form (n_sample, n_channel, h, w)
            (2) numpy vector 'y_train': list of labels corresponding to X_train patches in form (n_sample,)
            (3) numpy array 'X5_train': center 5x5 patch in corresponding X_train patch. if None, uses single-path architecture
            (4) bool 'save': unused; kept for interface compatibility
    OUTPUT  (1) Fits specified model
    '''
    Y_train = np_utils.to_categorical(y_train, 5)
    # BUG FIX: the original shuffled X_train/Y_train (via zip) but left
    # X5_train in its original order, misaligning the dual architecture's
    # center patches with their labels. Apply one permutation to all arrays.
    perm = np.random.permutation(len(X_train))
    X_train = X_train[perm]
    Y_train = Y_train[perm]
    if X5_train is not None:
        X5_train = X5_train[perm]
    # Save model weights after each epoch to check/bm_epoch#-val_loss.
    # (An EarlyStopping callback was previously constructed here but never
    # passed to fit(), so it has been removed.)
    checkpointer = ModelCheckpoint(filepath="./check/bm_{epoch:02d}-{val_loss:.2f}.hdf5", verbose=1)
    if self.architecture == 'dual':
        self.model_comp.fit([X5_train, X_train], Y_train, batch_size=self.batch_size, nb_epoch=self.n_epoch, validation_split=0.1, show_accuracy=True, verbose=1, callbacks=[checkpointer])
    elif self.architecture == 'two_path':
        data = {'input': X_train, 'output': Y_train}
        self.model_comp.fit(data, batch_size=self.batch_size, nb_epoch=self.n_epoch, validation_split=0.1, show_accuracy=True, verbose=1, callbacks=[checkpointer])
    else:
        self.model_comp.fit(X_train, Y_train, batch_size=self.batch_size, nb_epoch=self.n_epoch, validation_split=0.1, show_accuracy=True, verbose=1, callbacks=[checkpointer])
def save_model(self, model_name):
    '''
    Serializes the current model to disk.
    INPUT string 'model_name': path prefix (no extension) to save under;
    writes '<model_name>.json' (architecture) and '<model_name>.hdf5' (weights)
    '''
    arch_path = '{}.json'.format(model_name)
    weight_path = '{}.hdf5'.format(model_name)
    # Serialize the architecture first, then dump the weights.
    architecture = self.model_comp.to_json()
    self.model_comp.save_weights(weight_path)
    with open(arch_path, 'w') as fh:
        json.dump(architecture, fh)
def class_report(self, X_test, y_test):
'''
returns skilearns test report (precision, recall, f1-score)
INPUT (1) list 'X_test': test data of 4x33x33 patches
(2) list 'y_test': labels for X_test
OUTPUT (1) confusion matrix of precision, recall and f1 score
'''
y_pred = self.model_load.predict_class(X_test)
print classification_report(y_pred, y_test)
def predict_image(self, test_img, show=False):
    '''
    predicts classes of input image
    INPUT   (1) str 'test_img': filepath to image to predict on
            (2) bool 'show': True to show the results of prediction, False to return prediction
    OUTPUT  (1) if show == False: array of predicted pixel classes for the center 208 x 208 pixels
            (2) if show == True: displays segmentation results
    '''
    imgs = io.imread(test_img).astype('float').reshape(5,240,240)
    plist = []
    # Create 33x33 patches for each channel plane; the final plane
    # (imgs[-1]) is skipped. Each plane is max-normalized unless all-zero.
    for img in imgs[:-1]:
        if np.max(img) != 0:
            img /= np.max(img)
        p = extract_patches_2d(img, (33,33))
        plist.append(p)
    # Re-assemble per-channel patch lists into (n_patches, 4, 33, 33).
    patches = np.array(zip(np.array(plist[0]), np.array(plist[1]), np.array(plist[2]), np.array(plist[3])))
    # predict classes of each pixel based on model
    full_pred = self.model_comp.predict_classes(patches)
    fp1 = full_pred.reshape(208,208)
    if show:
        io.imshow(fp1)
        # BUG FIX: 'plt.show' was referenced without calling it (missing
        # parentheses), so the figure was never displayed.
        plt.show()
    else:
        return fp1
def show_segmented_image(self, test_img, modality='t1c', show = False):
    '''
    Creates an image of original brain with segmentation overlay
    INPUT   (1) str 'test_img': filepath to test image for segmentation, including file extension
            (2) str 'modality': imaging modelity to use as background. defaults to t1c. options: (flair, t1, t1c, t2)
            (3) bool 'show': If true, shows output image. defaults to False.
    OUTPUT  (1) if show is True, shows image of segmentation results
            (2) if show is false, returns segmented image.
    '''
    # NOTE(review): 'modes' and the 'modality' argument are never used; the
    # background plane is hard-coded to reshape(5,240,240)[-2] below.
    # Confirm whether modes[modality] was meant to select the plane.
    modes = {'flair':0, 't1':1, 't1c':2, 't2':3}
    segmentation = self.predict_image(test_img, show=False)
    # Pad the 208x208 prediction back to 240x240 to align with the source.
    img_mask = np.pad(segmentation, (16,16), mode='edge')
    # Pixel coordinates for each predicted class (1-4).
    ones = np.argwhere(img_mask == 1)
    twos = np.argwhere(img_mask == 2)
    threes = np.argwhere(img_mask == 3)
    fours = np.argwhere(img_mask == 4)
    test_im = io.imread(test_img)
    test_back = test_im.reshape(5,240,240)[-2]
    # overlay = mark_boundaries(test_back, img_mask)
    gray_img = img_as_float(test_back)
    # adjust gamma of image
    image = adjust_gamma(color.gray2rgb(gray_img), 0.65)
    sliced_image = image.copy()
    # RGB multipliers used to tint each segmented class.
    red_multiplier = [1, 0.2, 0.2]
    yellow_multiplier = [1,1,0.25]
    green_multiplier = [0.35,0.75,0.25]
    blue_multiplier = [0,0.25,0.9]
    # change colors of segmented classes
    for i in xrange(len(ones)):
        sliced_image[ones[i][0]][ones[i][1]] = red_multiplier
    for i in xrange(len(twos)):
        sliced_image[twos[i][0]][twos[i][1]] = green_multiplier
    for i in xrange(len(threes)):
        sliced_image[threes[i][0]][threes[i][1]] = blue_multiplier
    for i in xrange(len(fours)):
        sliced_image[fours[i][0]][fours[i][1]] = yellow_multiplier
    if show:
        io.imshow(sliced_image)
        plt.show()
    else:
        return sliced_image
def get_dice_coef(self, test_img, label):
'''
Calculate dice coefficient for total slice, tumor-associated slice, advancing tumor and core tumor
INPUT (1) str 'test_img': filepath to slice to predict on
(2) str 'label': filepath to ground truth label for test_img
OUTPUT: Summary of dice scores for the following classes:
- all classes
- | |
import pytest
import salt.modules.useradd as useradd
from salt.exceptions import CommandExecutionError
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
    """Wire useradd with minimal Linux/CentOS grains and an empty __salt__."""
    grains = {
        "kernel": "Linux",
        "osarch": "x86_64",
        "os": "CentOS",
        "os_family": "RedHat",
        "osmajorrelease": 8,
    }
    return {useradd: {"__grains__": grains, "__salt__": {}}}
def test_add():
    """useradd.add: True on rc 0, False on rc != 0, raises when the binary is absent."""
    found = MagicMock(return_value="/sbin/useradd")
    # binary present, useradd exits 0
    run_all = MagicMock(return_value={"retcode": 0})
    with patch("salt.utils.path.which", found), patch.dict(
        useradd.__salt__, {"cmd.run_all": run_all}
    ):
        assert useradd.add("Salt") is True
        run_all.assert_called_once_with(["/sbin/useradd", "-m", "Salt"], python_shell=False)
    # binary present, useradd exits non-zero
    run_all = MagicMock(return_value={"retcode": 1})
    with patch("salt.utils.path.which", found), patch.dict(
        useradd.__salt__, {"cmd.run_all": run_all}
    ):
        assert useradd.add("Salt") is False
        run_all.assert_called_once_with(["/sbin/useradd", "-m", "Salt"], python_shell=False)
    # binary not on PATH: error raised before any command runs
    run_all = MagicMock()
    with patch("salt.utils.path.which", MagicMock(return_value=None)), patch.dict(
        useradd.__salt__, {"cmd.run_all": run_all}
    ):
        with pytest.raises(CommandExecutionError):
            useradd.add("Salt")
        run_all.assert_not_called()
def test_delete():
    """useradd.delete: True on rc 0, False on rc != 0, raises when the binary is absent."""
    found = MagicMock(return_value="/sbin/userdel")
    # binary present, userdel exits 0
    run_all = MagicMock(return_value={"retcode": 0})
    with patch("salt.utils.path.which", found), patch.dict(
        useradd.__salt__, {"cmd.run_all": run_all}
    ):
        assert useradd.delete("Salt") is True
        run_all.assert_called_once_with(["/sbin/userdel", "Salt"], python_shell=False)
    # binary present, userdel exits non-zero
    run_all = MagicMock(return_value={"retcode": 1})
    with patch("salt.utils.path.which", found), patch.dict(
        useradd.__salt__, {"cmd.run_all": run_all}
    ):
        assert useradd.delete("Salt") is False
        run_all.assert_called_once_with(["/sbin/userdel", "Salt"], python_shell=False)
    # binary not on PATH: error raised before any command runs
    run_all = MagicMock()
    with patch("salt.utils.path.which", MagicMock(return_value=None)), patch.dict(
        useradd.__salt__, {"cmd.run_all": run_all}
    ):
        with pytest.raises(CommandExecutionError):
            useradd.delete("Salt")
        run_all.assert_not_called()
def test_chgroups():
    """useradd.chgroups: no-op when groups already match; otherwise run usermod -G."""
    # groups already match: returns True without running any command
    run_all = MagicMock()
    with patch.object(
        useradd, "list_groups", MagicMock(return_value=["wheel", "root"])
    ), patch.dict(useradd.__salt__, {"cmd.run_all": run_all}):
        assert useradd.chgroups("Salt", "wheel,root") is True
        run_all.assert_not_called()
    found = MagicMock(return_value="/sbin/usermod")
    # binary present, usermod exits 0
    run_all = MagicMock(return_value={"retcode": 0})
    with patch("salt.utils.path.which", found), patch.dict(
        useradd.__salt__, {"cmd.run_all": run_all}
    ):
        assert useradd.chgroups("Salt", "wheel,root") is True
        run_all.assert_called_once_with(
            ["/sbin/usermod", "-G", "wheel,root", "Salt"], python_shell=False
        )
    # binary present, usermod exits non-zero
    run_all = MagicMock(return_value={"retcode": 1, "stderr": ""})
    with patch("salt.utils.path.which", found), patch.dict(
        useradd.__salt__, {"cmd.run_all": run_all}
    ):
        assert useradd.chgroups("Salt", "wheel,root") is False
        run_all.assert_called_once_with(
            ["/sbin/usermod", "-G", "wheel,root", "Salt"], python_shell=False
        )
    # binary not on PATH: error raised before any command runs
    run_all = MagicMock()
    with patch("salt.utils.path.which", MagicMock(return_value=None)), patch.dict(
        useradd.__salt__, {"cmd.run_all": run_all}
    ):
        with pytest.raises(CommandExecutionError):
            useradd.chgroups("Salt", "wheel,root")
        run_all.assert_not_called()
def test_chloginclass():
    """useradd.chloginclass: OpenBSD-only; success, failure and missing-binary paths.

    The 'cmd.run_stdout' side_effect list feeds the before/after login-class
    lookups in order, so the sequencing of the mocks is significant.
    """
    # only runs on OpenBSD
    assert useradd.chloginclass("Salt", "staff") is False
    with patch.dict(useradd.__grains__, {"kernel": "OpenBSD"}):
        # command found and successful run (class changes salt -> staff)
        userinfo = ["class salt", "class staff"]
        mock = MagicMock(return_value={"retcode": 0})
        with patch(
            "salt.utils.path.which", MagicMock(return_value="/sbin/usermod")
        ), patch.dict(
            useradd.__salt__, {"cmd.run_stdout": MagicMock(side_effect=userinfo)}
        ), patch.dict(
            useradd.__salt__, {"cmd.run": mock}
        ):
            assert useradd.chloginclass("Salt", "staff") is True
            mock.assert_called_once_with(
                ["/sbin/usermod", "-L", "staff", "Salt"], python_shell=False
            )
        # command found and unsuccessful run (class unchanged after the call)
        userinfo = ["class salt", "class salt"]
        mock = MagicMock(return_value={"retcode": 1, "stderr": ""})
        with patch(
            "salt.utils.path.which", MagicMock(return_value="/sbin/usermod")
        ), patch.dict(
            useradd.__salt__, {"cmd.run_stdout": MagicMock(side_effect=userinfo)}
        ), patch.dict(
            useradd.__salt__, {"cmd.run": mock}
        ):
            assert useradd.chloginclass("Salt", "staff") is False
            mock.assert_called_once_with(
                ["/sbin/usermod", "-L", "staff", "Salt"], python_shell=False
            )
        # command not found: error raised before any command runs
        userinfo = ["class salt"]
        mock = MagicMock()
        with patch("salt.utils.path.which", MagicMock(return_value=None)), patch.dict(
            useradd.__salt__, {"cmd.run_stdout": MagicMock(side_effect=userinfo)}
        ), patch.dict(useradd.__salt__, {"cmd.run": mock}):
            with pytest.raises(CommandExecutionError):
                useradd.chloginclass("Salt", "staff")
            mock.assert_not_called()
def test__chattrib():
    """useradd._chattrib: success, no-op failure and missing-binary paths.

    The 'info' side_effect pairs are the before/after attribute lookups, so
    [{'uid': 10}, {'uid': 11}] means the change took effect and
    [{'uid': 10}, {'uid': 10}] means it did not.
    """
    # command found and successful run
    mock = MagicMock(return_value={"retcode": 0})
    with patch(
        "salt.utils.path.which", MagicMock(return_value="/sbin/usermod")
    ), patch.object(
        useradd, "info", MagicMock(side_effect=[{"uid": 10}, {"uid": 11}])
    ), patch.dict(
        useradd.__salt__, {"cmd.run": mock}
    ):
        assert useradd._chattrib("Salt", "uid", 11, "-u") is True
        mock.assert_called_once_with(
            ["/sbin/usermod", "-u", 11, "Salt"], python_shell=False
        )
    # command found and unsuccessful run
    mock = MagicMock(return_value={"retcode": 1})
    with patch(
        "salt.utils.path.which", MagicMock(return_value="/sbin/usermod")
    ), patch.object(
        useradd, "info", MagicMock(side_effect=[{"uid": 10}, {"uid": 10}])
    ), patch.dict(
        useradd.__salt__, {"cmd.run": mock}
    ):
        assert useradd._chattrib("Salt", "uid", 11, "-u") is False
        mock.assert_called_once_with(
            ["/sbin/usermod", "-u", 11, "Salt"], python_shell=False
        )
    # command not found: error raised before any command runs
    mock = MagicMock()
    with patch("salt.utils.path.which", MagicMock(return_value=None)), patch.object(
        useradd, "info", MagicMock(return_value={"uid": 10})
    ), patch.dict(useradd.__salt__, {"cmd.run_all": mock}):
        with pytest.raises(CommandExecutionError):
            useradd._chattrib("Salt", "uid", 11, "-u")
        mock.assert_not_called()
def test__update_gecos():
    """useradd._update_gecos: success, no-op failure and missing-binary paths.

    The '_get_gecos' side_effect pairs are the before/after GECOS lookups:
    [pre, post] means the field changed, [pre, pre] means it did not.
    """
    pre_info = {"fullname": "<NAME>"}
    post_info = {"fullname": "<NAME>"}
    # command found and successful run
    mock = MagicMock(return_value={"retcode": 0})
    with patch(
        "salt.utils.path.which", MagicMock(return_value="/sbin/usermod")
    ), patch.object(
        useradd, "_get_gecos", MagicMock(side_effect=[pre_info, post_info])
    ), patch.dict(
        useradd.__salt__, {"cmd.run": mock}
    ):
        assert useradd._update_gecos("Salt", "fullname", post_info["fullname"]) is True
        mock.assert_called_once_with(
            ["/sbin/usermod", "-c", "<NAME>", "Salt"], python_shell=False
        )
    # command found and unsuccessful run
    mock = MagicMock(return_value={"retcode": 1})
    with patch(
        "salt.utils.path.which", MagicMock(return_value="/sbin/usermod")
    ), patch.object(
        useradd, "_get_gecos", MagicMock(side_effect=[pre_info, pre_info])
    ), patch.dict(
        useradd.__salt__, {"cmd.run": mock}
    ):
        assert useradd._update_gecos("Salt", "fullname", post_info["fullname"]) is False
        mock.assert_called_once_with(
            ["/sbin/usermod", "-c", "<NAME>", "Salt"], python_shell=False
        )
    # command not found: error raised before any command runs
    mock = MagicMock()
    with patch("salt.utils.path.which", MagicMock(return_value=None)), patch.object(
        useradd, "_get_gecos", MagicMock(side_effect=[pre_info, pre_info])
    ), patch.dict(useradd.__salt__, {"cmd.run": mock}):
        with pytest.raises(CommandExecutionError):
            useradd._update_gecos("Salt", "fullname", post_info["fullname"])
        mock.assert_not_called()
def test_rename():
    """useradd.rename: missing binary, bad user states, success and no-op paths.

    The 'info' side_effect triples feed the existence check for the old name,
    the collision check for the new name, and the post-run verification.
    """
    # command not found: error raised before any command runs
    with patch("salt.utils.path.which", MagicMock(return_value=None)):
        mock = MagicMock()
        with patch.object(
            useradd, "info", MagicMock(return_value={"uid": 10})
        ), patch.dict(useradd.__salt__, {"cmd.run": mock}):
            with pytest.raises(CommandExecutionError):
                useradd.rename("salt", 1)
            mock.assert_not_called()
    # command found
    with patch("salt.utils.path.which", MagicMock(return_value="/sbin/usermod")):
        # source user does not exist
        mock = MagicMock(return_value=False)
        with patch.object(useradd, "info", mock):
            with pytest.raises(CommandExecutionError):
                useradd.rename("salt", 1)
        # target user already exists
        mock = MagicMock(return_value=True)
        with patch.object(useradd, "info", mock):
            with pytest.raises(CommandExecutionError):
                useradd.rename("salt", 1)
        # successful rename: post-run info reports the new name
        mock = MagicMock(return_value=None)
        with patch.dict(useradd.__salt__, {"cmd.run": mock}):
            mock = MagicMock(side_effect=[False, {"name": ""}, {"name": "salt"}])
            with patch.object(useradd, "info", mock):
                assert useradd.rename("name", "salt") is True
        # unsuccessful rename: post-run info still reports no name
        mock = MagicMock(return_value=None)
        with patch.dict(useradd.__salt__, {"cmd.run": mock}):
            mock = MagicMock(side_effect=[False, {"name": ""}, {"name": ""}])
            with patch.object(useradd, "info", mock):
                assert useradd.rename("salt", "salt") is False
def test_chuid():
    """useradd.chuid: missing binary, already-matching uid, failed and successful change."""
    # command not found: error raised before any command runs
    with patch("salt.utils.path.which", MagicMock(return_value=None)):
        mock = MagicMock()
        with patch.object(
            useradd, "info", MagicMock(return_value={"uid": 10})
        ), patch.dict(useradd.__salt__, {"cmd.run": mock}):
            with pytest.raises(CommandExecutionError):
                useradd.chuid("salt", 1)
            mock.assert_not_called()
    # command found
    with patch("salt.utils.path.which", MagicMock(return_value="/sbin/usermod")):
        # uid already matches: no change needed
        mock = MagicMock(return_value={"uid": 11})
        with patch.object(useradd, "info", mock):
            assert useradd.chuid("name", 11) is True
        mock_run = MagicMock(return_value=None)
        # uid unchanged after the run: failure
        with patch.dict(useradd.__salt__, {"cmd.run": mock_run}):
            mock = MagicMock(side_effect=[{"uid": 11}, {"uid": 11}])
            with patch.object(useradd, "info", mock):
                assert useradd.chuid("name", 22) is False
        # uid updated after the run: success
        with patch.dict(useradd.__salt__, {"cmd.run": mock_run}):
            mock = MagicMock(side_effect=[{"uid": 11}, {"uid": 22}])
            with patch.object(useradd, "info", mock):
                assert useradd.chuid("name", 11) is True
def test_chgid():
    """useradd.chgid: missing binary, already-matching gid, failed and successful change."""
    # command not found: error raised before any command runs
    with patch("salt.utils.path.which", MagicMock(return_value=None)):
        mock = MagicMock()
        with patch.object(
            useradd, "info", MagicMock(return_value={"gid": 10})
        ), patch.dict(useradd.__salt__, {"cmd.run": mock}):
            with pytest.raises(CommandExecutionError):
                useradd.chgid("salt", 1)
            mock.assert_not_called()
    # command found
    with patch("salt.utils.path.which", MagicMock(return_value="/sbin/usermod")):
        # gid already matches: no change needed
        mock = MagicMock(return_value={"gid": 11})
        with patch.object(useradd, "info", mock):
            assert useradd.chgid("name", 11) is True
        mock_run = MagicMock(return_value=None)
        # gid unchanged after the run: failure
        with patch.dict(useradd.__salt__, {"cmd.run": mock_run}):
            mock = MagicMock(side_effect=[{"gid": 22}, {"gid": 22}])
            with patch.object(useradd, "info", mock):
                assert useradd.chgid("name", 11) is False
        # gid updated after the run: success
        with patch.dict(useradd.__salt__, {"cmd.run": mock_run}):
            mock = MagicMock(side_effect=[{"gid": 11}, {"gid": 22}])
            with patch.object(useradd, "info", mock):
                assert useradd.chgid("name", 11) is True
def test_chshell():
    """useradd.chshell: missing binary, already-matching shell, failed and successful change."""
    # command not found: error raised before any command runs
    with patch("salt.utils.path.which", MagicMock(return_value=None)):
        mock = MagicMock()
        with patch.object(
            useradd, "info", MagicMock(return_value={"shell": "/bin/bash"})
        ), patch.dict(useradd.__salt__, {"cmd.run": mock}):
            with pytest.raises(CommandExecutionError):
                useradd.chshell("salt", "/usr/bash")
            mock.assert_not_called()
    # command found
    with patch("salt.utils.path.which", MagicMock(return_value="/sbin/usermod")):
        # shell already matches: no change needed
        mock = MagicMock(return_value={"shell": "/bin/bash"})
        with patch.object(useradd, "info", mock):
            assert useradd.chshell("name", "/bin/bash") is True
        mock_run = MagicMock(return_value=None)
        # shell unchanged after the run: failure
        with patch.dict(useradd.__salt__, {"cmd.run": mock_run}):
            mock = MagicMock(
                side_effect=[{"shell": "/bin/bash"}, {"shell": "/bin/bash"}]
            )
            with patch.object(useradd, "info", mock):
                assert useradd.chshell("name", "/usr/bash") is False
        # shell updated after the run: success
        with patch.dict(useradd.__salt__, {"cmd.run": mock_run}):
            mock = MagicMock(
                side_effect=[{"shell": "/bin/bash"}, {"shell": "/usr/bash"}]
            )
            with patch.object(useradd, "info", mock):
                assert useradd.chshell("name", "/bin/bash") is True
def test_chhome():
    """useradd.chhome: missing binary, already-matching home, failed and successful change."""
    # command not found: error raised before any command runs
    with patch("salt.utils.path.which", MagicMock(return_value=None)):
        mock = MagicMock()
        with patch.object(
            useradd, "info", MagicMock(return_value={"home": "/root"})
        ), patch.dict(useradd.__salt__, {"cmd.run": mock}):
            with pytest.raises(CommandExecutionError):
                useradd.chhome("salt", "/user")
            mock.assert_not_called()
    # command found
    with patch("salt.utils.path.which", MagicMock(return_value="/sbin/usermod")):
        # home already matches: no change needed
        mock = MagicMock(return_value={"home": "/root"})
        with patch.object(useradd, "info", mock):
            assert useradd.chhome("name", "/root") is True
        # home unchanged after the run: failure
        mock = MagicMock(return_value=None)
        with patch.dict(useradd.__salt__, {"cmd.run": mock}):
            mock = MagicMock(side_effect=[{"home": "/root"}, {"home": "/root"}])
            with patch.object(useradd, "info", mock):
                assert useradd.chhome("name", "/user") is False
        # target equals the current home: success without change
        mock = MagicMock(return_value=None)
        with patch.dict(useradd.__salt__, {"cmd.run": mock}):
            mock = MagicMock(side_effect=[{"home": "/root"}, {"home": "/root"}])
            with patch.object(useradd, "info", mock):
                assert useradd.chhome("name", "/root") is True
def test_chfullname():
    """useradd.chfullname: missing binary, missing GECOS, matching and failed-change paths."""
    # command not found: error raised before any command runs
    with patch("salt.utils.path.which", MagicMock(return_value=None)):
        mock = MagicMock()
        with patch.object(
            useradd, "_get_gecos", MagicMock(return_value={"fullname": "Salt"})
        ), patch.dict(useradd.__salt__, {"cmd.run": mock}):
            with pytest.raises(CommandExecutionError):
                useradd.chfullname("salt", "Saltstack")
            mock.assert_not_called()
    # command found
    with patch("salt.utils.path.which", MagicMock(return_value="/sbin/usermod")):
        # no GECOS data available: failure
        mock = MagicMock(return_value=False)
        with patch.object(useradd, "_get_gecos", mock):
            assert useradd.chfullname("Salt", "SaltStack") is False
        # fullname already matches: no change needed
        mock = MagicMock(return_value={"fullname": "SaltStack"})
        with patch.object(useradd, "_get_gecos", mock):
            assert useradd.chfullname("Salt", "SaltStack") is True
        # fullname unchanged after the run: failure (two pre-state variants)
        mock = MagicMock(return_value={"fullname": "SaltStack"})
        with patch.object(useradd, "_get_gecos", mock):
            mock = MagicMock(return_value=None)
            with patch.dict(useradd.__salt__, {"cmd.run": mock}):
                mock = MagicMock(return_value={"fullname": "SaltStack2"})
                with patch.object(useradd, "info", mock):
                    assert useradd.chfullname("Salt", "SaltStack1") is False
        mock = MagicMock(return_value={"fullname": "SaltStack2"})
        with patch.object(useradd, "_get_gecos", mock):
            mock = MagicMock(return_value=None)
            with patch.dict(useradd.__salt__, {"cmd.run": mock}):
                mock = MagicMock(return_value={"fullname": "SaltStack2"})
                with patch.object(useradd, "info", mock):
                    assert useradd.chfullname("Salt", "SaltStack1") is False
def test_chroomnumber():
# command not found
with patch("salt.utils.path.which", MagicMock(return_value=None)):
mock = MagicMock()
with patch.object(
useradd, "_get_gecos", MagicMock(return_value={"roomnumber": "1"})
), patch.dict(useradd.__salt__, {"cmd.run": mock}):
with pytest.raises(CommandExecutionError):
useradd.chroomnumber("salt", 2)
mock.assert_not_called()
# command found
with patch("salt.utils.path.which", MagicMock(return_value="/sbin/usermod")):
mock = MagicMock(return_value=False)
with patch.object(useradd, "_get_gecos", mock):
assert useradd.chroomnumber("salt", 1) is False
mock = MagicMock(return_value={"roomnumber": "1"})
with patch.object(useradd, "_get_gecos", mock):
assert useradd.chroomnumber("salt", 1) is True
mock = MagicMock(return_value={"roomnumber": "2"})
with patch.object(useradd, "_get_gecos", mock):
mock = MagicMock(return_value=None)
with patch.dict(useradd.__salt__, {"cmd.run": mock}):
mock = MagicMock(return_value={"roomnumber": "3"})
with patch.object(useradd, | |
# -*- coding: iso-8859-15 -*-
#
# This software was written by <NAME> (<NAME>)
# Copyright <NAME>
# All rights reserved
# This software is licenced under a 3-clause BSD style license
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#Redistributions of source code must retain the above copyright notice,
#this list of conditions and the following disclaimer.
#
#Redistributions in binary form must reproduce the above copyright notice,
#this list of conditions and the following disclaimer in the documentation
#and/or other materials provided with the distribution.
#
#Neither the name of the University College London nor the names
#of the code contributors may be used to endorse or promote products
#derived from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
#THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
#PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
#CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
#EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
#OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
#WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
#ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
# Developed by <NAME> (MSSL/UCL)
# uvotpy
# (c) 2009-2017, see Licence
from future.builtins import str
from future.builtins import input
from future.builtins import range
__version__ = '2.9.0 20171209'
import sys
import optparse
import numpy as np
import matplotlib.pyplot as plt
try:
from astropy.io import fits as pyfits
from astropy import wcs
except:
import pyfits
import re
import warnings
try:
import imagestats
except:
import stsci.imagestats as imagestats
import scipy
from scipy import interpolate
from scipy.ndimage import convolve
from scipy.signal import boxcar
from scipy.optimize import leastsq
from scipy.special import erf
from numpy import polyfit, polyval
'''
try:
#from uvotpy import uvotplot,uvotmisc,uvotwcs,rationalfit,mpfit,uvotio
import uvotplot
import uvotmisc
import uvotwcs
import rationalfit
import mpfit
import uvotio
except:
pass
'''
from uvotmisc import interpgrid, uvotrotvec, rdTab, rdList
from generate_USNOB1_cat import get_usnob1_cat
import datetime
import os
if __name__ != '__main__':
    # Module-global defaults, initialised only when uvotgetspec is imported
    # as a library (the guard skips this when the file is run as a script).
    # NOTE(review): indentation reconstructed from a whitespace-stripped dump.
    anchor_preset = list([None,None])
    bg_pix_limits = list([-100,-70,70,100])
    bg_lower_ = list([None,None]) # (offset, width) in pix, e.g., [20,30], default [50,50]
    bg_upper_ = list([None,None]) # (offset, width) in pix, e.g., [20,30], default [50,50]
    offsetlimit = None
    # set global parameters
    status = 0
    do_coi_correction = True  # if not set, disable coi_correction
    tempnames = list()
    tempntags = list()
    cval = -1.0123456789  # NOTE(review): magic fill value; presumably marks invalid pixels — confirm
    interactive = True
    update_curve = True
    contour_on_img = False
    give_result = False # with this set, a call to getSpec returns all data
    give_new_result = False
    use_rectext = False
    background_method = 'boxcar' # alternatives 'splinefit' 'boxcar'
    background_smoothing = [50,7] # 'boxcar' default smoothing in dispersion and across dispersion in pix
    background_interpolation = 'linear'
    trackcentroiding = True # default (= False will disable track y-centroiding)
    global trackwidth
    trackwidth = 2.5 # width of extraction region in sigma (alternative default = 1.0) 2.5 was used for flux calibration.
    bluetrackwidth = 1.3 # multiplier width of non-order-overlapped extraction region [not yet active]
    write_RMF = False
    background_source_mag = 18.0
    zeroth_blim_offset = 1.0
    coi_half_width = None
    slit_width = 200
    _PROFILE_BACKGROUND_ = False # start with severe sigma-clip of background, before going to smoothing
    # Date stamp (YYYYMMDD) used in output file naming.
    today_ = datetime.date.today()
    datestring = today_.isoformat()[0:4]+today_.isoformat()[5:7]+today_.isoformat()[8:10]
    fileversion=1
    calmode=True
    typeNone = type(None)
    senscorr = True # do sensitivity correction
    # Import-time banner identifying the module version.
    print(66*"=")
    print("uvotpy module uvotgetspec version=",__version__)
    print("<NAME> (c) 2009-2017, see uvotpy licence.")
    print("please use reference provided at http://github.com/PaulKuin/uvotpy")
    print(66*"=","\n")
def getSpec(RA,DEC,obsid, ext, indir='./', wr_outfile=True,
outfile=None, calfile=None, fluxcalfile=None,
use_lenticular_image=True,
offsetlimit=None, anchor_offset=None, anchor_position=[None,None],
background_lower=[None,None], background_upper=[None,None],
background_template=None,
fixed_angle=None, spextwidth=13, curved="update",
fit_second=False, predict2nd=True, skip_field_src=False,
optimal_extraction=False, catspec=None,write_RMF=write_RMF,
get_curve=None,fit_sigmas=True,get_sigma_poly=False,
lfilt1=None, lfilt1_ext=None, lfilt2=None, lfilt2_ext=None,
wheelpos=None, interactive=interactive, sumimage=None, set_maglimit=None,
plot_img=True, plot_raw=True, plot_spec=True, zoom=True, highlight=False,
uvotgraspcorr_on=True, ank_c_0offset = False,
update_pnt=True, ifmotion=False, motion_file=None, anchor_x_offset=False,
replace=None,ifextended=False, singleside_bkg = False, fixwidth = False,
clobber=False, chatter=1):
'''Makes all the necessary calls to reduce the data.
Parameters
----------
ra, dec : float
The Sky position (J2000) in **decimal degrees**
obsid : str
The observation ID number as a **String**. Typically that is
something like "00032331001" and should be part of your
grism filename which is something like "sw00032331001ugu_dt.img"
ext : int
number of the extension to process
kwargs : dict
optional keyword arguments, possible values are:
- **fit_second** : bool
fit the second order. Off since it sometimes causes problems when the
orders overlap completely. Useful for spectra in top part detector
- **background_lower** : list
instead of default background list offset from spectrum as list
of two numbers, like [20, 40]. Distance relative to spectrum
- **background_upper** : list
instead of default background list offset from spectrum as list
of two numbers, like [20, 40]. Distance relative to spectrum
- **offsetlimit** : None,int,[center,range]
Default behaviour is to determine automatically any required offset from
the predicted anchor position to the spectrum, and correct for that.
The automated method may fail in the case of a weak spectrum and strong zeroth
or first order next to the spectrum. Two methods are provided:
(1) provide a number which will be used to limit the allowed offset. If
within that limit no peak is identified, the program will stop and require
you to provide a manual offset value. Try small numbers like 1, -1, 3, etc..
(2) if you already know the approximate y-location of the spectrum at the
anchor x-position in the rotated small image strip around the spectrum, you
can give this with a small allowed range for fine tuning as a list of two
parameter values. The first value in the list must be the y-coordinate
(by default the spectrum falls close to y=100 pixels), the second parameter
the allowed adjustment to a peak value in pixels. For example, [105,2].
This will require no further interactive input, and the spectrum will be
extracted using that offset.
- **wheelpos**: {160,200,955,1000}
filter wheel position for the grism filter mode used. Helpful for
forcing Vgrism or UVgrism input when both are present in the directory.
160:UV Clocked, 200:UV Nominal, 955:V clocked, 1000:V nominal
- **zoom** : bool
when False, the whole extracted region is displayed, including zeroth
order when present.
- **clobber** : bool
When True, overwrite earlier output (see also outfile)
- **write_RMF** : bool
When True, write the rmf file (will take extra time due to large matrix operations)
- **use_lenticular_image** : bool
When True and a lenticular image is present, it is used. If False,
the grism image header WCS-S system will be used for the astrometry,
with an automatic call to uvotgraspcorr for refinement.
- **sumimage** : str
Name summed image generated using ``sum_Extimage()``, will extract spectrum
from summed image.
- **wr_outfile** : bool
If False, no output file is written
- **outfile** : path, str
Name of output file, other than automatically generated.
- **calfile** : path, str
calibration file name
- **fluxcalfile** : path, str
flux calibration file name or "CALDB" or None
- **predict2nd** : bool
predict the second order flux from the first. Overestimates in centre a lot.
- **skip_field_src** : bool
if True do not locate zeroth order positions. Can be used if
absence internet connection or USNO-B1 server causes problems.
- **optimal_extraction** : bool, obsolete
Do not use.Better results with other implementation.
- **catspec** : path
optional full path to the | |
<gh_stars>1-10
# type: ignore
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple
from urllib.parse import urljoin
import urllib3
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
# disable insecure warnings
urllib3.disable_warnings()
# Pagination and fetch defaults for the Netskope API commands.
DEFAULT_PAGE = 1
DEFAULT_LIMIT = 50
DEFAULT_MAX_FETCH = DEFAULT_LIMIT
DEFAULT_EVENTS_FETCH = DEFAULT_LIMIT
DEFAULT_EVENT_TYPE = 'application'
DEFAULT_FIRST_FETCH = '7 days'
# Hard upper bounds enforced on user-supplied limits.
MAX_LIMIT = 100
MAX_FETCH = 200
MAX_EVENTS_FETCH = 200
# Maps the human-readable time-period choices to their length in seconds,
# as expected by the API's `timeperiod` parameter.
TIME_PERIOD_MAPPING = {
    'Last 60 Minutes': 3600,
    'Last 24 Hours': 86400,
    'Last 7 Days': 604800,
    'Last 30 Days': 2592000,
    'Last 60 Days': 5184000,
    'Last 90 Days': 7776000
}
class Client(BaseClient):
    """
    Client for Netskope RESTful API.

    All endpoints are rooted at `<base_url>/api/v1/` and authenticated by a
    `token` query parameter attached to every request (see `__init__`).

    Args:
        base_url (str): The base URL of Netskope.
        token (str): The token to authenticate against Netskope API.
        use_ssl (bool): Specifies whether to verify the SSL certificate or not.
        use_proxy (bool): Specifies if to use XSOAR proxy settings.
    """

    def __init__(self, base_url: str, token: str, use_ssl: bool, use_proxy: bool):
        super().__init__(urljoin(base_url, '/api/v1/'), verify=use_ssl, proxy=use_proxy)
        # The API token is sent as a query parameter on every request.
        self._session.params['token'] = token

    def list_events_request(self,
                            query: Optional[str] = None,
                            event_type: Optional[str] = None,
                            timeperiod: Optional[int] = None,
                            start_time: Optional[int] = None,
                            end_time: Optional[int] = None,
                            insertion_start_time: Optional[int] = None,
                            insertion_end_time: Optional[int] = None,
                            limit: Optional[int] = None,
                            skip: Optional[int] = None,
                            unsorted: Optional[bool] = None) -> Dict[str, Any]:
        """
        Get events extracted from SaaS traffic and or logs.

        Args:
            query (Optional[str]): Free query to filter the events.
            event_type (Optional[str]): Select events by their type.
            timeperiod (Optional[int]): Get all events from a certain time period.
            start_time (Optional[int]): Restrict events to those that have timestamps greater than the provided timestamp.
            end_time (Optional[int]): Restrict events to those that have timestamps less than or equal to the provided timestamp.
            insertion_start_time (Optional[int]): Restrict events to those that were inserted to the system
                after the provided timestamp.
            insertion_end_time (Optional[int]): Restrict events to those that were inserted to the system
                before the provided timestamp.
            limit (Optional[int]): The maximum amount of events to retrieve (up to 10000 events).
            skip (Optional[int]): The skip number of the events to retrieve (minimum is 1).
            unsorted (Optional[bool]): If true, the returned data will not be sorted (useful for improved performance).

        Returns:
            Dict[str, Any]: Netskope events.
        """
        # Unset arguments are dropped so only explicitly-provided filters
        # appear in the request payload.
        body = remove_empty_elements({
            'query': query,
            'type': event_type,
            'timeperiod': timeperiod,
            'starttime': start_time,
            'endtime': end_time,
            'insertionstarttime': insertion_start_time,
            'insertionendtime': insertion_end_time,
            'limit': limit,
            'skip': skip,
            'unsorted': unsorted
        })
        return self._http_request(method='POST', url_suffix='events', json_data=body)

    def list_alerts_request(self,
                            query: Optional[str] = None,
                            alert_type: Optional[str] = None,
                            acked: Optional[bool] = None,
                            timeperiod: Optional[int] = None,
                            start_time: Optional[int] = None,
                            end_time: Optional[int] = None,
                            insertion_start_time: Optional[int] = None,
                            insertion_end_time: Optional[int] = None,
                            limit: Optional[int] = None,
                            skip: Optional[int] = None,
                            unsorted: Optional[bool] = None) -> Dict[str, Any]:
        """
        Get alerts generated by Netskope, including policy, DLP, and watch list alerts.

        Args:
            query (Optional[str]): Free query to filter the alerts.
            alert_type (Optional[str]): Select alerts by their type.
            acked (Optional[bool]): Whether to retrieve acknowledged alerts or not.
            timeperiod (Optional[int]): Get alerts from certain time period.
            start_time (Optional[int]): Restrict alerts to those that have timestamps greater than the provided timestamp.
            end_time (Optional[int]): Restrict alerts to those that have timestamps less than or equal to the provided timestamp.
            insertion_start_time (Optional[int]): Restrict alerts which have been inserted into the system
                after the provided timestamp.
            insertion_end_time (Optional[int]): Restrict alerts which have been inserted into the system
                before the provided timestamp.
            limit (Optional[int]): The maximum number of alerts to return (up to 10000).
            skip (Optional[int]): The skip number of the alerts to retrieve (minimum is 1).
            unsorted (Optional[bool]): If true, the returned data will not be sorted (useful for improved performance).

        Returns:
            Dict[str, Any]: Netskope alerts.
        """
        body = remove_empty_elements({
            'query': query,
            'alert_type': alert_type,
            'acked': acked,
            'timeperiod': timeperiod,
            'starttime': start_time,
            'endtime': end_time,
            'insertionstarttime': insertion_start_time,
            'insertionendtime': insertion_end_time,
            'limit': limit,
            'skip': skip,
            'unsorted': unsorted
        })
        return self._http_request(method='POST', url_suffix='alerts', json_data=body)

    def list_quarantined_files_request(self,
                                       start_time: Optional[int] = None,
                                       end_time: Optional[int] = None,
                                       limit: Optional[int] = None,
                                       skip: Optional[int] = None) -> Dict[str, Any]:
        """
        List all quarantined files.

        Args:
            start_time (Optional[int]): Get files last modified within a certain time period.
            end_time (Optional[int]): Get files last modified within a certain time period.
            limit (Optional[int]): The maximum amount of clients to retrieve (up to 10000).
            skip (Optional[int]): The skip number of the clients to retrieve (minimum is 1).

        Returns:
            Dict[str, Any]: Netskope quarantine files.
        """
        # The quarantine endpoint multiplexes operations through the 'op' key.
        body = remove_empty_elements({
            'starttime': start_time,
            'endtime': end_time,
            'limit': limit,
            'skip': skip,
            'op': 'get-files'
        })
        return self._http_request(method='POST', url_suffix='quarantine', json_data=body)

    def get_quarantined_file_request(self, quarantine_profile_id: str, file_id: str) -> bytes:
        """
        Download a quarantined file.

        Args:
            quarantine_profile_id (str): The ID of quarantine profile.
            file_id (str): The ID of the quarantined file.

        Returns:
            bytes: The quarantined file content.
        """
        body = {
            'quarantine_profile_id': quarantine_profile_id,
            'file_id': file_id,
            'op': 'download-url'
        }
        # resp_type='content' returns the raw bytes of the file.
        return self._http_request(method='POST',
                                  url_suffix='quarantine',
                                  json_data=body,
                                  resp_type='content')

    def update_quarantined_file_request(self, quarantine_profile_id: str, file_id: str,
                                        action: str) -> None:
        """
        Take an action on a quarantined file.

        Args:
            quarantine_profile_id (str): The profile id of the quarantined file.
            file_id (str): The id of the quarantined file.
            action (str): Action to be performed on a quarantined.
        """
        body = {
            'quarantine_profile_id': quarantine_profile_id,
            'file_id': file_id,
            'action': action,
            'op': 'take-action'
        }
        # resp_type='text': the endpoint returns no JSON body for this op.
        self._http_request(method='POST', url_suffix='quarantine', json_data=body, resp_type='text')

    def update_url_list_request(self, name: str, urls: List[str]) -> None:
        """
        Update the URL List with the values provided.

        Args:
            name (str): Name of an existing URL List shown in the Netskope UI on the URL List skip.
            urls (List[str]): The content of the URL list.
        """
        # The API expects the list as a single comma-separated string.
        body = {'name': name, 'list': ','.join(urls)}
        self._http_request(method='POST', url_suffix='updateUrlList', json_data=body)

    def update_file_hash_list_request(self, name: str, hashes: List[str]) -> None:
        """
        Update file hash list with the values provided.

        Args:
            name (str): Name of an existing file hash list shown in the Netskope UI on the file hash list skip.
            hashes (str): List of file hashes (md5 or sha256).
        """
        body = {'name': name, 'list': ','.join(hashes)}
        # NOTE(review): unlike update_url_list_request this returns the raw
        # response despite the `-> None` annotation — confirm which is intended.
        return self._http_request(method='POST', url_suffix='updateFileHashList', json_data=body)

    def list_clients_request(self,
                             query: Optional[str] = None,
                             limit: Optional[int] = None,
                             skip: Optional[int] = None) -> Dict[str, Any]:
        """
        Get information about the Netskope clients.

        Args:
            query (Optional[str]): Free query on the clients, based on the client fields.
            limit (Optional[int]): The maximum amount of clients to retrieve (up to 10000).
            skip (Optional[int]): The skip number of the clients to retrieve (minimum is 1).

        Returns:
            Dict[str, Any]: The clients information.
        """
        body = remove_empty_elements({'query': query, 'limit': limit, 'skip': skip})
        # NOTE(review): sends the filter as query params (params=body) rather
        # than json_data like the other endpoints — presumably what the
        # clients endpoint expects; confirm against the Netskope v1 API docs.
        return self._http_request(method='POST', url_suffix='clients', params=body)

    def _http_request(self, *args, **kwargs):
        # Wraps BaseClient._http_request so API-level errors (HTTP 200 with an
        # 'errors' field in the JSON body) surface as DemistoException.
        response = super()._http_request(*args, **kwargs)
        if isinstance(response, dict) and 'errors' in response:
            errors = '\n'.join(response['errors'])
            raise DemistoException(f'Invalid API call: {errors}', res=response)
        return response
def arg_to_boolean(arg: Optional[str]) -> Optional[bool]:
    """
    Convert an XSOAR string argument to a Python boolean, passing None through.

    Args:
        arg (Optional[str]): The argument to convert.

    Returns:
        Optional[bool]: The converted boolean, or None when arg is None.
    """
    return None if arg is None else argToBoolean(arg)
def arg_to_seconds_timestamp(arg: Optional[str]) -> Optional[int]:
    """
    Convert an XSOAR date-string argument to a Unix timestamp in seconds.

    Args:
        arg (Optional[str]): The argument to convert.

    Returns:
        Optional[int]: The timestamp in seconds, or None when arg is None.
    """
    return None if arg is None else date_to_seconds_timestamp(arg_to_datetime(arg))
def date_to_seconds_timestamp(date_str_or_dt: Union[str, datetime]) -> int:
    """
    Convert a date string or datetime object to a Unix timestamp in seconds.

    Args:
        date_str_or_dt (Union[str, datetime]): The datestring or datetime.

    Returns:
        int: The timestamp in seconds.
    """
    milliseconds = date_to_timestamp(date_str_or_dt)
    return milliseconds // 1000
def validate_time_arguments(start_time: Optional[int] = None,
end_time: Optional[int] = None,
insertion_start_time: Optional[int] = None,
insertion_end_time: Optional[int] = None,
timeperiod: Optional[int] = None) -> None:
"""
Validates time arguments from the user.
The user must provide one of the following:
- start_time and end_time.
- insertion_start_time and insertion_end_time.
- timeperiod.
Args:
start_time (Optional[int], optional): The start time to fetch from the API.
end_time (Optional[int], optional): The end time to fetch from the API.
insertion_start_time (Optional[int], optional): The insertion start time to fetch from the API.
insertion_end_time (Optional[int], optional): The insertion end time to fetch from the API.
timeperiod (Optional[str], optional): The timeperiod to fetch from the API.
Raises:
DemistoException: The user did not provide valid timestamp.
"""
combination = (all((start_time, end_time)), all(
(insertion_start_time, insertion_end_time)), bool(timeperiod))
if not any(combination):
raise DemistoException('Missing time arguments. Please provide start_time and | |
. delegation_set )
oooO0oOOOO0 = oo0ooo . is_ms_peer_entry ( )
oo0ooo . map_referrals_sent += 1
if 25 - 25: o0oOOo0O0Ooo % o0oOOo0O0Ooo - OoooooooOO . i1IIi
if 10 - 10: OoO0O00 % iIii1I11I1II1 * OoOoOO00 / i11iIiiIii - I1IiiI . O0
if 2 - 2: II111iiii
if 13 - 13: Ii1I % i11iIiiIii
if 3 - 3: ooOoO0o % OoOoOO00 * I1Ii111 - OoO0O00 / i1IIi % I1IiiI
if ( action == LISP_DDT_ACTION_NOT_AUTH ) : IiiIIiIi1i11i = True
if ( action in ( LISP_DDT_ACTION_MS_REFERRAL , LISP_DDT_ACTION_MS_ACK ) ) :
IiiIIiIi1i11i = ( oooO0oOOOO0 == False )
if 50 - 50: I1ii11iIi11i + iII111i
if 64 - 64: oO0o
if 11 - 11: o0oOOo0O0Ooo
if 95 - 95: i1IIi . ooOoO0o . Oo0Ooo
if 13 - 13: OOooOOo - Oo0Ooo % O0 . I1Ii111
iiI = lisp_eid_record ( )
iiI . rloc_count = OOO0Oo0o
iiI . authoritative = True
iiI . action = action
iiI . ddt_incomplete = IiiIIiIi1i11i
iiI . eid = eid_prefix
iiI . group = group_prefix
iiI . record_ttl = oo0o
if 66 - 66: I1IiiI + I11i
IIii1i += iiI . encode ( )
iiI . print_record ( " " , True )
if 58 - 58: I1ii11iIi11i
if 7 - 7: oO0o - I11i
if 59 - 59: Ii1I / o0oOOo0O0Ooo / OoO0O00 + IiII + i11iIiiIii
if 64 - 64: o0oOOo0O0Ooo * IiII * IiII * iII111i % i11iIiiIii
if ( OOO0Oo0o != 0 ) :
for ii1iII11 in oo0ooo . delegation_set :
iIii1IiIiI = lisp_rloc_record ( )
iIii1IiIiI . rloc = ii1iII11 . delegate_address
iIii1IiIiI . priority = ii1iII11 . priority
iIii1IiIiI . weight = ii1iII11 . weight
iIii1IiIiI . mpriority = 255
iIii1IiIiI . mweight = 0
iIii1IiIiI . reach_bit = True
IIii1i += iIii1IiIiI . encode ( )
iIii1IiIiI . print_record ( " " )
if 22 - 22: I1ii11iIi11i * II111iiii - OOooOOo % i11iIiiIii
if 10 - 10: OOooOOo / I1ii11iIi11i
if 21 - 21: OoO0O00 % Oo0Ooo . o0oOOo0O0Ooo + IiII
if 48 - 48: O0 / i1IIi / iII111i
if 11 - 11: O0 - OoO0O00 + OoOoOO00 * ooOoO0o - Ii1I
if 82 - 82: Ii1I - O0 * ooOoO0o . ooOoO0o
if 32 - 32: o0oOOo0O0Ooo . OoooooooOO % OOooOOo
if ( map_request . nonce != 0 ) : port = LISP_CTRL_PORT
lisp_send_map_referral ( lisp_sockets , IIii1i , ecm_source , port )
return
if 2 - 2: OoOoOO00 + I1ii11iIi11i + oO0o
if 27 - 27: OoooooooOO - Ii1I / OoooooooOO + OoO0O00
if 58 - 58: OOooOOo * I11i . I1IiiI
if 46 - 46: I11i + II111iiii * iII111i % ooOoO0o - I1IiiI
if 73 - 73: I1ii11iIi11i * iIii1I11I1II1 . I1Ii111 - Ii1I
if 11 - 11: I11i
if 48 - 48: IiII / O0
if 46 - 46: ooOoO0o + oO0o
def lisp_send_negative_map_reply ( sockets , eid , group , nonce , dest , port , ttl ,
    xtr_id , pubsub ) :
    #
    # Build and send a "negative" Map-Reply -- a reply carrying no RLOC
    # records -- for (eid, group) back to the requesting ITR at dest:port.
    # Code is pyminifier-obfuscated; the `if N - N:` lines are dead-code
    # filler.  Obfuscated locals: OOo000 = reply action code,
    # IIii1i = encoded Map-Reply packet.
    #
    if 7 - 7: ooOoO0o * oO0o . i1IIi
    lprint ( "Build negative Map-Reply EID-prefix {}, nonce 0x{} to ITR {}" . format ( lisp_print_eid_tuple ( eid , group ) , lisp_hex_string ( nonce ) ,
    # (obfuscator filler comment removed content is preserved below)
    red ( dest . print_address ( ) , False ) ) )
    if 25 - 25: IiII % OOooOOo + Ii1I * I1ii11iIi11i
    #
    # Unicast lookups (null group) get action native-forward; multicast
    # lookups get action drop.
    #
    OOo000 = LISP_NATIVE_FORWARD_ACTION if group . is_null ( ) else LISP_DROP_ACTION
    if 25 - 25: iIii1I11I1II1 * OoOoOO00 % I1IiiI + IiII
    if 34 - 34: ooOoO0o - OoooooooOO . o0oOOo0O0Ooo
    if 83 - 83: II111iiii . OOooOOo
    if 88 - 88: O0
    if 12 - 12: Ii1I % OOooOOo % Oo0Ooo * I1Ii111
    #
    # When the EID embeds a hash, switch the action to send-map-request.
    # NOTE(review): inferred only from the lisp_get_eid_hash() call --
    # confirm the exact semantics against the lispers.net sources.
    #
    if ( lisp_get_eid_hash ( eid ) != None ) :
        OOo000 = LISP_SEND_MAP_REQUEST_ACTION
    if 96 - 96: iII111i + ooOoO0o
    if 100 - 100: OOooOOo . ooOoO0o + Ii1I + Ii1I
    #
    # Encode the Map-Reply with an empty RLOC-set ([ ]).
    #
    IIii1i = lisp_build_map_reply ( eid , group , [ ] , nonce , OOo000 , ttl , False ,
        None , False , False )
    if 70 - 70: ooOoO0o . iIii1I11I1II1 / oO0o
    if 18 - 18: Ii1I / OoooooooOO % i1IIi * o0oOOo0O0Ooo
    if 70 - 70: IiII % i1IIi / IiII - o0oOOo0O0Ooo . Oo0Ooo / O0
    if 54 - 54: o0oOOo0O0Ooo
    #
    # Pubsub requests go through the subscription machinery; otherwise the
    # reply is sent straight back to the ITR.
    #
    if ( pubsub ) :
        lisp_process_pubsub ( sockets , IIii1i , eid , dest , port , nonce , ttl ,
            xtr_id )
    else :
        lisp_send_map_reply ( sockets , IIii1i , dest , port )
    if 53 - 53: II111iiii / IiII . i1IIi + I1Ii111 / OoO0O00 - OoooooooOO
    return
if 67 - 67: ooOoO0o . Ii1I - Oo0Ooo * iII111i . I11i - OOooOOo
if 10 - 10: I11i
if 37 - 37: o0oOOo0O0Ooo / I1IiiI * oO0o / II111iiii
if 39 - 39: IiII - i1IIi - IiII - OoooooooOO - I1ii11iIi11i
if 66 - 66: IiII + i1IIi
if 21 - 21: IiII / i11iIiiIii / OoOoOO00
if 75 - 75: Ii1I . i1IIi / I1IiiI * iII111i . IiII / OoOoOO00
def lisp_retransmit_ddt_map_request ( mr ) :
    #
    # Retransmit a queued DDT Map-Request `mr` that has not been answered,
    # giving up after LISP_MAX_MAP_NOTIFY_RETRIES attempts.  Code is
    # pyminifier-obfuscated; `if N - N:` lines are dead-code filler.
    # Obfuscated locals: O0oOo0o = source EID string, ooO0O00OOoo0O = EID
    # tuple string, oOO000 = request nonce.  Python 2 code (dict.has_key).
    #
    O0oOo0o = mr . mr_source . print_address ( )
    ooO0O00OOoo0O = mr . print_eid_tuple ( )
    oOO000 = mr . nonce
    if 34 - 34: ooOoO0o * IiII . Ii1I + iIii1I11I1II1
    if 1 - 1: i11iIiiIii + I11i
    if 78 - 78: Ii1I % Oo0Ooo / OoO0O00 . iIii1I11I1II1 . II111iiii
    if 67 - 67: oO0o % I1Ii111
    if 72 - 72: I1IiiI . i11iIiiIii . OoOoOO00 + I1IiiI - I1Ii111 + iII111i
    #
    # Charge a no-response against the referral-set node the last request
    # was sent to, looked up via the referral cache.
    #
    if ( mr . last_request_sent_to ) :
        i11i11 = mr . last_request_sent_to . print_address ( )
        o0ooo000OO = lisp_referral_cache_lookup ( mr . last_cached_prefix [ 0 ] ,
            mr . last_cached_prefix [ 1 ] , True )
        if ( o0ooo000OO and o0ooo000OO . referral_set . has_key ( i11i11 ) ) :
            o0ooo000OO . referral_set [ i11i11 ] . no_responses += 1
    if 17 - 17: I1IiiI . i11iIiiIii * OoO0O00 + II111iiii
    if 34 - 34: Ii1I - O0 + Ii1I + I11i + I1ii11iIi11i . Ii1I
    if 56 - 56: Ii1I
    if 58 - 58: iII111i
    if 18 - 18: O0 * OoooooooOO % IiII - iIii1I11I1II1 % IiII * o0oOOo0O0Ooo
    if 13 - 13: OoO0O00 + i11iIiiIii + O0 / ooOoO0o % iIii1I11I1II1
    if 75 - 75: oO0o / i1IIi / Ii1I * Oo0Ooo
    #
    # Retry limit reached: log, dequeue the request, and stop retrying.
    #
    if ( mr . retry_count == LISP_MAX_MAP_NOTIFY_RETRIES ) :
        lprint ( "DDT Map-Request retry limit reached for EID {}, nonce 0x{}" . format ( green ( ooO0O00OOoo0O , False ) , lisp_hex_string ( oOO000 ) ) )
        if 75 - 75: Oo0Ooo / OoooooooOO
        mr . dequeue_map_request ( )
        return
    if 98 - 98: II111iiii - I1Ii111 . ooOoO0o * iII111i
    if 49 - 49: I1ii11iIi11i / OoooooooOO - I11i
    mr . retry_count += 1
    if 76 - 76: i1IIi . OoO0O00 . O0 / OOooOOo - iII111i
    # Log the retransmission ("P" prefix marks a PITR source), then resend.
    IiII1iiI = green ( O0oOo0o , False )
    OooOOOoOoo0O0 = green ( ooO0O00OOoo0O , False )
    lprint ( "Retransmit DDT {} from {}ITR {} EIDs: {} -> {}, nonce 0x{}" . format ( bold ( "Map-Request" , False ) , "P" if mr . from_pitr else "" ,
    # I1IiiI . ooOoO0o . II111iiii % OOooOOo
    red ( mr . itr . print_address ( ) , False ) , IiII1iiI , OooOOOoOoo0O0 ,
    lisp_hex_string ( oOO000 ) ) )
    if 86 - 86: i11iIiiIii + I1ii11iIi11i / OoOoOO00 * OoooooooOO
    if 6 - 6: II111iiii
    if 26 - 26: iIii1I11I1II1 / iIii1I11I1II1 . IiII * i11iIiiIii
    if 21 - 21: OOooOOo + o0oOOo0O0Ooo
    lisp_send_ddt_map_request ( mr , False )
if 28 - 28: OOooOOo + i1IIi + II111iiii / Oo0Ooo + iIii1I11I1II1 . Oo0Ooo
if 73 - 73: Ii1I * iIii1I11I1II1 / o0oOOo0O0Ooo | |
perform leiden clustering on the pretrained z to get clusters
mu : np.array, optional
\([d,k]\) The value of initial \(\\mu\).
log_pi : np.array, optional
\([1,K]\) The value of initial \(\\log(\\pi)\).
res:
The resolution of leiden clustering, which is a parameter value controlling the coarseness of the clustering.
Higher values lead to more clusters. Deafult is 1.
ratio_prune : float, optional
The ratio of edges to be removed before estimating.
'''
if cluster_label is None:
print("Perform leiden clustering on the latent space z ...")
g = get_igraph(self.z)
cluster_labels = leidenalg_igraph(g, res = res)
cluster_labels = cluster_labels.astype(str)
uni_cluster_labels = np.unique(cluster_labels)
else:
cluster_labels = self.adata.obs[cluster_label].to_numpy()
uni_cluster_labels = np.array(self.adata.obs[cluster_label].cat.categories)
n_clusters = len(uni_cluster_labels)
if not hasattr(self, 'z'):
self.update_z()
z = self.z
mu = np.zeros((z.shape[1], n_clusters))
for i,l in enumerate(uni_cluster_labels):
mu[:,i] = np.mean(z[cluster_labels==l], axis=0)
# mu[:,i] = z[cluster_labels==l][np.argmin(np.mean((z[cluster_labels==l] - mu[:,i])**2, axis=1)),:]
### update cluster centers if some cluster centers are too close
clustering = AgglomerativeClustering(
n_clusters=None,
distance_threshold=dist_thres,
linkage='complete'
).fit(mu.T/np.sqrt(mu.shape[0]))
n_clusters_new = clustering.n_clusters_
if n_clusters_new < n_clusters:
print("Merge clusters for cluster centers that are too close ...")
n_clusters = n_clusters_new
for i in range(n_clusters):
temp = uni_cluster_labels[clustering.labels_ == i]
idx = np.isin(cluster_labels, temp)
cluster_labels[idx] = ','.join(temp)
if np.sum(clustering.labels_==i)>1:
print('Merge %s'% ','.join(temp))
uni_cluster_labels = np.unique(cluster_labels)
mu = np.zeros((z.shape[1], n_clusters))
for i,l in enumerate(uni_cluster_labels):
mu[:,i] = np.mean(z[cluster_labels==l], axis=0)
self.adata.obs['vitae_init_clustering'] = cluster_labels
self.adata.obs['vitae_init_clustering'] = self.adata.obs['vitae_init_clustering'].astype('category')
print("Initial clustering labels saved as 'vitae_init_clustering' in self.adata.obs.")
if (log_pi is None) and (cluster_labels is not None) and (n_clusters>3):
n_states = int((n_clusters+1)*n_clusters/2)
d = _comp_dist(z, cluster_labels, mu.T)
C = np.triu(np.ones(n_clusters))
C[C>0] = np.arange(n_states)
C = C.astype(int)
log_pi = np.zeros((1,n_states))
## pruning to throw away edges for far-away clusters if there are too many clusters
if ratio_prune is not None:
log_pi[0, C[np.triu(d)>np.quantile(d[np.triu_indices(n_clusters, 1)], 1-ratio_prune)]] = - np.inf
else:
log_pi[0, C[np.triu(d)> np.quantile(d[np.triu_indices(n_clusters, 1)], 5/n_clusters) * 3]] = - np.inf
self.n_states = n_clusters
self.labels = cluster_labels
# Not sure if storing the this will be useful
# self.init_labels_name = cluster_label
labels_map = pd.DataFrame.from_dict(
{i:label for i,label in enumerate(uni_cluster_labels)},
orient='index', columns=['label_names'], dtype=str
)
self.labels_map = labels_map
self.vae.init_latent_space(self.n_states, mu, log_pi)
self.inferer = Inferer(self.n_states)
self.mu = self.vae.latent_space.mu.numpy()
self.pi = np.triu(np.ones(self.n_states))
self.pi[self.pi > 0] = tf.nn.softmax(self.vae.latent_space.pi).numpy()[0]
if pilayer:
self.vae.create_pilayer()
def update_latent_space(self, dist_thres: float=0.5):
    '''Merge cluster centers of the fitted latent space that are closer than
    `dist_thres` (complete-linkage agglomerative clustering on the scaled
    centers), then re-initialize the latent space with the merged centers
    and aggregated edge weights.

    Parameters
    ----------
    dist_thres : float, optional
        Distance threshold below which cluster centers are merged.
    '''
    # NOTE(review): indentation reconstructed from a whitespace-stripped dump.
    # pi holds the upper-triangular edge weights; mu the [dim_latent, n_states] centers.
    pi = self.pi[np.triu_indices(self.n_states)]
    mu = self.mu
    clustering = AgglomerativeClustering(
        n_clusters=None,
        distance_threshold=dist_thres,
        linkage='complete'
    ).fit(mu.T/np.sqrt(mu.shape[0]))
    n_clusters = clustering.n_clusters_
    if n_clusters<self.n_states:
        print("Merge clusters for cluster centers that are too close ...")
        mu_new = np.empty((self.dim_latent, n_clusters))
        # Rebuild the full symmetric weight matrix C from the packed pi vector.
        C = np.zeros((self.n_states, self.n_states))
        C[np.triu_indices(self.n_states, 0)] = pi
        C = np.triu(C, 1) + C.T
        C_new = np.zeros((n_clusters, n_clusters))
        uni_cluster_labels = self.labels_map['label_names'].to_numpy()
        returned_order = {}
        cluster_labels = self.labels
        # Relabel cells of merged clusters with the comma-joined name of
        # every original cluster absorbed into the merge.
        for i in range(n_clusters):
            temp = uni_cluster_labels[clustering.labels_ == i]
            idx = np.isin(cluster_labels, temp)
            cluster_labels[idx] = ','.join(temp)
            returned_order[i] = ','.join(temp)
            if np.sum(clustering.labels_==i)>1:
                print('Merge %s'% ','.join(temp))
        uni_cluster_labels = np.unique(cluster_labels)
        for i,l in enumerate(uni_cluster_labels): ## reorder the merged clusters based on the cluster names
            # NOTE(review): suspected bug -- returned_order is a dict, so
            # `returned_order == l` evaluates to plain False and np.where
            # yields an empty index rather than the merged-cluster id.
            # Compare against the commented-out implementation below and
            # confirm with upstream before relying on this branch.
            k = np.where(returned_order == l)
            mu_new[:, i] = np.mean(mu[:,clustering.labels_==k], axis=-1)
            # sum of the aggregated pi's
            C_new[i, i] = np.sum(np.triu(C[clustering.labels_==k,:][:,clustering.labels_==k]))
            for j in range(i+1, n_clusters):
                k1 = np.where(returned_order == uni_cluster_labels[j])
                C_new[i, j] = np.sum(C[clustering.labels_== k, :][:, clustering.labels_==k1])
        # labels_map_new = {}
        # for i in range(n_clusters):
        #     # update label map: int->str
        #     labels_map_new[i] = self.labels_map.loc[clustering.labels_==i, 'label_names'].str.cat(sep=',')
        #     if np.sum(clustering.labels_==i)>1:
        #         print('Merge %s'%labels_map_new[i])
        #     # mean of the aggregated cluster means
        #     mu_new[:, i] = np.mean(mu[:,clustering.labels_==i], axis=-1)
        #     # sum of the aggregated pi's
        #     C_new[i, i] = np.sum(np.triu(C[clustering.labels_==i,:][:,clustering.labels_==i]))
        #     for j in range(i+1, n_clusters):
        #         C_new[i, j] = np.sum(C[clustering.labels_== i, :][:, clustering.labels_==j])
        C_new = np.triu(C_new,1) + C_new.T
        pi_new = C_new[np.triu_indices(n_clusters)]
        # log(0) entries become -inf so softmax assigns them zero probability.
        log_pi_new = np.log(pi_new, out=np.ones_like(pi_new)*(-np.inf), where=(pi_new!=0)).reshape((1,-1))
        self.n_states = n_clusters
        self.labels_map = pd.DataFrame.from_dict(
            {i:label for i,label in enumerate(uni_cluster_labels)},
            orient='index', columns=['label_names'], dtype=str
        )
        self.labels = cluster_labels
        # self.labels_map = pd.DataFrame.from_dict(
        #     labels_map_new, orient='index', columns=['label_names'], dtype=str
        # )
        self.vae.init_latent_space(self.n_states, mu_new, log_pi_new)
        self.inferer = Inferer(self.n_states)
        # Refresh the cached centers and edge-probability matrix from the VAE.
        self.mu = self.vae.latent_space.mu.numpy()
        self.pi = np.triu(np.ones(self.n_states))
        self.pi[self.pi > 0] = tf.nn.softmax(self.vae.latent_space.pi).numpy()[0]
def train(self, stratify = False, test_size = 0.1, random_state: int = 0,
          learning_rate: float = 1e-2, batch_size: int = 256,
          L: int = 1, alpha: float = 0.10, beta: float = 2, gamma: float = 1,
          num_epoch: int = 200, num_step_per_epoch: Optional[int] = None,
          early_stopping_patience: int = 10, early_stopping_tolerance: float = 0.01,
          early_stopping_relative: bool = True, early_stopping_warmup: int = 0,
          # path_to_weights: Optional[str] = None,
          verbose: bool = False, **kwargs):
    '''Fit the model on a train/test split of the stored data.

    Parameters
    ----------
    stratify : np.array, None, or False
        Labels for stratified shuffle splitting. ``None`` falls back to
        ``self.labels``; ``False`` forces a plain shuffle split.
    test_size : float or int, optional
        Proportion (or absolute size) of the held-out test set.
    random_state : int, optional
        Seed for the data split.
    learning_rate : float, optional
        Initial learning rate of the Adam optimizer.
    batch_size : int, optional
        Training batch size; 32 is a better choice for small (<1000 cells)
        datasets.
    L : int, optional
        Number of Monte Carlo samples.
    alpha : float, optional
        Weight in [0,1] encouraging covariate adjustment; unused when there
        are no covariates.
    beta : float, optional
        The beta-VAE weight.
    gamma : float, optional
        Weight of the condition term; 0 disables condition modelling.
    num_epoch : int, optional
        Maximum number of epochs.
    num_step_per_epoch : int, optional
        Steps per epoch; inferred from the number of cells and the batch
        size when None.
    early_stopping_patience : int, optional
        Maximum number of epochs without improvement before stopping.
    early_stopping_tolerance : float, optional
        Minimum loss change counted as an improvement.
    early_stopping_relative : bool, optional
        Whether to monitor the relative (instead of absolute) loss change.
    early_stopping_warmup : int, optional
        Number of warmup epochs before early stopping kicks in.
    verbose : bool, optional
        Whether to print training progress.
    **kwargs :
        Forwarded to the underlying training routine.
    '''
    # With gamma == 0 or no conditions supplied, feed NaNs so the
    # condition term stays inert.
    if gamma == 0 or self.conditions is None:
        cond = np.array([np.nan] * self.adata.shape[0])
    else:
        cond = self.conditions

    # Resolve the stratification target: None -> stored labels,
    # False -> plain (unstratified) shuffle split.
    if stratify is None:
        strat = self.labels
    elif stratify is False:
        strat = None
    else:
        strat = stratify

    id_train, id_test = train_test_split(
        np.arange(self.X_input.shape[0]),
        test_size=test_size,
        stratify=strat,
        random_state=random_state)

    if num_step_per_epoch is None:
        num_step_per_epoch = len(id_train) // batch_size + 1

    dtype = tf.keras.backend.floatx()
    c = None if self.c_score is None else self.c_score.astype(dtype)

    def _make_dataset(ids):
        # Build the tf.data pipeline for the given subset of cell indices.
        return train.warp_dataset(
            self.X_input[ids].astype(dtype),
            None if c is None else c[ids],
            batch_size,
            self.X_output[ids].astype(dtype),
            self.scale_factor[ids].astype(dtype),
            conditions=cond[ids],
            pi_cov=self.pi_cov[ids])

    self.train_dataset = _make_dataset(id_train)
    self.test_dataset = _make_dataset(id_test)

    self.vae = train.train(
        self.train_dataset,
        self.test_dataset,
        self.vae,
        learning_rate,
        L,
        alpha,
        beta,
        gamma,
        num_epoch,
        num_step_per_epoch,
        early_stopping_patience,
        early_stopping_tolerance,
        early_stopping_relative,
        early_stopping_warmup,
        verbose,
        **kwargs
    )

    # Refresh the cached latent-space summaries after training.
    self.update_z()
    self.mu = self.vae.latent_space.mu.numpy()
    self.pi = np.triu(np.ones(self.n_states))
    self.pi[self.pi > 0] = tf.nn.softmax(self.vae.latent_space.pi).numpy()[0]
    # if path_to_weights is not None:
    #     self.save_model(path_to_weights)
def output_pi(self, pi_cov):
    """Evaluate the pi layer for one covariate vector.

    Returns an (n_states, n_states) matrix whose upper triangle holds the
    softmax-ed pi values, plus a mask covering the strict lower triangle
    (useful to blank it out in a heatmap).
    """
    pilayer = self.vae.pilayer
    covariate = tf.expand_dims(tf.constant([pi_cov], dtype=tf.float32), 0)
    probs = tf.nn.softmax(pilayer(covariate)).numpy()[0]
    # Scatter the flat softmax output into the upper triangle.
    n_states = self.vae.n_states
    heat = np.zeros((n_states, n_states))
    heat[np.triu_indices(n_states)] = probs
    # Mask marks the strict lower triangle (diagonal stays visible).
    lower_mask = np.tril(np.ones_like(heat), k=-1)
    return heat, lower_mask
def return_pilayer_weights(self):
    """Return the pi-layer parameters stacked into one array.

    Returns
    -------
    np.ndarray
        Array of shape (dim(pi_cov) + 1, n_categories); the last row holds
        the biases, the rows above it the kernel weights.
    """
    # BUG FIX: the original body referenced the global name `model`
    # instead of `self`, which raises NameError when the method is called
    # on an instance.
    kernel = self.vae.pilayer.weights[0].numpy()
    biases = self.vae.pilayer.weights[1].numpy().reshape(1, -1)
    return np.vstack((kernel, biases))
def posterior_estimation(self, batch_size: int = 32, L: int = 10, **kwargs):
'''Initialize trajectory inference by computing the posterior estimations.
Parameters
----------
batch_size : int, optional
The batch size when doing inference.
L : int, optional
The number of MC samples when doing inference.
**kwargs :
Extra key-value arguments for dimension reduction algorithms.
'''
c = None if self.c_score is None else self.c_score.astype(tf.keras.backend.floatx())
self.test_dataset = train.warp_dataset(self.X_input.astype(tf.keras.backend.floatx()),
c,
batch_size)
_, _, self.pc_x,\
self.cell_position_posterior,self.cell_position_variance,_ = self.vae.inference(self.test_dataset, L=L)
uni_cluster_labels = | |
33-67.
E = (Pk * np.log(1 / Pk)).sum()
MIT = np.nansum(ti[:, None] * pik * np.log(pik / Pk)) / (T * E)
return MIT, core_data, groups
class MultiInformationTheory:
    """Multigroup Information Theory segregation index.

    Parameters
    ----------
    data : a pandas DataFrame
    groups : list of strings.
        The column names in ``data`` of the population groups of interest.

    Attributes
    ----------
    statistic : float
        Multigroup Information Theory Index.
    core_data : a pandas DataFrame
        The subset of columns actually used in the estimation.

    Examples
    --------
    Using 2000 Census Tract data for the Sacramento MSA, CA, with the
    White, Black, Asian and Hispanic populations as groups:

    >>> import libpysal
    >>> import geopandas as gpd
    >>> from segregation.multigroup_aspatial import MultiInformationTheory
    >>> input_df = gpd.read_file(libpysal.examples.get_path("sacramentot2.shp"))
    >>> groups_list = ['WHITE_', 'BLACK_', 'ASIAN_','HISP_']
    >>> index = MultiInformationTheory(input_df, groups_list)
    >>> index.statistic
    0.1710160297858887

    Notes
    -----
    Based on Reardon and Firebaugh, "Measures of multigroup segregation."
    Sociological Methodology 32.1 (2002): 33-67.
    Reference: :cite:`reardon2002measures`.
    """

    def __init__(self, data, groups):
        # Delegate the computation and unpack its three results.
        statistic, core_data, used_groups = _multi_information_theory(data, groups)
        self.statistic = statistic
        self.core_data = core_data
        self._groups = used_groups
        # Kept so generic inference wrappers can re-run the estimator.
        self._function = _multi_information_theory
def _multi_relative_diversity(data, groups):
    """Compute the Multigroup Relative Diversity index.

    Parameters
    ----------
    data : a pandas DataFrame
    groups : list of strings.
        The column names in ``data`` of the population groups of interest.

    Returns
    -------
    statistic : float
        Multigroup Relative Diversity Index.
    core_data : a pandas DataFrame
        The subset of columns used to perform the estimate.
    groups : list of strings
        The group names, echoed back.

    Notes
    -----
    Based on Reardon (1998), "Measures of racial diversity and segregation
    in multigroup and hierarchically structured populations."
    High diversity means less segregation.
    Reference: :cite:`reardon1998measures`.
    """
    core_data = data[groups]
    # NOTE(review): the _nan_handle result is bound but never used below;
    # the computation proceeds on `core_data`, as in the original code.
    # Confirm whether NaN filtering was actually intended here.
    data = _nan_handle(core_data)
    counts = np.array(core_data)
    total = counts.sum()
    row_totals = counts.sum(axis=1)
    # Per-unit group shares and overall (marginal) group shares.
    group_shares = counts / row_totals[:, None]
    overall_shares = counts.sum(axis=0) / total
    # Simpson interaction index of the whole population.
    interaction = (overall_shares * (1 - overall_shares)).sum()
    statistic = (row_totals[:, None] * (group_shares - overall_shares) ** 2).sum() \
        / (total * interaction)
    return statistic, core_data, groups
class MultiRelativeDiversity:
    """Multigroup Relative Diversity segregation index.

    Parameters
    ----------
    data : a pandas DataFrame
    groups : list of strings.
        The column names in ``data`` of the population groups of interest.

    Attributes
    ----------
    statistic : float
        Multigroup Relative Diversity Index.
    core_data : a pandas DataFrame
        The subset of columns actually used in the estimation.

    Examples
    --------
    Using 2000 Census Tract data for the Sacramento MSA, CA, with the
    White, Black, Asian and Hispanic populations as groups:

    >>> import libpysal
    >>> import geopandas as gpd
    >>> from segregation.multigroup_aspatial import MultiRelativeDiversity
    >>> input_df = gpd.read_file(libpysal.examples.get_path("sacramentot2.shp"))
    >>> groups_list = ['WHITE_', 'BLACK_', 'ASIAN_','HISP_']
    >>> index = MultiRelativeDiversity(input_df, groups_list)
    >>> index.statistic
    0.15820019878220337

    Notes
    -----
    Based on Reardon (1998), "Measures of racial diversity and segregation
    in multigroup and hierarchically structured populations."
    High diversity means less segregation.
    Reference: :cite:`reardon1998measures`.
    """

    def __init__(self, data, groups):
        # Delegate the computation and unpack its three results.
        statistic, core_data, used_groups = _multi_relative_diversity(data, groups)
        self.statistic = statistic
        self.core_data = core_data
        self._groups = used_groups
        # Kept so generic inference wrappers can re-run the estimator.
        self._function = _multi_relative_diversity
def _multi_squared_coefficient_of_variation(data, groups):
    """Compute the Multigroup Squared Coefficient of Variation index.

    Parameters
    ----------
    data : a pandas DataFrame
    groups : list of strings.
        The column names in ``data`` of the population groups of interest.

    Returns
    -------
    statistic : float
        Multigroup Squared Coefficient of Variation Index.
    core_data : a pandas DataFrame
        The subset of columns used to perform the estimate.
    groups : list of strings
        The group names, echoed back.

    Notes
    -----
    Based on Reardon and Firebaugh, "Measures of multigroup segregation."
    Sociological Methodology 32.1 (2002): 33-67.
    Reference: :cite:`reardon2002measures`.
    """
    core_data = data[groups]
    # NOTE(review): the _nan_handle result is bound but never used below;
    # the computation proceeds on `core_data`, as in the original code.
    data = _nan_handle(core_data)
    counts = np.array(core_data)
    n_groups = counts.shape[1]
    total = counts.sum()
    row_totals = counts.sum(axis=1)
    group_shares = counts / row_totals[:, None]
    overall_shares = counts.sum(axis=0) / total
    statistic = ((row_totals[:, None] * (group_shares - overall_shares) ** 2)
                 / (total * (n_groups - 1) * overall_shares)).sum()
    return statistic, core_data, groups
class MultiSquaredCoefficientVariation:
    """Multigroup Squared Coefficient of Variation segregation index.

    Parameters
    ----------
    data : a pandas DataFrame
    groups : list of strings.
        The column names in ``data`` of the population groups of interest.

    Attributes
    ----------
    statistic : float
        Multigroup Squared Coefficient of Variation Index.
    core_data : a pandas DataFrame
        The subset of columns actually used in the estimation.

    Examples
    --------
    Using 2000 Census Tract data for the Sacramento MSA, CA, with the
    White, Black, Asian and Hispanic populations as groups:

    >>> import libpysal
    >>> import geopandas as gpd
    >>> from segregation.multigroup_aspatial import MultiSquaredCoefficientVariation
    >>> input_df = gpd.read_file(libpysal.examples.get_path("sacramentot2.shp"))
    >>> groups_list = ['WHITE_', 'BLACK_', 'ASIAN_','HISP_']
    >>> index = MultiSquaredCoefficientVariation(input_df, groups_list)
    >>> index.statistic
    0.11875484641127525

    Notes
    -----
    Based on Reardon and Firebaugh, "Measures of multigroup segregation."
    Sociological Methodology 32.1 (2002): 33-67.
    Reference: :cite:`reardon2002measures`.
    """

    def __init__(self, data, groups):
        # Delegate the computation and unpack its three results.
        statistic, core_data, used_groups = \
            _multi_squared_coefficient_of_variation(data, groups)
        self.statistic = statistic
        self.core_data = core_data
        self._groups = used_groups
        # Kept so generic inference wrappers can re-run the estimator.
        self._function = _multi_squared_coefficient_of_variation
def _multi_diversity(data, groups, normalized=False):
    """Compute the Multigroup Diversity (Theil entropy) index.

    Parameters
    ----------
    data : a pandas DataFrame
    groups : list of strings.
        The column names in ``data`` of the population groups of interest.
    normalized : bool, optional (default False)
        Whether to divide the result by its maximum, the natural log of
        the number of groups.

    Returns
    -------
    statistic : float
        Multigroup Diversity Index.
    core_data : a pandas DataFrame
        The subset of columns used to perform the estimate.
    groups : list of strings
        The group names, echoed back.

    Notes
    -----
    Based on Reardon and Firebaugh (2002), "Measures of multigroup
    segregation" (Equation 2, p. 37 — also known as Theil's Entropy
    Index) and Theil (1972), "Statistical decomposition analysis".
    High diversity means less segregation.
    Reference: :cite:`reardon2002measures`.
    """
    core_data = data[groups]
    # NOTE(review): the _nan_handle result is bound but never used below;
    # the computation proceeds on `core_data`, as in the original code.
    data = _nan_handle(core_data)
    counts = np.array(core_data)
    overall_shares = counts.sum(axis=0) / counts.sum()
    # Shannon entropy of the overall group distribution.
    entropy = -(overall_shares * np.log(overall_shares)).sum()
    if normalized:
        entropy = entropy / np.log(counts.shape[1])
    return entropy, core_data, groups
class MultiDiversity:
"""
Calculation of Multigroup Diversity index
Parameters
----------
data : a pandas DataFrame
groups : list of strings.
The variables names in data of the groups of interest of the analysis.
Attributes
----------
statistic : float
Multigroup Diversity Index
core_data : a pandas DataFrame
A pandas DataFrame that contains the columns used to perform the estimate.
Examples
--------
In this example, we are going to use 2000 Census Tract Data for Sacramento MSA, CA. The groups of interest are White, Black, Asian and Hispanic population.
Firstly, we need to perform some import the modules and the respective function.
>>> import libpysal
>>> import geopandas as gpd
>>> from segregation.multigroup_aspatial import MultiDiversity
Then, we read the data and create an auxiliary list with only the necessary columns for fitting the index.
>>> input_df = gpd.read_file(libpysal.examples.get_path("sacramentot2.shp"))
>>> groups_list = | |
<filename>openlis/model/recursive_model_index_simple.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import os
import math
import numpy as np
import tensorflow as tf
from six.moves import xrange
class RMI_simple(object):
""" Implements the simple "Recursive-index model" described in the paper
'The Case for Learned Index Structures', which can be found at
[Kraska et al., 2017](http://arxiv.org/abs/1712.01208)
([pdf](http://arxiv.org/pdf/1712.01208.pdf)).
The first stage is a fully connected neural network with any number
(>=0) of hidden layers. Each second stage model is a single-variable
linear regression.
At model creation, the user can choose the widths of the
hidden layers and the number of models ("experts") used in
stage 2.
"""
def __init__(self,
             data_set,
             hidden_layer_widths=None,
             num_experts=10,
             learning_rates=None,
             max_steps=None,
             batch_sizes=None,
             model_save_dir='tf_checkpoints'):
    """Initializes the Recursive-index model.

    Args:
      data_set: object of type DataSet, which the model will train on
      hidden_layer_widths: list of hidden layer widths (use empty list
        for zero hidden layers); defaults to [16, 16]
      num_experts: number of models ("experts") used in stage 2
      learning_rates: list (length=2) of learning rates for each stage;
        defaults to [0.1, 0.1]
      max_steps: list (length=2) of maximum number of training steps for
        each stage; defaults to [1000, 1000]
      batch_sizes: list (length=2) of batch training sizes for each stage;
        defaults to [1000, 1000]
      model_save_dir: Name of directory to save model
    """
    # BUG FIX: the original used mutable list literals as default
    # arguments, which are shared across all instances; materialize
    # fresh lists per call instead (behaviorally equivalent defaults).
    if hidden_layer_widths is None:
        hidden_layer_widths = [16, 16]
    if learning_rates is None:
        learning_rates = [0.1, 0.1]
    if max_steps is None:
        max_steps = [1000, 1000]
    if batch_sizes is None:
        batch_sizes = [1000, 1000]

    # Initialize from input parameters
    self._data_set = data_set
    self.hidden_layer_widths = hidden_layer_widths
    self.num_experts = num_experts
    self.learning_rates = learning_rates
    self.max_steps = max_steps
    self.batch_sizes = batch_sizes
    self.model_save_dir = model_save_dir

    # Pick the optimized numpy inference routine specialized for this
    # number of hidden layers.
    num_hidden_layers = len(self.hidden_layer_widths)
    specialized = {
        0: self._run_inference_numpy_0_hidden,
        1: self._run_inference_numpy_1_hidden,
        2: self._run_inference_numpy_2_hidden,
    }
    self.run_inference = specialized.get(num_hidden_layers,
                                         self._run_inference_numpy_n_hidden)

    # Per-expert prediction-error bookkeeping.
    # Fill these values using self.calc_min_max_errors()
    self.max_error_left = None
    self.max_error_right = None
    self.min_predict = None
    self.max_predict = None
    self.min_pos = None
    self.max_pos = None
    self._initialize_errors()

    # Slots for trained tensor variables (e.g. weights and biases),
    # copied out of TensorFlow so inference can run in plain numpy.
    self.hidden_w = [None] * num_hidden_layers
    self.hidden_b = [None] * num_hidden_layers
    self.linear_w = None
    self.linear_b = None
    self.stage_2_w = None
    self.stage_2_b = None
    self._expert_factor = None

    # Pre-calculate normalization constants so they are not repeatedly
    # recomputed later: subtract the mean, divide by the std...
    self._keys_mean = self._data_set.keys_mean
    self._keys_std_inverse = 1.0 / self._data_set.keys_std
    # ...then divide by 2*sqrt(3), so that a uniform distribution on
    # [a,b] maps onto a uniform distribution on [-0.5, 0.5].
    self._keys_norm_factor = 0.5 / np.sqrt(3)
    # Precalculation for: expert = floor(stage_1_pos * expert_factor)
    self._expert_factor = self.num_experts / self._data_set.num_positions
def new_data(self, data_set):
"""Changes the data set used for training. For example, this function should
be called after a large number of inserts are performed.
Args:
data_set: type DataSet, replaces current data_set with new data_set
"""
self._data_set = data_set
# Normalize using mean and dividing by the standard deviation
self._keys_mean = self._data_set.keys_mean
self._keys_std_inverse = 1.0 / self._data_set.keys_std
# Normalize further by dividing by 2*sqrt(3), so that
# a uniform distribution in the range [a,b] would transform
# to a uniform distribution in the range [-0.5,0.5]
self._keys_norm_factor = 0.5 / np.sqrt(3)
# Precalculation for expert = floor(stage_1_pos * expert_factor)
self._expert_factor = self.num_experts/self._data_set.num_positions
def _setup_placeholder_inputs(self, batch_size):
    """Create placeholder tensors for inputing keys and positions.

    Args:
      batch_size: Batch size.

    Returns:
      keys_placeholder: Keys placeholder tensor.
      labels_placeholder: Labels placeholder tensor.
    """
    with tf.name_scope("placeholders"):
        # Leading dimension is None so the same graph handles
        # variable batch sizes.
        keys_ph = tf.placeholder(tf.float32,
                                 shape=(None, self._data_set.key_size),
                                 name="keys")
        labels_ph = tf.placeholder(tf.int64, shape=(None), name="labels")
    return keys_ph, labels_ph
def _fill_feed_dict(self, keys_pl, labels_pl, batch_size=100, shuffle=True):
""" Creates a dictionary for use with TensorFlow's feed_dict
Args:
keys_pl: TensorFlow (TF) placeholder for keys,
created from self._setup_placeholder_inputs().
labels_pl: TF placeholder for labels (i.e. the key positions)
created from self._setup_placeholder_inputs().
batch_size: integer size of batch
shuffle: whether or not to shuffle the data
Note: shuffle=Flase can be useful for debugging
Returns:
feed_dict: The feed dictionary mapping from placeholders to values.
"""
# Create the feed_dict for the placeholders filled with the next
# `batch size` examples.
keys_feed, labels_feed = self._data_set.next_batch(batch_size,shuffle)
feed_dict = {
keys_pl: keys_feed,
labels_pl: labels_feed,
}
return feed_dict
def _setup_inference_stage_1(self, keys):
    """Set up Stage 1 inference.

    Builds the stage-1 graph: normalize keys, apply the configured
    hidden ReLU layers, then a final linear layer whose output is
    rescaled from [-0.5, 0.5] to [0, num_positions).

    Args:
        keys: Tensorflow placeholder for keys
    Returns:
        pos_stage_1: Output tensor that predicts key position
    """
    # All Stage 1 operations should be in 'stage_1' name_Scope
    with tf.name_scope('stage_1'):
        keys_std = self._data_set.keys_std
        keys_mean = self._data_set.keys_mean
        key_size = self._data_set.key_size
        hidden_widths = self.hidden_layer_widths
        # Normalize
        with tf.name_scope('normalize'):
            # float64 throughout so key precision is not lost.
            keys = tf.cast(keys,dtype=tf.float64)
            # Normalize using mean and standard deviation
            keys_normed = tf.scalar_mul(tf.constant(1.0/keys_std),
                                        tf.subtract(keys,tf.constant(keys_mean)))
            # Normalize further by dividing by 2*sqrt(3), so that
            # a uniform distribution in the range [a,b] would transform
            # to a uniform distribution in the range [-0.5,0.5]
            keys_normed = tf.scalar_mul(tf.constant(0.5/np.sqrt(3)),
                                        keys_normed)
        # All hidden layers
        tf_output = keys_normed # previous output
        output_size = key_size # previous output size
        for layer_idx in range(0,len(hidden_widths)):
            tf_input = tf_output # get current inputs from previous outputs
            input_size = output_size
            output_size = hidden_widths[layer_idx]
            name_scope = "hidden_" + str(layer_idx+1) # Layer num starts at 1
            with tf.name_scope(name_scope):
                # Truncated-normal init scaled by 1/sqrt(fan_in).
                weights = tf.Variable(
                    tf.truncated_normal([input_size, output_size],
                                        stddev=1.0 / math.sqrt(float(input_size)),
                                        dtype=tf.float64),
                    name='weights',
                    dtype=tf.float64)
                biases = tf.Variable(tf.zeros([output_size],dtype=tf.float64),
                                     name='biases',
                                     dtype=tf.float64)
                tf_output = tf.nn.relu(tf.matmul(tf_input, weights) + biases)
        # Linear
        with tf.name_scope('linear'):
            # Final single-unit linear layer producing the raw position.
            weights = tf.Variable(
                tf.truncated_normal([output_size, 1],
                                    stddev=1.0 / math.sqrt(float(output_size)),
                                    dtype=tf.float64),
                name='weights')
            biases = tf.Variable(tf.zeros([1],dtype=tf.float64),
                                 name='biases')
            pos_stage_1 = tf.matmul(tf_output, weights) + biases
            # Flatten [batch, 1] -> [batch] for scalar keys.
            if (key_size == 1):
                pos_stage_1 = tf.reshape(pos_stage_1,[-1])
            # At this point we want the model to have produced
            # output in the range [-0.5, 0.5], but we want the
            # final output to be in the range [0,N), so we need
            # to add 0.5 and multiply by N.
            # Doing normalization this way can effect how
            # the learning rates scale with N, so we should
            # consider doing this normalization outside of
            # the Tensflow pipeline.
            pos_stage_1 = tf.scalar_mul(tf.constant(self._data_set.num_positions,
                                                    dtype=tf.float64),
                                        tf.add(pos_stage_1,
                                               tf.constant(0.5,dtype=tf.float64)))
            # Named identity so the output op can be fetched as "pos".
            pos_stage_1 = tf.identity(pos_stage_1,name="pos")
    return pos_stage_1
def _setup_loss_stage_1(self, pos_stage_1, pos_true):
    """Calculates the loss from the keys and positions, for Stage 1.

    Args:
      pos_stage_1: tensor with shape [batch_size].
        The position predicted in stage 1.
      pos_true: int64 tensor with shape [batch_size].
        The true position for the key.

    Returns:
      loss: scalar loss tensor, using mean_squared_error.
    """
    # CLEANUP: the original computed `labels = tf.to_int64(pos_true)` but
    # never used it; the dead conversion has been removed.
    loss = tf.losses.mean_squared_error(
        labels=pos_true,
        predictions=pos_stage_1)
    return loss
def _setup_training_stage_1(self, loss):
    """Sets up the TensorFlow training operations for Stage 1.

    Args:
      loss: loss tensor, from self._setup_loss_stage_1().

    Returns:
      train_op: the TensorFlow operation for training Stage 1.
    """
    # Expose the loss to TensorBoard summaries.
    tf.summary.scalar('loss', loss)
    # Adam with the stage-1 learning rate; Adadelta or plain gradient
    # descent are reasonable alternatives here.
    optimizer = tf.train.AdamOptimizer(self.learning_rates[0])
    # Global step counter, incremented once per training step.
    global_step = tf.Variable(0, name='global_step', trainable=False)
    # Single op that applies the gradients and bumps the counter.
    return optimizer.minimize(loss, global_step=global_step)
def _setup_inference_stage_2(self, keys, pos_stage_1):
"""Set up Stage 2 inference.
Args:
keys: TensorFlow placeholder for keys
pos_stage_1: tensor, output of Stage 1 inference
Returns:
pos_stage_2: tensor, output of Stage 2 inference
"""
max_index = self._data_set.num_positions
# Stage 2
with tf.name_scope('stage_2'):
keys_std = self._data_set.keys_std
keys_mean = self._data_set.keys_mean
keys = tf.squeeze(keys,1)
keys = tf.identity(keys,name='key')
keys = tf.cast(keys,dtype=tf.float64)
# Normalize using mean and standard deviation
keys_normed = tf.scalar_mul(tf.constant(1.0/keys_std),
tf.subtract(keys,tf.constant(keys_mean)))
# Normalize further by dividing by 2*sqrt(3), so that
# a uniform distribution in the range [a,b] would transform
# to a uniform distribution in the range [-0.5,0.5]
keys_normed = tf.scalar_mul(tf.constant(0.5/np.sqrt(3)),
keys_normed)
# Calculate which expert to use
expert_index = tf.to_int32(
tf.floor(
tf.scalar_mul(tf.constant(self._expert_factor,dtype=tf.float64),
pos_stage_1)))
# Ensure that expert_index is within range | |
#################################################################################################
# Visual object tracking in panoramic video
# Master thesis at Brno University of Technology - Faculty of Information Technology
# Author: <NAME> (<EMAIL>)
# Supervisor: Doc. Ing. <NAME>, Ph.D.
# Module: evaluation.py
# Description: Evaluation of single object trackers in custom groundtruth dataset.
# Drawing groundtruth+result bounding boxes or computing metrics.
#################################################################################################
from cv2 import cv2
import numpy as np
import sys
import glob
import os
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from code.boundingbox import Parser
from code.boundingbox import BoundingBox
class Evaluation:
"""
Evaluation of single object trackers in custom groundtruth dataset.
Drawing groundtruth+result bounding boxes or computing metrics.
"""
def __init__(self, path: str, groundtruthPath: str, resultPath: str):
    """Store paths and drawing constants; actual data are loaded by loadInit().

    Args:
        path: path of the video file or directory with *.jpg images.
        groundtruthPath: path of the file with groundtruth data.
        resultPath: path of the file with tracker result data.
    """
    # list of annotated groundtruth bounding box objects
    self.gt_bounding_boxes = []
    # list of tracker result bounding boxes
    # BUG FIX: the original assigned self.gt_bounding_boxes twice and
    # left result_bounding_boxes undefined until loadInit() ran.
    self.result_bounding_boxes = []
    # path of video file or directory with *.jpg images
    self.path = path
    # path of file with groundtruth data
    self.groundtruth_path = groundtruthPath
    # path of file with tracking result data
    self.result_path = resultPath
    # enable parsing/creating methods
    self.parser = Parser()
    self.video = None
    self.video_width = None
    self.video_height = None
    # constants for sizes and positions of opencv rectangles and texts
    self.RECTANGLE_BORDER_PX = 2
    self.FONT_SCALE = 0.75
    self.FONT_WEIGHT = 2
    self.TEXT_ROW1_POS = (20,30)
    self.TEXT_ROW2_POS = (20,60)
    self.TEXT_ROW2_POS2 = (280,60)
    self.TEXT_ROW3_POS = (20,90)
    self.TEXT_ROW3_POS2 = (280,90)
    self.WINDOW_NAME = "Evaluation"
def loadInit(self):
    """Open the video and parse groundtruth/result files into bounding boxes."""
    # Open the video; abort the whole run on failure.
    self.video = cv2.VideoCapture(self.path)
    if not self.video.isOpened():
        print("Error - Could not open video")
        sys.exit(-1)

    # Cache the frame dimensions for later coordinate math.
    self.video_width = int(self.video.get(cv2.CAP_PROP_FRAME_WIDTH))
    self.video_height = int(self.video.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # Both data files must exist before parsing.
    if not os.path.exists(self.groundtruth_path):
        print("Error - Could not read a groundtruth file")
        sys.exit(-1)
    if not os.path.exists(self.result_path):
        print("Error - Could not read a tracking result file")
        sys.exit(-1)

    # Reset, then fill both bounding-box lists from their files.
    self.gt_bounding_boxes = []
    self.result_bounding_boxes = []
    self.gt_bounding_boxes = self.parser.parseGivenDataFile(
        self.groundtruth_path, self.video_width)
    self.result_bounding_boxes = self.parser.parseGivenDataFile(
        self.result_path, self.video_width)
############################################################
################ Intersection over Union ###################
############################################################
def computeIntersectionOverUnion(self):
    """Compute per-frame IoU between groundtruth and result boxes.

    Writes one IoU value per non-occluded frame to '<result>-iou.txt'.
    Intersection over Union is an evaluation metric used to measure the
    accuracy/success of an object tracker/detector.
    """
    if len(self.gt_bounding_boxes) == len(self.result_bounding_boxes):
        iou_lines = []
        for gt_bbox, result_bbox in zip(self.gt_bounding_boxes,
                                        self.result_bounding_boxes):
            # Skip frames where groundtruth marks occlusion (no box).
            if gt_bbox.point1 and gt_bbox.point2:
                iou_lines.append(
                    str(self.intersectionOverUnion(gt_bbox, result_bbox)) + "\n")
        # Persist the per-frame values next to the result file.
        saveFilePath = self.result_path.replace(".txt", "-iou.txt")
        newFile = open(saveFilePath, "w")
        newFile.write("".join(iou_lines))
        newFile.close()
        print("File '" + saveFilePath + "' has been created.")
    # NOTE(review): display indentation was ambiguous in the original;
    # release is assumed to run regardless of the length check — confirm.
    self.video.release()
# inspired and modified from https://www.pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection/
def intersectionOverUnion(self, bboxA: BoundingBox, bboxB: BoundingBox):
    """Compute the IoU of two bounding boxes (0.0 when either box is invalid)."""
    if not (bboxA.point1 and bboxA.point2 and bboxB.point1 and bboxB.point2):
        # At least one box is missing -> tracker failure.
        return 0.0

    # Corners of the intersection rectangle.
    inter_left = max(bboxA.get_point1_x(), bboxB.get_point1_x())
    inter_top = max(bboxA.get_point1_y(), bboxB.get_point1_y())
    # Widths/heights are used instead of point2 directly, because point1
    # can sit on the right image border while point2 wraps to the left
    # border of the equirectangular frame.
    inter_right = min(bboxA.get_point1_x() + bboxA.get_width(),
                      bboxB.get_point1_x() + bboxB.get_width())
    inter_bottom = min(bboxA.get_point1_y() + bboxA.get_height(),
                       bboxB.get_point1_y() + bboxB.get_height())

    # +1 compensates for zero-indexed pixel coordinates.
    intersection_area = (max(0, inter_right - inter_left + 1)
                         * max(0, inter_bottom - inter_top + 1))

    # Areas of the prediction and groundtruth rectangles.
    area_a = bboxA.get_width() * bboxA.get_height()
    area_b = bboxB.get_width() * bboxB.get_height()

    # IoU = intersection / (A + B - intersection); clamp float rounding.
    iou = intersection_area / float(area_a + area_b - intersection_area)
    return min(iou, 1.0)
############################################################
################ Euclidian distance (L2 norm) ##############
############################################################
def computeCenterError(self):
    """Compute per-frame center-distance error between groundtruth and result boxes.

    Writes one value per non-occluded frame to '<result>-centererror.txt'.
    Location error is an evaluation metric used to measure the accuracy
    of an object tracker/detector.
    """
    if len(self.gt_bounding_boxes) == len(self.result_bounding_boxes):
        error_lines = []
        for gt_bbox, result_bbox in zip(self.gt_bounding_boxes,
                                        self.result_bounding_boxes):
            # Skip frames where groundtruth marks occlusion (no box).
            if gt_bbox.point1 and gt_bbox.point2:
                error_lines.append(
                    str(self.centerError(gt_bbox, result_bbox)) + "\n")
        # Persist the per-frame values next to the result file.
        saveFilePath = self.result_path.replace(".txt", "-centererror.txt")
        newFile = open(saveFilePath, "w")
        newFile.write("".join(error_lines))
        newFile.close()
        print("File '" + saveFilePath + "' has been created.")
    # NOTE(review): display indentation was ambiguous in the original;
    # release is assumed to run regardless of the length check — confirm.
    self.video.release()
    def centerError(self, bboxA: BoundingBox, bboxB: BoundingBox):
        """Return the Euclidean distance between the centers of two boxes,
        wrap-aware for equirectangular panoramas and normalized by resolution.

        When either box is missing its corner points (tracker failure /
        occlusion), a fixed MAX_ERROR of 100 px is returned instead.
        """
        if bboxA.point1 and bboxA.point2 and bboxB.point1 and bboxB.point2:
            # centers for the direct (left-to-right) distance; the x coordinate
            # is taken modulo the video width so a box crossing the panorama
            # seam maps back into [0, video_width)
            centerA1 = np.array([(bboxA.get_point1_x() + bboxA.get_width()/2) % self.video_width, bboxA.get_point1_y() + bboxA.get_height()/2])
            centerB1 = np.array([(bboxB.get_point1_x() + bboxB.get_width()/2) % self.video_width, bboxB.get_point1_y() + bboxB.get_height()/2])
            # centers for the wrap-around distance (possible in equirectangular
            # panorama): e.g. center A at video_width-100 and center B at 100
            # are really only 200 px apart across the seam
            centerA2 = np.array([])
            centerB2 = np.array([])
            if centerA1[0] < centerB1[0]:
                # e.g. center bboxA x is 100, center bboxB x is video_width - 100:
                # shift A one full width to the right to measure across the seam
                centerA2 = np.array([self.video_width + centerA1[0], centerA1[1]])
                centerB2 = np.array([centerB1[0], centerB1[1]])
            else:
                # e.g. center bboxA x is video_width - 100, center bboxB x is 100
                centerA2 = np.array([centerA1[0], centerA1[1]])
                centerB2 = np.array([self.video_width + centerB1[0], centerB1[1]])
            ################################## Stackoverflow attribution #######################################
            # https://stackoverflow.com/questions/1401712/how-can-the-euclidean-distance-be-calculated-with-numpy
            # Asked by <NAME>: https://stackoverflow.com/users/1084/nathan-fellman
            # Answered by u0b34a0f6ae: https://stackoverflow.com/users/137317/u0b34a0f6ae
            # Euclidean distance is the L2 norm; keep the smaller of the direct
            # and the wrap-around distance
            euclidian_dist1 = np.linalg.norm(centerA1 - centerB1)
            euclidian_dist2 = np.linalg.norm(centerA2 - centerB2)
            euclidian_dist = euclidian_dist1 if euclidian_dist1 < euclidian_dist2 else euclidian_dist2
            # our dataset varies a lot in resolution (smallest 720p, highest 2160p)
            SMALLEST_VIDEO_HEIGHT = 720
            # NOTE(review): the docstring says distances are normalized by video
            # height, but the ratio is computed from self.video_width -- this is
            # only equivalent for exact 2:1 equirectangular footage; confirm.
            correct_ratio = self.video_width / SMALLEST_VIDEO_HEIGHT
            # normalize it for correct plots
            euclidian_dist = euclidian_dist / correct_ratio
            # defensive clamp; np.linalg.norm is non-negative, so this branch
            # should never fire in practice
            if euclidian_dist < 0:
                euclidian_dist = 0
            # return the (normalized) center location error
            return euclidian_dist
        else:
            # tracker failure / occlusion: precision plots use the X range
            # (0, 51), so 100 px marks a clear failure
            MAX_ERROR = 100
            return MAX_ERROR
############################################################
############## Displaying results + groundtruth ############
############################################################
def runVideo(self):
"""Method for running video and drawing groundtruth + result bounding boxes"""
# resize window (lets define max width is 1600px)
if self.video_width < 1600:
cv2.namedWindow(self.WINDOW_NAME)
else:
cv2.namedWindow(self.WINDOW_NAME, cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)
whRatio = self.video_width / self.video_height
if whRatio == 2:
# pure equirectangular 2:1
cv2.resizeWindow(self.WINDOW_NAME, 1600, 800)
else:
# default 16:9
cv2.resizeWindow(self.WINDOW_NAME, 1600, 900)
scaleFactor = self.video_width / 1600
self.RECTANGLE_BORDER_PX = int(self.RECTANGLE_BORDER_PX * scaleFactor)
self.FONT_SCALE = self.FONT_SCALE * scaleFactor
self.FONT_WEIGHT = int(self.FONT_WEIGHT * scaleFactor) + 1
self.TEXT_ROW1_POS = (int(self.TEXT_ROW1_POS[0] * scaleFactor), int(self.TEXT_ROW1_POS[1] * scaleFactor))
self.TEXT_ROW2_POS = (int(self.TEXT_ROW2_POS[0] * scaleFactor), int(self.TEXT_ROW2_POS[1] * scaleFactor))
self.TEXT_ROW2_POS2 = (int(self.TEXT_ROW2_POS2[0] * scaleFactor), int(self.TEXT_ROW2_POS2[1] * scaleFactor))
self.TEXT_ROW3_POS = (int(self.TEXT_ROW3_POS[0] * scaleFactor), int(self.TEXT_ROW3_POS[1] * scaleFactor))
self.TEXT_ROW3_POS2 = (int(self.TEXT_ROW3_POS2[0] * scaleFactor), int(self.TEXT_ROW3_POS2[1] * scaleFactor))
# prints just basic guide and info
print("----------------------------------------------------")
print("This script shows groundtruth and also tracker results bounding boxes of particular objects for purpose of visual object tracking evaluation")
print("Press 'Esc' or 'Q' key to exit")
print("----------------------------------------------------")
# FPS according to the original video
fps = self.video.get(cv2.CAP_PROP_FPS)
# fps = 30
# calculate the interval between frame.
interval = int(1000/fps)
# counter of frames
currentFrame = 0
# Just read first frame for sure
ok, frame = self.video.read()
if not ok:
print("Error - Could not read a video file")
self.video.release()
cv2.destroyAllWindows()
sys.exit(-1)
| |
__author__ = "<NAME> <<EMAIL>>"
__svnid__ = "$Id: calc.py 850 2009-05-01 00:24:27Z CodyPrecord $"
__revision__ = "$Revision: 850 $"
#--------------------------------------------------------------------------#
# Dependencies
import tkinter as tk
from tkinter import ttk,messagebox,filedialog
from noval import _,NewId
import noval.util.utils as utils
import noval.project.wizard as projectwizard
from noval.project.baseconfig import *
from noval.python.project.viewer import *
from noval.python.project.model import *
from noval.python.project.rundocument import *
import noval.consts as consts
import noval.imageutils as imageutils
import os
import noval.util.strutils as strutils
import noval.util.fileutils as fileutils
import noval.python.parser.utils as parserutils
from noval.project.executor import *
import noval.terminal as terminal
import noval.misc as misc
import noval.python.interpreter.pythonpackages as pythonpackages
import noval.python.interpreter.interpretermanager as interpretermanager
import noval.ui_utils as ui_utils
import noval.ttkwidgets.treeviewframe as treeviewframe
import noval.toolbar as toolbar
import noval.ui_base as ui_base
import noval.python.project.runconfiguration as runconfiguration
import noval.project.command as command
import noval.python.pyutils as pyutils
from pkg_resources import resource_filename
import datetime
if utils.is_py2():
from noval.util.which import which
elif utils.is_py3_plus():
from shutil import which
def SetVariablevar(variable):
    """Ask the user for a directory and store the normalized path in
    *variable*; the variable is left untouched when the dialog is cancelled."""
    chosen = filedialog.askdirectory()
    if not chosen:
        return
    variable.set(fileutils.opj(chosen))
def GetInterpreterScriptPath(interpreter,is_user_site=False):
    """Return the 'Scripts' directory for *interpreter*.

    When *is_user_site* is true the directory is resolved next to the user
    site-packages location, otherwise under the interpreter's install path.
    """
    base = (os.path.dirname(interpreter.GetUserLibPath())
            if is_user_site else interpreter.InstallPath)
    return os.path.join(base, "Scripts")
def GetToolPath(interpreter,name,is_user_site=False):
    """Locate the executable of the command-line tool *name*.

    On Windows the tool lives as '<name>.exe' inside the interpreter's
    Scripts directory; elsewhere it is resolved from PATH via which().
    """
    if not utils.is_windows():
        return which(name)
    script_dir = GetInterpreterScriptPath(interpreter, is_user_site)
    return os.path.join(script_dir, name + ".exe")
def GetPyinstallerToolPath(interpreter):
    """Return the path of the 'pyinstaller' executable for *interpreter*.

    The interpreter's own Scripts directory is searched first, then the
    per-user site install; raises RuntimeError when neither exists.
    """
    for user_site in (False, True):
        tool_path = GetToolPath(interpreter,"pyinstaller",is_user_site=user_site)
        if os.path.exists(tool_path):
            return tool_path
    raise RuntimeError(_("interpreter %s need to install package \"pyinstaller\"")%interpreter.Name)
def GetPyinstallerMakeToolPath(interpreter):
    """Return the path of the 'pyi-makespec' executable for *interpreter*.

    Mirrors GetPyinstallerToolPath: the interpreter's Scripts directory is
    searched first, then the per-user site install.

    Raises:
        RuntimeError: when the tool exists in neither location.  (The
            previous implementation silently returned a non-existent path,
            which made the later spec-file subprocess fail confusingly and
            was inconsistent with GetPyinstallerToolPath.)
    """
    pyinstallermake_tool_path = GetToolPath(interpreter,"pyi-makespec")
    if not os.path.exists(pyinstallermake_tool_path):
        pyinstallermake_tool_path = GetToolPath(interpreter,"pyi-makespec",is_user_site=True)
        if not os.path.exists(pyinstallermake_tool_path):
            # fail loudly, consistent with GetPyinstallerToolPath
            raise RuntimeError(_("interpreter %s need to install package \"pyinstaller\"")%interpreter.Name)
    return pyinstallermake_tool_path
def CheckPyinstaller(interpreter,parent=None):
    """Ensure *interpreter* has pyinstaller available, offering to install it.

    Returns False only when the tool is missing AND the user cancels the
    install dialog; True otherwise.
    """
    try:
        GetPyinstallerToolPath(interpreter)
    except RuntimeError as e:
        # tool not found -> tell the user and offer a pip '--user' install
        messagebox.showinfo(GetApp().GetAppName(),str(e),parent=parent)
        dlg = pythonpackages.InstallPackagesDialog(parent,interpreter,pkg_name='pyinstaller',install_args='--user pyinstaller',autorun=True)
        status = dlg.ShowModal()
        # NOTE(review): 'constants' is not imported by name in this module
        # (only 'noval.consts as consts'); presumably one of the star imports
        # provides it -- confirm, otherwise this line raises NameError.
        if status == constants.ID_CANCEL:
            return False
    return True
class ApplicationInformationConfiguration(runconfiguration.BaseConfiguration):
    """Configuration holder for the 'Application information' property page."""
    CONFIGURATION_NAME = 'ApplicationInformation'
    def __init__(self,project_doc,main_module_file=None,**kwargs):
        super(ApplicationInformationConfiguration,self).__init__(project_doc,main_module_file)
        # keep any extra page options for later use
        self.args = kwargs
    def SaveConfiguration(self,config_key,configuration_name):
        # NOTE(review): the computed key is never used and nothing is stored
        # here -- this looks like an unfinished stub; confirm intended behavior.
        configuration_key = self.GetConfigurationKey(configuration_name,config_key)
class SpecOptionConfiguration(runconfiguration.BaseConfiguration):
    """Persists pyinstaller spec-file options under the project's root key."""
    CONFIGURATION_NAME = 'SpecOption'
    def __init__(self,project_doc,main_module_file=None,**kwargs):
        super(SpecOptionConfiguration,self).__init__(project_doc,main_module_file)
        # keep any extra options for later use
        self.args = kwargs
    def SaveConfiguration(self,**kwargs):
        """Write every keyword argument as a '<root>/<name>' profile entry."""
        root_key_path = self.GetRootKeyPath()
        for option_name, option_value in kwargs.items():
            utils.profile_set(root_key_path + "/" + option_name, option_value)
class DatafilesConfiguration(runconfiguration.BaseConfiguration):
    """Persists the project's pyinstaller data-file patterns."""
    CONFIGURATION_NAME = 'Datafiles'
    def __init__(self,project_doc,main_module_file=None,data_files=None):
        """data_files: optional list of data-file patterns.

        The original signature used a mutable default argument (data_files=[]),
        which is shared across calls; None is the safe sentinel.  The original
        also dropped the argument entirely -- it is now kept on the instance.
        """
        super(DatafilesConfiguration,self).__init__(project_doc,main_module_file)
        self.data_files = [] if data_files is None else data_files
    def SaveConfiguration(self,config_key,configuration_name):
        """Store the startup-path pattern under the configuration key."""
        configuration_key = self.GetConfigurationKey(configuration_name,config_key)
        # NOTE(review): _startup_path_pattern is presumably set by the base
        # class -- it is not assigned anywhere in this class; confirm.
        utils.profile_set(configuration_key + "/Datafiles",self._startup_path_pattern)
class PyinstallerRunconfig(BaseRunconfig):
    """Run configuration that invokes pyinstaller on a project's spec file,
    assembling the command-line flags from the per-file profile settings."""
    def __init__(self,interpreter,file_path,arg='',env=None,start_up=None,project=None):
        # NOTE(review): 'arg' is accepted but never used (callers pass '');
        # confirm it can be dropped or forwarded.
        self._interpreter = interpreter
        self._project = project
        self.filepath = file_path
        self.file_name = os.path.basename(file_path)
        # NOTE(review): this overwrites the interpreter assigned above with
        # the project's configured one -- the first assignment is redundant.
        self._interpreter = self._project.GetandSetProjectDocInterpreter()
        # return value ignored: if the user declines the install,
        # GetPyinstallerToolPath below raises anyway
        CheckPyinstaller(interpreter)
        pyinstaller_tool_path = GetPyinstallerToolPath(self._interpreter)
        spec_path = self.GetSpecfilePath(file_path)
        args = spec_path
        main_module_file = self._project.GetModel().FindFile(file_path)
        # per-file build options stored in the profile
        clean = utils.profile_get_int(self._project.GetFileKey(main_module_file) + "/CleanBuild",False)
        ask = utils.profile_get_int(self._project.GetFileKey(main_module_file) + "/AskReplace",False)
        log_level = utils.profile_get(self._project.GetFileKey(main_module_file) + "/LogLevel","INFO")
        make_single = utils.profile_get_int(self._project.GetFileKey(main_module_file) + "/MakeSingleExe",False)
        if clean:
            # wipe pyinstaller's cache/work files before building
            args += " --clean"
        if not ask:
            # -y: replace output without prompting
            args += " -y"
        if log_level:
            args += " --log-level " + log_level
        if make_single:
            # -F: one-file bundle, -D: one-folder bundle
            args += " -F"
        else:
            args += " -D"
        if utils.profile_get_int(self._project.GetKey('IsWindowsApplication'),False):
            # -w: windowed (no console), -c: console application
            args += " -w"
        else:
            args += " -c"
        character_set = utils.profile_get_int(self._project.GetFileKey(main_module_file) + "/Character",PyinstallerBaseInformationPanel.CHARACTER_NOTSET)
        if PyinstallerBaseInformationPanel.CHARACTER_ASCII == character_set:
            # -a: do not include unicode encoding support
            args += " -a"
        BaseRunconfig.__init__(self,pyinstaller_tool_path,args,env,start_up,project)
    @property
    def Interpreter(self):
        # the project's configured interpreter (see __init__)
        return self._interpreter
    def GetSpecfilePath(self,file_name=None):
        """Return the configured spec-file path for *file_name* (defaults to
        the project's startup file), generating the default spec if missing."""
        if file_name is None:
            file_name = os.path.basename(self.Project.GetStartupFile().filePath)
        main_module_file = self._project.GetModel().FindFile(file_name)
        default_spec_filepath = self._project.GetDefaultSpecfilePath(file_name)
        spec_file_path = utils.profile_get(self._project.GetFileKey(main_module_file) + "/SpecFilePath",default_spec_filepath)
        # NOTE(review): existence is checked for the *default* path while the
        # *configured* path is returned -- a custom SpecFilePath that does not
        # exist is returned unverified; confirm this is intended.
        if not os.path.exists(default_spec_filepath):
            self.GenerateSepcFile(file_name)
        return spec_file_path
    def GenerateSepcFile(self,file_name):
        """Run pyi-makespec in the project directory to create the spec file.
        (Method name 'Sepc' is a long-standing typo kept for compatibility.)"""
        pyinstallermake_tool_path = GetPyinstallerMakeToolPath(self._interpreter)
        args = " %s"%file_name
        project_path = self._project.GetPath()
        utils.create_process(pyinstallermake_tool_path,args,cwd=project_path)
class PyinstallerProject(PythonProject):
    """Project model whose documents are created via the pyinstaller template."""
    def __init__(self):
        super(PyinstallerProject,self).__init__()
        # route document creation to the pyinstaller project template
        self._runinfo.DocumentTemplate = "pyinstaller.pyinstall.PyinstallerProjectTemplate"
class PyinstallerProjectDocument(PythonProjectDocument):
    """Project document that builds and runs its target through pyinstaller."""
    def __init__(self, model=None):
        PythonProjectDocument.__init__(self,model)
    @staticmethod
    def GetProjectModel():
        """Return the project-model class used for pyinstaller projects."""
        return PyinstallerProject()
    def CheckIsbuiltinInterpreter(self,run_parameter):
        """Reject the IDE's builtin interpreter, which cannot run pyinstaller."""
        if run_parameter.Interpreter.IsBuiltIn:
            raise RuntimeError(_('Builtin Interpreter is not support to run pyinstaller project'))
    def Build(self):
        """Build the project with pyinstaller in the Output view."""
        pyinstaller_run_parameter = self.GetPyinstallerRunParameter()
        if pyinstaller_run_parameter is None:
            return
        self.BuildDebugIndebugger(pyinstaller_run_parameter,finish_stopped=True)
    def Rebuild(self):
        """Force a clean build by appending '--clean' when not already present."""
        pyinstaller_run_parameter = self.GetPyinstallerRunParameter()
        if pyinstaller_run_parameter is None:
            return
        if pyinstaller_run_parameter.Arg.find(" --clean") == -1:
            pyinstaller_run_parameter.Arg += " --clean"
        self.BuildDebugIndebugger(pyinstaller_run_parameter,finish_stopped=True)
    def GetDefaultSpecfilePath(self,file_path):
        """Return '<project dir>/<module name>.spec' for *file_path*."""
        spec_file_path = os.path.join(self.GetPath(),strutils.get_filename_without_ext(os.path.basename(file_path)) + ".spec")
        return spec_file_path
    def GetTargetDir(self,pyinstaller_run_parameter):
        """Return the directory that will contain the built executable."""
        return os.path.dirname(self.GetTargetPath(pyinstaller_run_parameter)[0])
    def GetTargetPath(self,pyinstaller_run_parameter):
        """Return (executable path, make_single flag) for the build output."""
        project_path = self.GetPath()
        dist_path = os.path.join(project_path,'dist')
        main_module_file = self.GetModel().FindFile(pyinstaller_run_parameter.filepath)
        make_single = utils.profile_get_int(self.GetFileKey(main_module_file) + "/MakeSingleExe",False)
        # Windows targets carry the .exe suffix
        if utils.is_windows():
            target_name = "%s.exe"%strutils.get_filename_without_ext(pyinstaller_run_parameter.file_name)
        else:
            target_name = "%s"%strutils.get_filename_without_ext(pyinstaller_run_parameter.file_name)
        if not make_single:
            # one-folder build: dist/<name>/<name>[.exe]
            dist_project_path = os.path.join(dist_path,strutils.get_filename_without_ext(pyinstaller_run_parameter.file_name))
            target_exe_path = os.path.join(dist_project_path,target_name)
        else:
            # one-file build: dist/<name>[.exe]
            target_exe_path = os.path.join(dist_path,target_name)
        return target_exe_path,make_single
    def BuildRunterminal(self,run_parameter):
        """Build the project, then run the produced executable in a terminal."""
        self.CheckIsbuiltinInterpreter(run_parameter)
        executor = TerminalExecutor(run_parameter)
        command1 = executor.GetExecuteCommand()
        target_exe_path = self.GetTargetPath(run_parameter)[0]
        print ('run target exe path',target_exe_path,'in terminal')
        run_parameter = BaseRunconfig(target_exe_path)
        executor = TerminalExecutor(run_parameter)
        command2 = executor.GetExecuteCommand()
        # renamed from 'command' to stop shadowing the module-level
        # 'noval.project.command' import
        full_command = command1 + " && " + command2
        utils.get_logger().debug("start run executable: %s in terminal",full_command)
        startIn = executor.GetStartupPath()
        terminal.run_in_terminal(full_command,startIn,os.environ,keep_open=False,pause=True,title="abc")
    def GetPyinstallerRunParameter(self,filetoRun=None):
        """Wrap the python run parameter into a PyinstallerRunconfig, or None
        when no run parameter is available."""
        python_run_parameter = PythonProjectDocument.GetRunParameter(self,filetoRun)
        if python_run_parameter is None:
            return None
        pyinstaller_run_parameter = PyinstallerRunconfig(python_run_parameter.Interpreter,python_run_parameter.FilePath,'',python_run_parameter.Environment,python_run_parameter.StartupPath,python_run_parameter.Project)
        return pyinstaller_run_parameter
    def RunIndebugger(self):
        """Run the pyinstaller build inside the debugger/output view."""
        pyinstaller_run_parameter = self.GetPyinstallerRunParameter()
        if pyinstaller_run_parameter is None:
            return
        self.BuildDebugIndebugger(pyinstaller_run_parameter)
    def RunInterminal(self,filetoRun=None):
        """Build, then launch the produced executable in a terminal."""
        pyinstaller_run_parameter = self.GetPyinstallerRunParameter(filetoRun)
        if pyinstaller_run_parameter is None:
            return
        self.BuildRunterminal(pyinstaller_run_parameter)
    def RunTarget(self,run_parameter):
        # stub; previously called GetTargetPath() with no argument, which
        # raised TypeError (the method requires a run parameter)
        target_exe_path = self.GetTargetPath(run_parameter)
    def DebugRunTarget(self,run_parameter):
        # stub; same missing-argument fix as RunTarget
        target_exe_path = self.GetTargetPath(run_parameter)
    def BuildDebugIndebugger(self,run_parameter,finish_stopped=False):
        """Execute *run_parameter* in the common Output view."""
        self.CheckIsbuiltinInterpreter(run_parameter)
        view = GetApp().MainFrame.GetCommonView("Output")
        view.SetRunParameter(run_parameter)
        view.GetOutputview().SetTraceLog(True)
        view.CreateExecutor(source="Build",finish_stopped=finish_stopped)
        view.EnableToolbar()
        view.Execute()
    def CleanBuilddir(self):
        """Remove pyinstaller's 'build' work directory."""
        project_path = self.GetPath()
        build_dir = os.path.join(project_path,'build')
        self.Cleandir(build_dir)
    def CleanOutput(self):
        """Remove the built executable (one-file) or its dist folder (one-folder)."""
        pyinstaller_run_parameter = self.GetPyinstallerRunParameter()
        # guard added: GetPyinstallerRunParameter may return None, which
        # previously crashed GetTargetPath
        if pyinstaller_run_parameter is None:
            return
        target_exe_path,make_single = self.GetTargetPath(pyinstaller_run_parameter)
        utils.get_logger().info('target path is %s----------',target_exe_path)
        if make_single:
            self.Cleanfile(target_exe_path)
        else:
            self.Cleandir(self.GetTargetDir(pyinstaller_run_parameter))
    def CleanProject(self):
        """Clean python outputs, the build directory and the dist output."""
        PythonProjectDocument.CleanProject(self)
        self.CleanBuilddir()
        self.CleanOutput()
class PyinstallerProjectTemplate(PythonProjectTemplate):
    """Document template for pyinstaller projects and their property pages."""
    @staticmethod
    def CreateProjectTemplate():
        """Build the template instance used to create pyinstaller project docs."""
        projectTemplate = PyinstallerProjectTemplate(GetApp().GetDocumentManager(),
                              _("Project File"),
                              "*%s" % consts.PROJECT_EXTENSION,
                              os.getcwd(),
                              consts.PROJECT_EXTENSION,
                              "PyinstallerProject Document",
                              _("PyinstallerProject Viewer"),
                              PyinstallerProjectDocument,
                              PythonProjectView,
                              icon = imageutils.getProjectIcon())
        # NOTE(review): the template is immediately disassociated from the
        # document manager -- presumably it is associated elsewhere on demand;
        # confirm against the callers.
        GetApp().GetDocumentManager().DisassociateTemplate(projectTemplate)
        return projectTemplate
    def GetPropertiPages(self):
        """Return the python property pages plus the pyinstaller-specific ones,
        once for per-file items and once for the project root."""
        return PythonProjectTemplate.GetPropertiPages(self) + [("Application information","file","pyinstaller.pyinstall.PyinstallerBaseInformationPanel"),\
                ("Spec option","file","pyinstaller.pyinstall.PyinstallSpecOptionPanel"),("Data files","file","pyinstaller.pyinstall.PyinstallDatafilesPanel"),\
                ("Application information","root","pyinstaller.pyinstall.PyinstallerBaseInformationPanel"),\
                ("Spec option","root","pyinstaller.pyinstall.PyinstallSpecOptionPanel"),("Data files","root","pyinstaller.pyinstall.PyinstallDatafilesPanel")]
class PyinstallerProjectNameLocationPage(BasePythonProjectNameLocationPage):
    """Wizard page asking for the name/location of a new pyinstaller project."""
    def __init__(self,master,**kwargs):
        BasePythonProjectNameLocationPage.__init__(self,master,**kwargs)
        # further wizard pages follow, so this page alone cannot finish
        self.can_finish = False
    def GetProjectTemplate(self):
        """Return the template used to instantiate the project document."""
        return PyinstallerProjectTemplate.CreateProjectTemplate()
class PyinstallerSimpleDemoNameLocationPage(PyinstallerProjectNameLocationPage):
    """Name/location page that also generates a small demo startup script."""
    # demo script written verbatim into the new project's startup file
    demo_code = '''import argparse
def main():
    parser = argparse.ArgumentParser()
    args = parser.parse_args()
if __name__ == "__main__":
    main()
'''
    def __init__(self,master,**kwargs):
        PyinstallerProjectNameLocationPage.__init__(self,master,**kwargs)
        # keep the startup-file path in sync with the project-name entry
        self.name_var.trace("w", self.SetPyinstallProjectStartuppath)
    def SetPyinstallProjectStartuppath(self,*args):
        """Derive '${ProjectDir}/<name>.py' from the current project name."""
        self.startup_path_var.set("${ProjectDir}/%s.py"%self.name_var.get().strip())
    def Finish(self):
        """Create the project, write the demo script and mark it as startup file."""
        if not PyinstallerProjectNameLocationPage.Finish(self):
            return False
        dirName = self.GetProjectLocation()  # NOTE(review): unused -- confirm it can be dropped
        view = GetApp().MainFrame.GetProjectView().GetView()
        startup_file_path = fileutils.opj(self.GetStartupfile())
        with open(startup_file_path,"w") as f:
            f.write(self.demo_code)
        # register the new script with the project, then mark it as startup
        self.new_project_doc.GetCommandProcessor().Submit(command.ProjectAddFilesCommand(self.new_project_doc,[startup_file_path],None))
        view.SetProjectStartupFile()
        return True
class PyinstallerBaseInformationPanel(pyutils.PythonBaseConfigurationPanel):
CHARACTER_NOTSET = 0
CHARACTER_ASCII = 1
CHARACTER_UNICODE = 2
def __init__(self,parent,item,current_project,**kwargs):
pyutils.PythonBaseConfigurationPanel.__init__(self,parent,current_project)
self.columnconfigure(1, weight=1)
self.current_project = current_project
self.item = item
self.is_windows = kwargs.get('is_windows',False)
row_index = 0
if item is None:
root_file_key = "<KEY>
elif item == self.GetCurrentProject().GetFirstView()._treeCtrl.GetRootItem():
self.item = self.GetCurrentProject().GetFirstView()._treeCtrl.FindItem(self.GetCurrentProject().GetModel().StartupFile.filePath)
root_file_key = self.GetCurrentProject().GetFileKey(self.GetCurrentProject().GetModel().StartupFile)
else:
main_module_file = self.GetItemFile(item)
root_file_key = self.GetCurrentProject().GetFileKey(main_module_file)
ttk.Label(self,text=_('Application target name:')).grid(column=0, row=row_index, sticky="nsew",pady=(consts.DEFAUT_CONTRL_PAD_Y,0))
self.target_name_var = tk.StringVar(value=utils.profile_get(root_file_key + "/TargetName"))
target_entry = ttk.Entry(self,textvariable=self.target_name_var)
misc.create_tooltip(target_entry,_("The executable name of application (default: script's basename)"))
target_entry.grid(column=1, row=row_index, sticky="nsew",pady=(consts.DEFAUT_CONTRL_PAD_Y,0),padx=(consts.DEFAUT_CONTRL_PAD_X,0))
row_index += 1
ttk.Label(self, text=_('Application icon path:')).grid(column=0, row=row_index, sticky="nsew",pady=(consts.DEFAUT_CONTRL_PAD_Y,0))
self.icon_path_var = tk.StringVar(value=utils.profile_get(root_file_key + "/IconPath"))
icon_path_entry = ttk.Entry(self,textvariable=self.icon_path_var)
icon_path_entry.grid(column=1, row=row_index, sticky="nsew",pady=(consts.DEFAUT_CONTRL_PAD_Y,0),padx=(consts.DEFAUT_CONTRL_PAD_X,0))
ttk.Button(self, text= _("Browse..."),command=self.SetIconPath).grid(column=2, row=row_index, sticky="nsew",pady=(consts.DEFAUT_CONTRL_PAD_Y,0),padx=(consts.DEFAUT_CONTRL_PAD_X,0))
row_index += 1
ttk.Label(self,text=_('Output folder name:')).grid(column=0, row=row_index, sticky="nsew",pady=(consts.DEFAUT_CONTRL_PAD_Y,0))
self.output_folder_var = tk.StringVar(value=utils.profile_get(root_file_key + "/OutputFolder"))
self.output_folder_entry = ttk.Entry(self,textvariable=self.output_folder_var)
misc.create_tooltip(self.output_folder_entry,_("Specify the folder name when create a one-folder bundle containing an executable (default: script's basename)"))
self.output_folder_entry.grid(column=1, row=row_index, sticky="nsew",pady=(consts.DEFAUT_CONTRL_PAD_Y,0),padx=(consts.DEFAUT_CONTRL_PAD_X,0))
row_index += 1
ttk.Label(self,text=_('Character Set:')).grid(column=0, row=row_index, sticky="nsew",pady=(consts.DEFAUT_CONTRL_PAD_Y,0))
self.character_sets = ('Not Set','Use Ascii Character Set','Use Unicode Character Set')
self.character_var = tk.StringVar(value=self.character_sets[utils.profile_get_int(root_file_key + "/Character",self.CHARACTER_NOTSET)])
character_entry = ttk.Combobox(self,textvariable=self.character_var,values=self.character_sets,state="readonly")
misc.create_tooltip(character_entry,"The unicode encoding support (default: included if available)")
character_entry.grid(column=1, row=row_index, sticky="nsew",pady=(consts.DEFAUT_CONTRL_PAD_Y,0),padx=(consts.DEFAUT_CONTRL_PAD_X,0))
row_index += 1
frame = ttk.Frame(self)
frame.grid(column=0, row=row_index, sticky="nsew",pady=(consts.DEFAUT_CONTRL_PAD_Y,0),columnspan=3)
self.work_default_var = tk.BooleanVar(value=utils.profile_get_int(root_file_key + "/UseDefaultWork",True))
sbox = ttk.LabelFrame(frame, text=_("Work directory:"))
ttk.Checkbutton(sbox,text=_('Use Default'),variable=self.work_default_var,command=self.SetDefaultWorkpath).pack(fill="x",padx=consts.DEFAUT_CONTRL_PAD_X)
ttk.Label(sbox, text=_('Path:')).pack(side=tk.LEFT,padx=(consts.DEFAUT_CONTRL_PAD_X,0),pady=(0,consts.DEFAUT_CONTRL_PAD_Y))
self.work_path_var = tk.StringVar(value="./build")
self.work_path_entry = ttk.Entry(sbox,textvariable=self.work_path_var)
self.work_path_entry.pack(side=tk.LEFT,fill="x",expand=1,padx=(consts.DEFAUT_CONTRL_PAD_X,0),pady=(0,consts.DEFAUT_CONTRL_PAD_Y))
misc.create_tooltip(self.work_path_entry,_('Where to put all the temporary work files, .log, .pyz and etc. (default: ./build)'))
self.work_default_btn = ttk.Button(sbox, text= _("Browse..."),command=self.SetWorkPath)
self.work_default_btn.pack(side=tk.LEFT,padx=(consts.DEFAUT_CONTRL_PAD_X,0),pady=(0,consts.DEFAUT_CONTRL_PAD_Y))
sbox.pack(fill="x")
self.output_default_var = tk.BooleanVar(value=utils.profile_get_int(root_file_key + "/UseDefaultDist",True))
sbox = ttk.LabelFrame(frame, text=_("Dist directory:"))
ttk.Checkbutton(sbox,text=_('Use Default'),variable=self.output_default_var,command=self.SetDefaultDistpath).pack(fill="x",padx=consts.DEFAUT_CONTRL_PAD_X)
ttk.Label(sbox, text=_('Path:')).pack(side=tk.LEFT,padx=(consts.DEFAUT_CONTRL_PAD_X,0),pady=(0,consts.DEFAUT_CONTRL_PAD_Y))
self.dist_path_var = tk.StringVar(value="./dist")
self.dist_path_entry = ttk.Entry(sbox,textvariable=self.dist_path_var)
self.dist_path_entry.pack(side=tk.LEFT,fill="x",expand=1,padx=(consts.DEFAUT_CONTRL_PAD_X,0),pady=(0,consts.DEFAUT_CONTRL_PAD_Y))
misc.create_tooltip(self.dist_path_entry,_('Where to put the bundled app (default: ./dist)'))
self.dist_default_btn = ttk.Button(sbox, text= _("Browse..."),command=self.SetDistPath)
self.dist_default_btn.pack(side=tk.LEFT,padx=(consts.DEFAUT_CONTRL_PAD_X,0),pady=(0,consts.DEFAUT_CONTRL_PAD_Y))
sbox.pack(fill="x",pady=(consts.DEFAUT_CONTRL_PAD_Y,0))
row_index += 1
self.make_single_var = tk.BooleanVar(value=utils.profile_get_int(root_file_key + "/MakeSingleExe",False))
check_single_btn = ttk.Checkbutton(self,text=_('Make a single exetuable file'),variable=self.make_single_var,command=self.SetSingleFile)
check_single_btn.grid(column=0, row=row_index, sticky="nsew",columnspan=2,pady=consts.DEFAUT_CONTRL_PAD_Y)
self.SetDefaultWorkpath()
self.SetDefaultDistpath()
self.DisableNoPythonfile(item)
def SetIconPath(self):
descr = (_("Icon File"),'.ico')
path = filedialog.askopenfilename(
master=self,
filetypes=[descr]
)
if not path:
return
self.icon_path_var.set(fileutils.opj(path))
    def SetWorkPath(self):
        """Browse for the pyinstaller work (build) directory."""
        SetVariablevar(self.work_path_var)
    def SetDistPath(self):
        """Browse for the pyinstaller dist (output) directory."""
        SetVariablevar(self.dist_path_var)
def SetDefaultWorkpath(self):
if self.work_default_var.get():
self.work_path_entry['state'] = tk.DISABLED
self.work_default_btn['state'] = tk.DISABLED
else:
self.work_path_entry['state'] = tk.NORMAL
self.work_default_btn['state'] = tk.NORMAL
def SetDefaultDistpath(self):
if self.output_default_var.get():
self.dist_path_entry['state'] = tk.DISABLED
self.dist_default_btn['state'] = tk.DISABLED
else:
self.dist_path_entry['state'] = tk.NORMAL
self.dist_default_btn['state'] = tk.NORMAL
def SetSingleFile(self):
if self.make_single_var.get():
self.output_folder_entry['state'] = tk.DISABLED
else:
self.output_folder_entry['state'] = tk.NORMAL
def OnOK(self,optionsDialog=None):
interpreter = self.GetInterpreter()
if not CheckPyinstaller(interpreter,parent=self) or interpreter is None:
return False
target_name = self.target_name_var.get()
output_folder = self.output_folder_var.get()
character = self.character_var.get()
self.work_default_var.get()
self.dist_path_var.get()
pyinstallermake_tool_path = GetPyinstallerMakeToolPath(interpreter)
icon_path = self.icon_path_var.get().strip()
args = ""
if icon_path:
args += " -i %s"%icon_path
option_panel = self.GetOptionPanel()
spec_name = option_panel.spec_name_var.get()
if spec_name:
args += " -n %s"%spec_name
spec_path = option_panel.spec_path_var.get()
if spec_path:
args += " --specpath %s"%spec_path
if self.IsSpecExist():
ret = messagebox.askyesno(_("Spec file exist"),_("Spec file already exist in spec path,Do you want to replace it?"),parent=self)
if not ret:
utils.profile_set(self.GetCurrentProject().GetFileKey(self.GetStartupfile(),'SpecFilePath'),self.GetSpecfilePath())
return True
if option_panel.hidden_imports_var.get().strip():
hidden_imports = option_panel.hidden_imports_var.get().split(",")
for hidden_import in hidden_imports:
args += " --hidden-import %s"%hidden_import
if self.make_single_var.get():
args += " -F"
| |
of warnings produced by the last statement execution.
ATTENTION: This function will be removed in a future release, use
the get_warnings_count function instead.
get_warnings()
Retrieves the warnings generated by the executed operation.
get_warnings_count()
The number of warnings produced by the last statement execution.
help([member])
Provides help about this class and it's members
#@<OUT> Help on Result
NAME
Result - Allows retrieving information about non query operations
performed on the database.
DESCRIPTION
An instance of this class will be returned on the CRUD operations that
change the content of the database:
- On Table: insert, update and delete
- On Collection: add, modify and remove
Other functions on the Session class also return an instance of this
class:
- Transaction handling functions
PROPERTIES
affected_item_count
Same as get_affected_item_count
ATTENTION: This property will be removed in a future release, use
the affected_items_count property instead.
affected_items_count
Same as get_affected_items_count
auto_increment_value
Same as get_auto_increment_value
execution_time
Same as get_execution_time
generated_ids
Same as get_generated_ids.
warning_count
Same as get_warning_count
ATTENTION: This property will be removed in a future release, use
the warnings_count property instead.
warnings
Same as get_warnings
warnings_count
Same as get_warnings_count
FUNCTIONS
get_affected_item_count()
        The number of affected items for the last operation.
ATTENTION: This function will be removed in a future release, use
the get_affected_items_count function instead.
get_affected_items_count()
        The number of affected items for the last operation.
get_auto_increment_value()
The last insert id auto generated (from an insert operation)
get_execution_time()
Retrieves a string value indicating the execution time of the
executed operation.
get_generated_ids()
Returns the list of document ids generated on the server.
get_warning_count()
The number of warnings produced by the last statement execution.
ATTENTION: This function will be removed in a future release, use
the get_warnings_count function instead.
get_warnings()
Retrieves the warnings generated by the executed operation.
get_warnings_count()
The number of warnings produced by the last statement execution.
help([member])
Provides help about this class and it's members
#@<OUT> Help on RowResult
NAME
RowResult - Allows traversing the Row objects returned by a Table.select
operation.
DESCRIPTION
Allows traversing the Row objects returned by a Table.select operation.
PROPERTIES
affected_items_count
Same as get_affected_items_count
column_count
Same as get_column_count
column_names
Same as get_column_names
columns
Same as get_columns
execution_time
Same as get_execution_time
warning_count
Same as get_warning_count
ATTENTION: This property will be removed in a future release, use
the warnings_count property instead.
warnings
Same as get_warnings
warnings_count
Same as get_warnings_count
FUNCTIONS
fetch_all()
Returns a list of DbDoc objects which contains an element for every
unread document.
fetch_one()
Retrieves the next Row on the RowResult.
fetch_one_object()
Retrieves the next Row on the result and returns it as an object.
get_affected_items_count()
        The number of affected items for the last operation.
get_column_count()
Retrieves the number of columns on the current result.
get_column_names()
Gets the columns on the current result.
get_columns()
Gets the column metadata for the columns on the active result.
get_execution_time()
Retrieves a string value indicating the execution time of the
executed operation.
get_warning_count()
The number of warnings produced by the last statement execution.
ATTENTION: This function will be removed in a future release, use
the get_warnings_count function instead.
get_warnings()
Retrieves the warnings generated by the executed operation.
get_warnings_count()
The number of warnings produced by the last statement execution.
help([member])
Provides help about this class and it's members
#@<OUT> Help on Schema
NAME
Schema - Represents a Schema as retrieved from a session created using
the X Protocol.
DESCRIPTION
View Support
MySQL Views are stored queries that when executed produce a result set.
      MySQL supports the concept of Updatable Views: if specific conditions are
met, Views can be used not only to retrieve data from them but also to
update, add and delete records.
For the purpose of this API, Views behave similar to a Table, and so they
are treated as Tables.
Tables and Collections as Properties
A Schema object may expose tables and collections as properties, this way
they can be accessed as:
- schema.<collection_name>
- schema.<table_name>
This handy way of accessing tables and collections is available if they
met the following conditions:
- They existed at the moment the Schema object was retrieved from the
session.
- The name is a valid identifier.
- The name is different from any other property or function on the Schema
object.
If any of the conditions is not met, the way to access the table or
collection is by using the standard DevAPI functions:
- schema.get_table(<name>)
- schema.get_collection(<name>)
PROPERTIES
name
The name of this database object.
schema
The Schema object of this database object.
session
The Session object of this database object.
Some tables and collections are also exposed as properties of the Schema
object. For details look at 'Tables and Collections as Properties' on the
DETAILS section.
FUNCTIONS
create_collection(name[, options])
Creates in the current schema a new collection with the specified
name and retrieves an object representing the new collection
created.
drop_collection()
Drops the specified collection.
exists_in_database()
Verifies if this object exists in the database.
get_collection(name)
Returns the Collection of the given name for this schema.
get_collection_as_table(name)
Returns a Table object representing a Collection on the database.
get_collections()
Returns a list of Collections for this Schema.
get_name()
Returns the name of this database object.
get_schema()
Returns the Schema object of this database object.
get_session()
Returns the Session object of this database object.
get_table(name)
Returns the Table of the given name for this schema.
get_tables()
Returns a list of Tables for this Schema.
help([member])
Provides help about this class and it's members
modify_collection(name, options)
Modifies the schema validation of a collection.
RELATED TOPICS
- Dynamic Properties
#@<OUT> Help on Session
NAME
Session - Enables interaction with a MySQL Server using the X Protocol.
DESCRIPTION
Document Store functionality can be used through this object, in addition
to SQL.
This class allows performing database operations such as:
- Schema management operations.
- Access to relational tables.
- Access to Document Store collections.
- Enabling/disabling warning generation.
- Retrieval of connection information.
PROPERTIES
current_schema
Retrieves the active schema on the session.
default_schema
Retrieves the Schema configured as default for the session.
uri
Retrieves the URI for the current session.
FUNCTIONS
close()
Closes the session.
commit()
Commits all the operations executed after a call to
start_transaction().
create_schema(name)
Creates a schema on the database and returns the corresponding
object.
drop_schema(name)
Drops the schema with the specified name.
get_current_schema()
Retrieves the active schema on the session.
get_default_schema()
Retrieves the Schema configured as default for the session.
get_schema(name)
Retrieves a Schema object from the current session through it's
name.
get_schemas()
Retrieves the Schemas available on the session.
get_uri()
Retrieves the URI for the current session.
help([member])
Provides help about this class and it's members
is_open()
Returns true if session is known to be open.
quote_name(id)
Escapes the passed identifier.
release_savepoint(name)
Removes a savepoint defined on a transaction.
rollback()
Discards all the operations executed after a call to
start_transaction().
rollback_to(name)
Rolls back the transaction to the named savepoint without
terminating the transaction.
run_sql(query[, args])
Executes a query and returns the corresponding SqlResult object.
set_current_schema(name)
Sets the current schema for this session, and returns the schema
object for it.
set_fetch_warnings(enable)
Enables or disables warning generation.
set_savepoint([name])
Creates or replaces a transaction savepoint with the given name.
sql(statement)
Creates a SqlExecute object to allow running the received SQL
statement on the target MySQL Server.
start_transaction()
Starts a transaction context on the server.
#@<OUT> Help on SqlExecute
NAME
SqlExecute - Handler for execution SQL statements, supports parameter
binding.
DESCRIPTION
This object should only be created by calling the sql function at a
Session instance.
FUNCTIONS
bind(data)
Registers a value or a list of values to be bound on the execution
of the SQL statement.
execute()
Executes the sql statement.
help([member])
Provides help about this class and it's members
sql(statement)
Sets the sql statement to be executed by this handler.
#@<OUT> Help on Table
NAME
Table - Represents a Table on an Schema, retrieved with a session created
using mysqlx module.
DESCRIPTION
Represents | |
generated will satisfy the property,
but there will be some missing.
- ``size`` -- (default: ``None``) the size of the graph to be generated.
- ``degree_sequence`` -- (default: ``None``) a sequence of non-negative integers,
or ``None``. If specified, the generated graphs will have these
integers for degrees. In this case, property and size are both
ignored.
- ``loops`` -- (default: ``False``) whether to allow loops in the graph
or not.
- ``sparse`` -- (default: ``True``); whether to use a sparse or dense data
structure. See the documentation of :class:`~sage.graphs.graph.Graph`.
- ``copy`` (boolean) -- If set to ``True`` (default)
this method makes copies of the graphs before returning
them. If set to ``False`` the method returns the graph it
is working on. The second alternative is faster, but modifying
any of the graph instances returned by the method may break
the function's behaviour, as it is using these graphs to
compute the next ones: only use ``copy = False`` when
you stick to *reading* the graphs returned.
EXAMPLES:
Print graphs on 3 or less vertices::
sage: for G in graphs(3, augment='vertices'):
....: print(G)
Graph on 0 vertices
Graph on 1 vertex
Graph on 2 vertices
Graph on 3 vertices
Graph on 3 vertices
Graph on 3 vertices
Graph on 2 vertices
Graph on 3 vertices
Print graphs on 3 vertices.
::
sage: for G in graphs(3):
....: print(G)
Graph on 3 vertices
Graph on 3 vertices
Graph on 3 vertices
Graph on 3 vertices
Generate all graphs with 5 vertices and 4 edges.
::
sage: L = graphs(5, size=4)
sage: len(list(L))
6
Generate all graphs with 5 vertices and up to 4 edges.
::
sage: L = list(graphs(5, lambda G: G.size() <= 4))
sage: len(L)
14
sage: graphs_list.show_graphs(L) # long time
Generate all graphs with up to 5 vertices and up to 4 edges.
::
sage: L = list(graphs(5, lambda G: G.size() <= 4, augment='vertices'))
sage: len(L)
31
sage: graphs_list.show_graphs(L) # long time
Generate all graphs with degree at most 2, up to 6 vertices.
::
sage: property = lambda G: ( max([G.degree(v) for v in G] + [0]) <= 2 )
sage: L = list(graphs(6, property, augment='vertices'))
sage: len(L)
45
Generate all bipartite graphs on up to 7 vertices: (see
:oeis:`A033995`)
::
sage: L = list( graphs(7, lambda G: G.is_bipartite(), augment='vertices') )
sage: [len([g for g in L if g.order() == i]) for i in [1..7]]
[1, 2, 3, 7, 13, 35, 88]
Generate all bipartite graphs on exactly 7 vertices::
sage: L = list( graphs(7, lambda G: G.is_bipartite()) )
sage: len(L)
88
Generate all bipartite graphs on exactly 8 vertices::
sage: L = list( graphs(8, lambda G: G.is_bipartite()) ) # long time
sage: len(L) # long time
303
Remember that the property argument does not behave as a filter,
except for appropriately inheritable properties::
sage: property = lambda G: G.is_vertex_transitive()
sage: len(list(graphs(4, property)))
1
sage: sum(1 for g in graphs(4) if property(g))
4
sage: property = lambda G: G.is_bipartite()
sage: len(list(graphs(4, property)))
7
sage: sum(1 for g in graphs(4) if property(g))
7
Generate graphs on the fly: (see :oeis:`A000088`)
::
sage: for i in range(7):
....: print(len(list(graphs(i))))
1
1
2
4
11
34
156
Generate all simple graphs, allowing loops: (see :oeis:`A000666`)
::
sage: L = list(graphs(5,augment='vertices',loops=True)) # long time
sage: for i in [0..5]: # long time
....: print((i, len([g for g in L if g.order() == i]))) # long time
(0, 1)
(1, 2)
(2, 6)
(3, 20)
(4, 90)
(5, 544)
Generate all graphs with a specified degree sequence (see :oeis:`A002851`)::
sage: for i in [4,6,8]: # long time (4s on sage.math, 2012)
....: print((i, len([g for g in graphs(i, degree_sequence=[3]*i) if g.is_connected()])))
(4, 1)
(6, 2)
(8, 5)
sage: for i in [4,6,8]: # long time (7s on sage.math, 2012)
....: print((i, len([g for g in graphs(i, augment='vertices', degree_sequence=[3]*i) if g.is_connected()])))
(4, 1)
(6, 2)
(8, 5)
::
sage: print((10, len([g for g in graphs(10,degree_sequence=[3]*10) if g.is_connected()]))) # not tested
(10, 19)
Make sure that the graphs are really independent and the generator
survives repeated vertex removal (:trac:`8458`)::
sage: for G in graphs(3):
....: G.delete_vertex(0)
....: print(G.order())
2
2
2
2
REFERENCE:
- Brendan D. McKay, Isomorph-Free Exhaustive generation. *Journal
of Algorithms*, Volume 26, Issue 2, February 1998, pages 306-324.
"""
###########################################################################
# Graph Iterators
###########################################################################
def __call__(self, vertices=None, property=None, augment='edges',
             size=None, degree_sequence=None, loops=False, sparse=True, copy = True):
    """
    Accesses the generator of isomorphism class representatives.

    Iterates over distinct, exhaustive representatives. See the docstring
    of this class for full documentation.

    EXAMPLES:

    Print graphs on 3 or less vertices::

        sage: for G in graphs(3, augment='vertices'):
        ....:     print(G)
        Graph on 0 vertices
        Graph on 1 vertex
        Graph on 2 vertices
        Graph on 3 vertices
        Graph on 3 vertices
        Graph on 3 vertices
        Graph on 2 vertices
        Graph on 3 vertices

    ::

        sage: for g in graphs():
        ....:     if g.num_verts() > 3: break
        ....:     print(g)
        Graph on 0 vertices
        Graph on 1 vertex
        Graph on 2 vertices
        Graph on 2 vertices
        Graph on 3 vertices
        Graph on 3 vertices
        Graph on 3 vertices
        Graph on 3 vertices

    For more examples, see the class level documentation, or type::

        sage: graphs? # not tested

    REFERENCE:

    - Brendan D. McKay, Isomorph-Free Exhaustive generation.
      Journal of Algorithms Volume 26, Issue 2, February 1998,
      pages 306-324.
    """
    # Use nauty for the basic case, as it is much faster.
    # This fast path only applies when no filtering/augmentation options are
    # set; note ``vertices`` must also be truthy (nonzero), so vertices=0
    # falls through to the generic machinery below.
    if (vertices and property is None and size is None and
        degree_sequence is None and not loops and augment == 'edges' and
        sparse and copy):
        for g in graphs.nauty_geng(vertices):
            yield g
        return
    if property is None:
        # Default property: accept every graph.
        def property(x):
            return True
    from sage.graphs.all import Graph
    from copy import copy as copyfun
    if degree_sequence is not None:
        # A degree sequence overrides ``property`` and ``size`` entirely
        # (see class docstring): ``property`` becomes "degrees are dominated
        # by the target sequence" (inheritable, so the search can prune) and
        # ``extra_property`` is the exact final check.
        if vertices is None:
            raise NotImplementedError
        # Sanity checks: correct length, even degree sum (handshake lemma),
        # and sum bounded by the number of possible edge endpoints.
        if len(degree_sequence) != vertices or sum(degree_sequence)%2 or sum(degree_sequence) > vertices*(vertices-1):
            raise ValueError("Invalid degree sequence.")
        degree_sequence = sorted(degree_sequence)
        if augment == 'edges':
            def property(x):
                D = sorted(x.degree())
                return all(degree_sequence[i] >= d for i, d in enumerate(D))
            def extra_property(x):
                return degree_sequence == sorted(x.degree())
        else:
            # Vertex augmentation: pad the degree list with zeros for the
            # vertices not yet added, so partial graphs compare correctly.
            def property(x):
                D = sorted(x.degree() + [0] * (vertices - x.num_verts()))
                return all(degree_sequence[i] >= d for i, d in enumerate(D))
            def extra_property(x):
                if x.num_verts() != vertices:
                    return False
                return degree_sequence == sorted(x.degree())
    elif size is not None:
        # Only emit graphs with exactly the requested number of edges.
        def extra_property(x):
            return x.size() == size
    else:
        def extra_property(x):
            return True
    if augment == 'vertices':
        if vertices is None:
            raise NotImplementedError
        # Start from the empty graph and canonically augment by vertices.
        g = Graph(loops=loops, sparse=sparse)
        for gg in canaug_traverse_vert(g, [], vertices, property, loops=loops, sparse=sparse):
            if extra_property(gg):
                # Yield a copy unless the caller opted into copy=False
                # (faster, but the yielded graphs must not be modified).
                yield copyfun(gg) if copy else gg
    elif augment == 'edges':
        if vertices is None:
            # No size limit: enumerate graphs of every order, forever.
            # This loop never terminates; callers are expected to break.
            from sage.rings.all import Integer
            vertices = Integer(0)
            while True:
                for g in self(vertices, loops=loops, sparse=sparse):
                    yield copyfun(g) if copy else g
                vertices += 1
        # Start from the empty graph on ``vertices`` vertices and augment by edges.
        g = Graph(vertices, loops=loops, sparse=sparse)
        # Build generators of the symmetric group S_n as adjacent
        # transpositions: each ``gen`` swaps i and i+1 and fixes the rest.
        gens = []
        for i in range(vertices-1):
            gen = list(range(i))
            gen.append(i+1)
            gen.append(i)
            gen += list(range(i + 2, vertices))
            gens.append(gen)
        for gg in canaug_traverse_edge(g, gens, property, loops=loops, sparse=sparse):
            if extra_property(gg):
                yield copyfun(gg) if copy else gg
    else:
        # Only 'vertices' and 'edges' augmentation schemes are implemented.
        raise NotImplementedError
def nauty_geng(self, options="", debug=False):
r"""
Return a generator which creates graphs from nauty's geng program.
INPUT:
- ``options`` -- string (default: ``""``); a string passed to ``geng``
as if it was run at a system command line. At a minimum, you *must*
pass the number of vertices you desire. Sage expects the graphs to be
in nauty's "graph6" format, do not set an option to change this
default or results will be unpredictable.
- ``debug`` -- boolean (default: ``False``); if ``True`` the first line
of ``geng``'s output to standard error is captured and the first call
to the generator's ``next()`` function will return this line as a
string. A line leading with ">A" indicates a successful initiation of
the program with some information on the arguments, while a line
beginning with ">E" indicates an error with the input.
The possible options, obtained as output | |
from process_pf import *
from process_ez import *
from process_full import *
import pandas as pd
from numpy import random
import logging
import dask
import dask.dataframe as dd
# NOTE(review): dask.set_options(get=...) was removed in dask >= 0.19; the
# modern equivalent is dask.config.set(scheduler='processes'). Confirm the
# pinned dask version before upgrading.
dask.set_options(get=dask.multiprocessing.get) #switch from default multithreading to multiprocessing
# Code by <NAME> (<EMAIL>), 2016-2017
class Deduplicate():
    """
    Class that holds the deduplicate method, which knows how to select the "best" of any given EIN that
    shows up more than once in the index. This was originally split into its own class so it could be
    inherited in multiple places, but that is now redundant; it is only inherited and called by the Write
    class.
    """
    def deduplicate(self, source, form, dup_criteria):
        """
        Method for efficiently removing duplicate EINs based on criteria specified in functions found in
        process_co_pc and process_pf.

        Originally the process was handled by building a column of tuples to sort by, but because of the
        large apply it was very slow. The new process works by building temporary columns in order to take
        advantage of vectorized operations under the hood of Pandas.

        ARGUMENTS
        source (dict) : A dictionary of DataFrames, keyed by form name, each indexed by EIN
        form (str) : The current form, e.g. 'CO', 'PC'
        dup_criteria (func) : The appropriate function for deduplicating the current form.
            NOTE(review): assumed to return (dups, conditions) and to create the 'val'
            column dropped below — confirm against process_co_pc / process_pf.

        RETURNS
        DataFrame (with unique index)
        """
        main = self.main
        #first discard any entries with FISYR that is more than two years behind the release year
        release_year = main.data.core_file_year
        df = source[form][source[form]['FISYR'] >= release_year-2]
        #split the data into one dataframe of all duplicates:
        dups = df[
            df.index.duplicated(keep=False)
        ].copy(deep=True)
        # random tie-breaker column; dup_criteria can use it to pick arbitrarily
        # among rows that are otherwise equal on every criterion
        dups['rnd'] = random.random(len(dups))
        #and one dataframe of the unique eins
        df = df[
            ~df.index.duplicated(keep=False)
        ].copy(deep=True)
        main.logger.info('Removing duplicate EINs from {}... '.format(form))
        #this old method works but is super slow due to the apply
        # #add a column where the duplicate selection criteria are in order, in a tuple
        # dups['dup_criteria'] = dup_criteria(dups)
        #
        # #take only the obs from each EIN with the max value of 'dup_criteria', which is calculated left to right
        # singled = dups.reset_index().groupby('EIN').apply(
        #     lambda dups: dups[dups['dup_criteria'] == dups['dup_criteria'].max()]
        # ).set_index('EIN')
        # #drop the dup_criteria rand rnd columns
        # singled.drop('dup_criteria', axis=1, inplace=True)
        # singled.drop('rnd', axis=1, inplace=True)
        release_year = main.data.core_file_year #an int, e.g. 2005, when the primary FISYR should be 2005
        start_len = len(dups)
        dups, conditions = dup_criteria(dups)
        # Apply each criterion left-to-right: keep only the rows of each EIN group
        # that attain the group maximum, shrinking the duplicate pool each pass.
        for cond in conditions:
            if cond == 'FISYR' and form != 'PF':
                #need to check FISYR for dups, to make sure we avoid the situation where, for example, the prior release was 2014 but the current release has both 2015 and
                #2016 in it. if we just take the newest FISYR in that case, then the 2015 data will never appear in any release. but we can't just take the 2015 data over
                #the 2016 data, because some firms may have had 2015 data in the prior release, and thus the 2016 data SHOULD be in this release.
                #
                #excludes PF because prior year data is not merged into the PF file
                dups['fisyr_max_minus'] = dups.groupby('EIN').apply(lambda g: ((g['FISYR'] == release_year) & (g['FISYRP'] == release_year-1)) | ((g['FISYR'] == release_year+1) & (g['FISYRP'].isnull()))).groupby('EIN').sum() == 2
            else:
                dups['fisyr_max_minus'] = False
            #dup_maxes = (dups.reset_index().groupby('EIN')[cond].max() - dups.groupby('EIN')['fisyr_max_minus'].max()).to_frame() #will be -0 for all non-FISYR conditions, otherwise will subtract 1 if it meets the above condition for FISYR
            #dups = dups.merge(dup_maxes, left_index=True, right_index=True, how='left', suffixes=('', '_max'))
            # Groupby result is indexed by EIN, so this assignment aligns on the index.
            dups[cond+'_max'] = dups.groupby('EIN')[cond].max() - dups.groupby('EIN')['fisyr_max_minus'].max() #will be -0 for all non-FISYR conditions, otherwise will subtract 1 if it meets the above condition for FISYR
            first_len = len(dups)
            dups = dups[dups[cond] == dups[cond+'_max']]
            after_len = len(dups)
            main.logger.info('    {} EINs dropped based on {} from {}.'.format(first_len-after_len, cond, form))
        # Remove the temporary helper columns before recombining.
        dups.drop([c+'_max' for c in conditions], axis=1, inplace=True)
        dups.drop(['val', 'rnd', 'fisyr_max_minus'], axis=1, inplace=True)
        #merge the single-fied duplicate observations back to the original data
        df = pd.concat([df, dups])#singled])
        assert(df.index.is_unique), 'Deduplication process did not result in unique EINs in {}'.format(form)
        main.logger.info('{} complete, {} total observations dropped.'.format(form, start_len-len(dups)))
        return df
class Process(ProcessPF, ProcessEZ, ProcessFull, Deduplicate):
"""
Base class for the calculation of all new columns in the core files. It holds the methods to create
any columns that appear in all three 990 forms (calculated at the initial Full-EZ-PF level, not the
final CO-PC-PF level), while it inherits the methods used by only one or two of the forms from the
ProcessPF, ProcessEZ and ProcessFull classes.
"""
def __init__(self, main, parallelize=False):
    """
    Store the shared pipeline object and the Dask parallelization settings.

    ARGUMENTS
    main (object) : The main pipeline object; provides logger, forms, and data_dict
    parallelize (bool) : Whether to run the column calculations through Dask
        (experimental; not recommended — see calculate_columns)
    """
    self.main, self.parallelize = main, parallelize
    # Rows per Dask partition when parallelize is enabled.
    self.chunksize = 100000
def manual_fixes(self):
    """
    Apply any manual, one-time fixes to the data. Such a fix is usually a change
    to a single EIN from a single year, in a non-generalizable way, e.g. a
    mistyped EIN in the raw IRS data.

    ARGUMENTS
    None

    RETURNS
    None
    """
    # Dispatch to the per-form fixers in the same order as before: PF, EZ, Full.
    fixers = (
        ('PF', self.pf_manual),
        ('EZ', self.ez_manual),
        ('Full', self.full_manual),
    )
    active_forms = self.main.forms
    for form, fixer in fixers:
        if form in active_forms:
            fixer()
def calculate_columns(self):
    """
    Base method for creating all new, calculated columns in the core files. The option to parallelize
    the calculations using Dask was added here, but was not found to speed the apply process up, so
    its use is not recommended.

    ARGUMENTS
    None

    RETURNS
    None
    """
    main = self.main
    #process columns common to all forms:
    main.logger.info('Calculating new columns common to all dataframes.')
    for form in main.forms:
        df = main.data_dict[form]
        assert(df.index.name == 'EIN')
        # Simple vectorized columns, always computed in pandas.
        df['FISYR'] = self.all_fisyr(df)
        df['ACCPER'] = self.all_accper(df)
        df['NCCSKEY'], df['NCCSKEY2'] = self.all_nccskey(df)
        df['RANDNUM'] = self.all_randnum(df)
        df['NTEECC'] = self.all_nteecc(df)
        df['NTEE1'] = self.all_ntee1(df)
        df['NTEEFINAL1'] = self.all_nteefinal1(df)
        df['LEVEL4'] = self.all_level4(df)
        if self.parallelize:
            # Experimental Dask path: each all_* helper returns the frame with a
            # 'dask_result' column appended (via parallel_apply), which is then
            # renamed to the real column name before the next step.
            # NOTE(review): this branch rebinds the local ``df`` (from_pandas /
            # compute) but never writes it back to main.data_dict[form], so the
            # computed columns appear to be lost on this path — confirm before
            # enabling parallelize.
            df = dd.from_pandas(df, chunksize=self.chunksize)
            df = self.all_level1(df)
            df = df.rename(columns={'dask_result':'LEVEL1'})
            df = self.all_ntmaj10(df)
            df = df.rename(columns={'dask_result':'NTMAJ10'})
            df = self.all_majgrpb(df)
            df = df.rename(columns={'dask_result':'MAJGRPB'})
            df = self.all_level3(df)
            df = df.rename(columns={'dask_result':'LEVEL3'})
            df = self.all_level2(df)
            df = df.rename(columns={'dask_result':'LEVEL2'})
            df = self.all_ntmaj12(df)
            df = df.rename(columns={'dask_result':'NTMAJ12'})
            df = self.all_ntmaj5(df)
            df = df.rename(columns={'dask_result':'NTMAJ5'})
            df = df.compute()
        else:
            # Plain pandas path: mutates main.data_dict[form] in place.
            df['LEVEL1'] = self.all_level1(df)
            df['NTMAJ10'] = self.all_ntmaj10(df)
            df['MAJGRPB'] = self.all_majgrpb(df)
            df['LEVEL3'] = self.all_level3(df)
            df['LEVEL2'] = self.all_level2(df)
            df['NTMAJ12'] = self.all_ntmaj12(df)
            df['NTMAJ5'] = self.all_ntmaj5(df)
    # Form-specific columns, computed once after the common pass.
    if 'PF' in main.forms:
        self.pf_calculate()
    if 'EZ' in main.forms:
        self.ez_calculate()
    if 'Full' in main.forms:
        self.full_calculate()
    main.logger.info('All columns calculated.\n')
# def handle_duplicates(self):
# #Redundant method: deduplication moved to Write class.
# main = self.main
#
# from process_full import full_dup_criteria
# from process_ez import ez_dup_criteria
# from process_pf import pf_dup_criteria
# dup_crit_fns = {'EZ':ez_dup_criteria, 'Full':full_dup_criteria, 'PF':pf_dup_criteria}
#
# for form in main.forms:
# dup_criteria = dup_crit_fns[form]
# main.data_dict[form] = self.deduplicate(main.data_dict, form, dup_criteria)
#
# main.logger.info('All duplicate EINs removed.\n')
def parallel_apply(self, df, func):
    """
    Experimental. Run ``func`` over each partition of a Dask dataframe,
    storing the output in a new 'dask_result' column.

    ARGUMENTS
    df (dask.core.DataFrame) : A Dask dataframe
    func (func) : The function that needs to be applied in parallel;
        receives one pandas partition at a time via map_partitions

    RETURNS
    dask.core.DataFrame (with a 'dask_result' column of strings added)
    """
    assert(isinstance(df, dd.core.DataFrame)), 'A non-Dask dataframe was sent to the parallel_apply method.'
    # meta=str declares the result dtype so Dask can build the graph lazily.
    return df.assign(dask_result=df.map_partitions(func, meta=str))
def all_randnum(self, df):
    """
    Generates a NumPy array of random numbers, the same length as the core file dataframe.

    ARGUMENTS
    df (DataFrame) : Core file dataframe

    RETURNS
    Array (float64 values uniformly drawn from [0, 1))
    """
    n_rows = len(df)
    return random.random(size=n_rows)
def all_nccskey(self, df):
    """
    Generates two new string columns, one in the form EIN+TAXPER, the other EIN+FISYR.

    ARGUMENTS
    df (DataFrame) : Core file dataframe, indexed by EIN
        (NOTE: assumes EIN and TAXPER are already strings — confirm upstream dtypes)

    RETURNS
    Two Series, both aligned to df's EIN index
    """
    # Removed unused local ``main = self.main`` — this method never touched it.
    temp_df = df.reset_index()  # pulls EIN out of the index so it can be concatenated
    # Joins the columns as strings. Note, TAXPER is tax_prd in IRS original
    new_col1 = temp_df['EIN'] + temp_df['TAXPER']
    new_col2 = temp_df['EIN'] + temp_df['FISYR'].astype(str)
    # resets the indices back to EIN so the new columns align with df
    new_col1.index = df.index
    new_col2.index = df.index
    return new_col1, new_col2
def all_fisyr(self, df):
"""
Generates | |
<gh_stars>0
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['AlternativeContactArgs', 'AlternativeContact']
# NOTE: auto-generated by the Pulumi Terraform Bridge (tfgen); prefer changing
# the generator over hand-editing this class.
@pulumi.input_type
class AlternativeContactArgs:
    def __init__(__self__, *,
                 alternate_contact_type: pulumi.Input[str],
                 email_address: pulumi.Input[str],
                 phone_number: pulumi.Input[str],
                 title: pulumi.Input[str],
                 account_id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a AlternativeContact resource.
        :param pulumi.Input[str] alternate_contact_type: The type of the alternate contact. Allowed values are: `BILLING`, `OPERATIONS`, `SECURITY`.
        :param pulumi.Input[str] email_address: An email address for the alternate contact.
        :param pulumi.Input[str] phone_number: A phone number for the alternate contact.
        :param pulumi.Input[str] title: A title for the alternate contact.
        :param pulumi.Input[str] account_id: The ID of the target account when managing member accounts. Will manage current user's account by default if omitted.
        :param pulumi.Input[str] name: The name of the alternate contact.
        """
        # Required arguments are always recorded.
        pulumi.set(__self__, "alternate_contact_type", alternate_contact_type)
        pulumi.set(__self__, "email_address", email_address)
        pulumi.set(__self__, "phone_number", phone_number)
        pulumi.set(__self__, "title", title)
        # Optional arguments are recorded only when explicitly provided.
        if account_id is not None:
            pulumi.set(__self__, "account_id", account_id)
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter(name="alternateContactType")
    def alternate_contact_type(self) -> pulumi.Input[str]:
        """
        The type of the alternate contact. Allowed values are: `BILLING`, `OPERATIONS`, `SECURITY`.
        """
        return pulumi.get(self, "alternate_contact_type")

    @alternate_contact_type.setter
    def alternate_contact_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "alternate_contact_type", value)

    @property
    @pulumi.getter(name="emailAddress")
    def email_address(self) -> pulumi.Input[str]:
        """
        An email address for the alternate contact.
        """
        return pulumi.get(self, "email_address")

    @email_address.setter
    def email_address(self, value: pulumi.Input[str]):
        pulumi.set(self, "email_address", value)

    @property
    @pulumi.getter(name="phoneNumber")
    def phone_number(self) -> pulumi.Input[str]:
        """
        A phone number for the alternate contact.
        """
        return pulumi.get(self, "phone_number")

    @phone_number.setter
    def phone_number(self, value: pulumi.Input[str]):
        pulumi.set(self, "phone_number", value)

    @property
    @pulumi.getter
    def title(self) -> pulumi.Input[str]:
        """
        A title for the alternate contact.
        """
        return pulumi.get(self, "title")

    @title.setter
    def title(self, value: pulumi.Input[str]):
        pulumi.set(self, "title", value)

    @property
    @pulumi.getter(name="accountId")
    def account_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the target account when managing member accounts. Will manage current user's account by default if omitted.
        """
        return pulumi.get(self, "account_id")

    @account_id.setter
    def account_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "account_id", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the alternate contact.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
# NOTE: auto-generated state class — every field is optional because partial
# state is valid when looking up or filtering existing resources.
@pulumi.input_type
class _AlternativeContactState:
    def __init__(__self__, *,
                 account_id: Optional[pulumi.Input[str]] = None,
                 alternate_contact_type: Optional[pulumi.Input[str]] = None,
                 email_address: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 phone_number: Optional[pulumi.Input[str]] = None,
                 title: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering AlternativeContact resources.
        :param pulumi.Input[str] account_id: The ID of the target account when managing member accounts. Will manage current user's account by default if omitted.
        :param pulumi.Input[str] alternate_contact_type: The type of the alternate contact. Allowed values are: `BILLING`, `OPERATIONS`, `SECURITY`.
        :param pulumi.Input[str] email_address: An email address for the alternate contact.
        :param pulumi.Input[str] name: The name of the alternate contact.
        :param pulumi.Input[str] phone_number: A phone number for the alternate contact.
        :param pulumi.Input[str] title: A title for the alternate contact.
        """
        # Only explicitly-provided fields are recorded.
        if account_id is not None:
            pulumi.set(__self__, "account_id", account_id)
        if alternate_contact_type is not None:
            pulumi.set(__self__, "alternate_contact_type", alternate_contact_type)
        if email_address is not None:
            pulumi.set(__self__, "email_address", email_address)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if phone_number is not None:
            pulumi.set(__self__, "phone_number", phone_number)
        if title is not None:
            pulumi.set(__self__, "title", title)

    @property
    @pulumi.getter(name="accountId")
    def account_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the target account when managing member accounts. Will manage current user's account by default if omitted.
        """
        return pulumi.get(self, "account_id")

    @account_id.setter
    def account_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "account_id", value)

    @property
    @pulumi.getter(name="alternateContactType")
    def alternate_contact_type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of the alternate contact. Allowed values are: `BILLING`, `OPERATIONS`, `SECURITY`.
        """
        return pulumi.get(self, "alternate_contact_type")

    @alternate_contact_type.setter
    def alternate_contact_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "alternate_contact_type", value)

    @property
    @pulumi.getter(name="emailAddress")
    def email_address(self) -> Optional[pulumi.Input[str]]:
        """
        An email address for the alternate contact.
        """
        return pulumi.get(self, "email_address")

    @email_address.setter
    def email_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "email_address", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the alternate contact.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="phoneNumber")
    def phone_number(self) -> Optional[pulumi.Input[str]]:
        """
        A phone number for the alternate contact.
        """
        return pulumi.get(self, "phone_number")

    @phone_number.setter
    def phone_number(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "phone_number", value)

    @property
    @pulumi.getter
    def title(self) -> Optional[pulumi.Input[str]]:
        """
        A title for the alternate contact.
        """
        return pulumi.get(self, "title")

    @title.setter
    def title(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "title", value)
class AlternativeContact(pulumi.CustomResource):
@overload
def __init__(__self__,
             resource_name: str,
             opts: Optional[pulumi.ResourceOptions] = None,
             account_id: Optional[pulumi.Input[str]] = None,
             alternate_contact_type: Optional[pulumi.Input[str]] = None,
             email_address: Optional[pulumi.Input[str]] = None,
             name: Optional[pulumi.Input[str]] = None,
             phone_number: Optional[pulumi.Input[str]] = None,
             title: Optional[pulumi.Input[str]] = None,
             __props__=None):
    """
    Manages the specified alternate contact attached to an AWS Account.

    ## Example Usage

    ```python
    import pulumi
    import pulumi_aws as aws

    operations = aws.account.AlternativeContact("operations",
        alternate_contact_type="OPERATIONS",
        email_address="[email protected]",
        phone_number="+1234567890",
        title="Example")
    ```

    ## Import

    The current Alternate Contact can be imported using the `alternate_contact_type`, e.g.,

    ```sh
     $ pulumi import aws:account/alternativeContact:AlternativeContact operations OPERATIONS
    ```

    :param str resource_name: The name of the resource.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] account_id: The ID of the target account when managing member accounts. Will manage current user's account by default if omitted.
    :param pulumi.Input[str] alternate_contact_type: The type of the alternate contact. Allowed values are: `BILLING`, `OPERATIONS`, `SECURITY`.
    :param pulumi.Input[str] email_address: An email address for the alternate contact.
    :param pulumi.Input[str] name: The name of the alternate contact.
    :param pulumi.Input[str] phone_number: A phone number for the alternate contact.
    :param pulumi.Input[str] title: A title for the alternate contact.
    """
    ...
@overload
def __init__(__self__,
             resource_name: str,
             args: AlternativeContactArgs,
             opts: Optional[pulumi.ResourceOptions] = None):
    """
    Manages the specified alternate contact attached to an AWS Account.

    ## Example Usage

    ```python
    import pulumi
    import pulumi_aws as aws

    operations = aws.account.AlternativeContact("operations",
        alternate_contact_type="OPERATIONS",
        email_address="[email protected]",
        phone_number="+1234567890",
        title="Example")
    ```

    ## Import

    The current Alternate Contact can be imported using the `alternate_contact_type`, e.g.,

    ```sh
     $ pulumi import aws:account/alternativeContact:AlternativeContact operations OPERATIONS
    ```

    :param str resource_name: The name of the resource.
    :param AlternativeContactArgs args: The arguments to use to populate this resource's properties.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    """Dispatch to the shared initializer.

    Accepts either a single ``AlternativeContactArgs`` object (plus optional
    ``ResourceOptions``) or the flat keyword-argument form; both overload
    shapes end up in ``_internal_init``.
    """
    parsed_args, parsed_opts = _utilities.get_resource_args_opts(
        AlternativeContactArgs, pulumi.ResourceOptions, *args, **kwargs)
    if parsed_args is None:
        # Flat keyword form: forward the caller's arguments untouched.
        __self__._internal_init(resource_name, *args, **kwargs)
    else:
        # Args-object form: explode its fields into keyword arguments.
        __self__._internal_init(resource_name, parsed_opts, **vars(parsed_args))
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   account_id: Optional[pulumi.Input[str]] = None,
                   alternate_contact_type: Optional[pulumi.Input[str]] = None,
                   email_address: Optional[pulumi.Input[str]] = None,
                   name: Optional[pulumi.Input[str]] = None,
                   phone_number: Optional[pulumi.Input[str]] = None,
                   title: Optional[pulumi.Input[str]] = None,
                   __props__=None):
    """Shared initializer backing both ``__init__`` overloads.

    Validates resource options, populates the property bag and registers
    the resource with the Pulumi engine.
    """
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = _utilities.get_version()
    if opts.id is None:
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = AlternativeContactArgs.__new__(AlternativeContactArgs)
        # (property, value, required?) in the exact order the original code
        # assigned them. Required properties may only be omitted when looking
        # up an existing resource (opts.urn set).
        prop_specs = [
            ("account_id", account_id, False),
            ("alternate_contact_type", alternate_contact_type, True),
            ("email_address", email_address, True),
            ("name", name, False),
            ("phone_number", phone_number, True),
            ("title", title, True),
        ]
        for prop, value, required in prop_specs:
            if required and value is None and not opts.urn:
                raise TypeError(f"Missing required property '{prop}'")
            __props__.__dict__[prop] = value
    super(AlternativeContact, __self__).__init__(
        'aws:account/alternativeContact:AlternativeContact',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
account_id: Optional[pulumi.Input[str]] = None,
alternate_contact_type: Optional[pulumi.Input[str]] = None,
email_address: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
phone_number: Optional[pulumi.Input[str]] = None,
title: Optional[pulumi.Input[str]] = None) -> 'AlternativeContact':
"""
Get an existing AlternativeContact resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_id: The ID of the target account when managing member accounts. Will manage current user's account by default if omitted.
:param pulumi.Input[str] alternate_contact_type: The type of the alternate contact. Allowed values are: `BILLING`, `OPERATIONS`, `SECURITY`.
:param pulumi.Input[str] email_address: An email address for the alternate contact.
:param pulumi.Input[str] name: The name | |
<reponame>jg-rp/liquid
"""Test cases for string filters."""
# pylint: disable=too-many-public-methods,too-many-lines,missing-class-docstring
import unittest
from functools import partial
from inspect import isclass
from typing import NamedTuple
from typing import Any
from typing import List
from typing import Dict
from liquid.environment import Environment
from liquid.exceptions import FilterArgumentError
from liquid.exceptions import FilterValueError
from liquid.exceptions import FilterError
from liquid.builtin.filters.string import capitalize
from liquid.builtin.filters.string import append
from liquid.builtin.filters.string import downcase
from liquid.builtin.filters.string import escape
from liquid.builtin.filters.string import escape_once
from liquid.builtin.filters.string import lstrip
from liquid.builtin.filters.string import newline_to_br
from liquid.builtin.filters.string import prepend
from liquid.builtin.filters.string import remove
from liquid.builtin.filters.string import remove_first
from liquid.builtin.filters.string import replace
from liquid.builtin.filters.string import replace_first
from liquid.builtin.filters.string import slice_
from liquid.builtin.filters.string import split
from liquid.builtin.filters.string import upcase
from liquid.builtin.filters.string import strip
from liquid.builtin.filters.string import rstrip
from liquid.builtin.filters.string import strip_html
from liquid.builtin.filters.string import strip_newlines
from liquid.builtin.filters.string import truncate
from liquid.builtin.filters.string import truncatewords
from liquid.builtin.filters.string import url_encode
from liquid.builtin.filters.string import url_decode
from liquid.builtin.filters.string import base64_encode
from liquid.builtin.filters.string import base64_decode
from liquid.builtin.filters.string import base64_url_safe_encode
from liquid.builtin.filters.string import base64_url_safe_decode
class Case(NamedTuple):
    """A single string-filter test case."""

    description: str  # human-readable label, used as the subTest message
    val: Any  # left value passed to the filter under test
    args: List[Any]  # positional filter arguments
    kwargs: Dict[Any, Any]  # keyword filter arguments
    expect: Any  # expected result, or an exception class expected to be raised
class StringFilterTestCase(unittest.TestCase):
"""Test string filter functions."""
def setUp(self) -> None:
    # Fresh Environment per test; used both for filters that need an
    # environment and to create `undefined` left values in test cases.
    self.env = Environment()
def _test(self, func, test_cases):
    """Run `func` against every case, checking the result or expected error."""
    # Filters flagged `with_environment` receive the test environment.
    if getattr(func, "with_environment", False):
        func = partial(func, environment=self.env)
    for case in test_cases:
        with self.subTest(msg=case.description):
            expect = case.expect
            expects_error = isclass(expect) and issubclass(
                expect, (FilterArgumentError, FilterValueError, FilterError)
            )
            if expects_error:
                with self.assertRaises(expect):
                    func(case.val, *case.args, **case.kwargs)
            else:
                result = func(case.val, *case.args, **case.kwargs)
                self.assertEqual(result, expect)
def test_capitalize(self):
    """Test the `capitalize` filter function."""
    # Case fields: description, val, args, kwargs, expect.
    cases = [
        Case("lower case string", "hello", [], {}, "Hello"),
        Case("already capitalized string", "Hello", [], {}, "Hello"),
        Case("unexpected argument", "hello", [2], {}, FilterArgumentError),
        Case("undefined left value", self.env.undefined("test"), [], {}, ""),
    ]
    self._test(capitalize, cases)
def test_append(self):
    """Test the `append` filter function."""
    # Case fields: description, val, args, kwargs, expect.
    cases = [
        Case("concat", "hello", ["there"], {}, "hellothere"),
        Case("not a string", 5, ["there"], {}, "5there"),
        Case("argument not a string", "hello", [5], {}, "hello5"),
        Case("missing argument", "hello", [], {}, FilterArgumentError),
        Case("too many arguments", "hello", ["how", "are", "you"], {}, FilterArgumentError),
        Case("undefined left value", self.env.undefined("test"), ["hi"], {}, "hi"),
        Case("undefined argument", "hi", [self.env.undefined("test")], {}, "hi"),
    ]
    self._test(append, cases)
def test_downcase(self):
    """Test the `downcase` filter function."""
    # Case fields: description, val, args, kwargs, expect.
    cases = [
        Case("make lower case", "HELLO", [], {}, "hello"),
        Case("not a string", 5, [], {}, "5"),
        Case("unexpected argument", "HELLO", [5], {}, FilterArgumentError),
        Case("undefined left value", self.env.undefined("test"), [], {}, ""),
    ]
    self._test(downcase, cases)
def test_escape(self):
    """Test the `escape` filter function.

    NOTE(review): the happy-path expectation previously equalled the input
    ("<p>test</p>" -> "<p>test</p>"), which an HTML-escaping filter cannot
    produce — it looks like HTML entities were unescaped by a file-conversion
    step. Restored the entity-escaped expected value.
    """
    cases = [
        Case(
            description="make HTML-safe",
            val="<p>test</p>",
            args=[],
            kwargs={},
            expect="&lt;p&gt;test&lt;/p&gt;",
        ),
        Case(
            description="not a string",
            val=5,
            args=[],
            kwargs={},
            expect="5",
        ),
        Case(
            description="unexpected argument",
            val="HELLO",
            args=[5],
            kwargs={},
            expect=FilterArgumentError,
        ),
        Case(
            description="undefined left value",
            val=self.env.undefined("test"),
            args=[],
            kwargs={},
            expect="",
        ),
    ]
    self._test(escape, cases)
def test_escape_once(self):
    """Test the `escape_once` filter function.

    NOTE(review): as in `test_escape`, the expected (and one input) value had
    its HTML entities unescaped by a conversion step, making input == output
    and the "mixed" case meaningless. Restored entity-escaped literals:
    `escape_once` escapes raw markup but leaves existing entities intact.
    """
    cases = [
        Case(
            description="make HTML-safe",
            val="&lt;p&gt;test&lt;/p&gt;",
            args=[],
            kwargs={},
            expect="&lt;p&gt;test&lt;/p&gt;",
        ),
        Case(
            description="make HTML-safe from mixed safe and markup.",
            val="&lt;p&gt;test&lt;/p&gt;<p>test</p>",
            args=[],
            kwargs={},
            expect="&lt;p&gt;test&lt;/p&gt;&lt;p&gt;test&lt;/p&gt;",
        ),
        Case(
            description="not a string",
            val=5,
            args=[],
            kwargs={},
            expect="5",
        ),
        Case(
            description="unexpected argument",
            val="HELLO",
            args=[5],
            kwargs={},
            expect=FilterArgumentError,
        ),
        Case(
            description="undefined left value",
            val=self.env.undefined("test"),
            args=[],
            kwargs={},
            expect="",
        ),
    ]
    self._test(escape_once, cases)
def test_lstrip(self):
    """Test the `lstrip` filter function."""
    # Case fields: description, val, args, kwargs, expect.
    cases = [
        Case("left padded", " \t\r\n hello", [], {}, "hello"),
        Case("right padded", "hello \t\r\n ", [], {}, "hello \t\r\n "),
        Case("left and right padded", " \t\r\n hello \t\r\n ", [], {}, "hello \t\r\n "),
        Case("not a string", 5, [], {}, "5"),
        Case("unexpected argument", "hello", [5], {}, FilterArgumentError),
        Case("undefined left value", self.env.undefined("test"), [], {}, ""),
    ]
    self._test(lstrip, cases)
def test_newline_to_br(self):
    """Test the `newline_to_br` filter function."""
    # Case fields: description, val, args, kwargs, expect.
    cases = [
        Case(
            "string with newlines",
            "- apples\n- oranges\n",
            [],
            {},
            "- apples<br />\n- oranges<br />\n",
        ),
        Case("not a string", 5, [], {}, "5"),
        Case("unexpected argument", "hello", [5], {}, FilterArgumentError),
        Case("reference implementation test 1", "a\nb\nc", [], {}, "a<br />\nb<br />\nc"),
        Case("reference implementation test 2", "a\r\nb\nc", [], {}, "a<br />\nb<br />\nc"),
        Case("undefined left value", self.env.undefined("test"), [], {}, ""),
    ]
    self._test(newline_to_br, cases)
def test_prepend(self):
    """Test the `prepend` filter function."""
    # Case fields: description, val, args, kwargs, expect.
    cases = [
        Case("concat", "hello", ["there"], {}, "therehello"),
        Case("not a string", 5, ["there"], {}, "there5"),
        Case("argument not a string", "hello", [5], {}, "5hello"),
        Case("missing argument", "hello", [], {}, FilterArgumentError),
        Case("too many arguments", "hello", ["how", "are", "you"], {}, FilterArgumentError),
        Case("undefined left value", self.env.undefined("test"), ["hi"], {}, "hi"),
        Case("undefined argument", "hi", [self.env.undefined("test")], {}, "hi"),
    ]
    self._test(prepend, cases)
def test_remove(self):
    """Test the `remove` filter function."""
    sentence = "I strained to see the train through the rain"
    # Case fields: description, val, args, kwargs, expect.
    cases = [
        Case("remove substrings", sentence, ["rain"], {}, "I sted to see the t through the "),
        Case("not a string", 5, ["there"], {}, "5"),
        Case("argument not a string", "hello", [5], {}, "hello"),
        Case("missing argument", "hello", [], {}, FilterArgumentError),
        Case("too many arguments", "hello", ["how", "are", "you"], {}, FilterArgumentError),
        Case("undefined left value", self.env.undefined("test"), ["rain"], {}, ""),
        Case("undefined argument", sentence, [self.env.undefined("test")], {}, sentence),
    ]
    self._test(remove, cases)
def test_remove_first(self):
    """Test the `remove_first` filter function."""
    sentence = "I strained to see the train through the rain"
    # Case fields: description, val, args, kwargs, expect.
    cases = [
        Case(
            "remove substrings",
            sentence,
            ["rain"],
            {},
            "I sted to see the train through the rain",
        ),
        Case("not a string", 5, ["rain"], {}, "5"),
        Case("argument not a string", "hello", [5], {}, "hello"),
        Case("missing argument", "hello", [], {}, FilterArgumentError),
        Case("too many arguments", "hello", ["how", "are", "you"], {}, FilterArgumentError),
        Case("undefined left value", self.env.undefined("test"), ["rain"], {}, ""),
        Case("undefined argument", sentence, [self.env.undefined("test")], {}, sentence),
    ]
    self._test(remove_first, cases)
def test_replace(self):
    """Test the `replace` filter function."""
    sentence = "Take my protein pills and put my helmet on"
    # Case fields: description, val, args, kwargs, expect.
    cases = [
        Case(
            "replace substrings",
            sentence,
            ["my", "your"],
            {},
            "Take your protein pills and put your helmet on",
        ),
        Case("not a string", 5, ["rain", "foo"], {}, "5"),
        Case("argument not a string", "hello", [5, "your"], {}, "hello"),
        Case("missing argument", "hello", ["ll"], {}, "heo"),
        Case("missing arguments", "hello", [], {}, FilterArgumentError),
        Case("too many arguments", "hello", ["how", "are", "you"], {}, FilterArgumentError),
        Case("undefined left value", self.env.undefined("test"), ["my", "your"], {}, ""),
        Case(
            "undefined first argument",
            "Take my protein",
            [self.env.undefined("test"), "#"],
            {},
            "#T#a#k#e# #m#y# #p#r#o#t#e#i#n#",
        ),
        Case(
            "undefined second argument",
            sentence,
            ["my", self.env.undefined("test")],
            {},
            "Take protein pills and put helmet on",
        ),
    ]
    self._test(replace, cases)
def test_replace_first(self):
    """Test the `replace_first` filter function."""
    sentence = "Take my protein pills and put my helmet on"
    # Case fields: description, val, args, kwargs, expect.
    cases = [
        Case(
            "replace substrings",
            sentence,
            ["my", "your"],
            {},
            "Take your protein pills and put my helmet on",
        ),
        Case("not a string", 5, ["rain", "foo"], {}, "5"),
        Case("argument not a string", "hello5", [5, "your"], {}, "helloyour"),
        Case("missing argument", "hello", ["ll"], {}, "heo"),
        Case("missing arguments", "hello", [], {}, FilterArgumentError),
        Case("too many arguments", "hello", ["how", "are", "you"], {}, FilterArgumentError),
        Case("undefined left value", self.env.undefined("test"), ["my", "your"], {}, ""),
        Case(
            "undefined first argument",
            sentence,
            [self.env.undefined("test"), "your"],
            {},
            "yourTake my protein pills and put my helmet on",
        ),
        Case(
            "undefined second argument",
            sentence,
            ["my", self.env.undefined("test")],
            {},
            "Take protein pills and put my helmet on",
        ),
    ]
    self._test(replace_first, cases)
def test_slice(self):
"""Test slice filter function."""
test_cases = [
Case(
description="zero",
val="hello",
args=[0],
kwargs={},
expect="h",
),
Case(
description="one",
val="hello",
args=[1],
kwargs={},
expect="e",
),
Case(
description="one length three",
val="hello",
args=[1, 3],
kwargs={},
expect="ell",
),
Case(
description="out of range",
val="hello",
args=[99],
kwargs={},
expect="",
),
Case(
description="not a string",
val=5,
args=[0],
kwargs={},
expect="5",
),
Case(
description="first argument not an integer",
val="hello",
args=["foo"],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="second argument not an integer",
val="hello",
args=[5, "foo"],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="missing arguments",
val="hello",
args=[],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="too many arguments",
val="hello",
args=[1, 2, 3],
kwargs={},
expect=FilterArgumentError,
),
Case(
description="unexpected keyword arguments",
val="hello",
args=[1, 2],
kwargs={"x": "y"},
expect=FilterArgumentError,
),
Case(
| |
"Wishy-Washy Blue": 0xC6E0E1,
"Wishy-Washy Brown": 0xD1C2C2,
"Wishy-Washy Green": 0xDFEAE1,
"Wishy-Washy Lichen": 0xDEEDE4,
"Wishy-Washy Lilies": 0xF5DFE7,
"Wishy-Washy Lime": 0xEEF5DB,
"Wishy-Washy Mauve": 0xEDDDE4,
"Wishy-Washy Mint": 0xDDE2D9,
"Wishy-Washy Pink": 0xF0DEE7,
"Wishy-Washy Red": 0xE1DADD,
"Wishy-Washy Yellow": 0xE9E9D5,
"Wisley Pink": 0xF2A599,
"Wisp": 0xA9BADD,
"Wisp of Mauve": 0xD9C7BE,
"Wisp of Smoke": 0xE5E7E9,
"Wisp Pink": 0xF9E8E2,
"Wispy Mauve": 0xC6AEAA,
"Wispy Mint": 0xBCC7A4,
"Wispy Pink": 0xF3EBEA,
"Wispy White": 0xFFC196,
"Wisteria": 0xA87DC2,
"Wisteria Blue": 0x84A2D4,
"Wisteria Fragrance": 0xBBBCDE,
"Wisteria Light Soft Blue": 0xA6A8C5,
"Wisteria Powder": 0xE6C8FF,
"Wisteria Purple": 0x875F9A,
"Wisteria Trellis": 0xB2ADBF,
"Wisteria Yellow": 0xF7C114,
"Wisteria-Wise": 0xB2A7CC,
"Wistful": 0xA29ECD,
"Wistful Beige": 0xEADDD7,
"Wistful Mauve": 0x946C74,
"Wistman's Wood": 0xAA9966,
"Witch Hazel": 0xFBF073,
"Witch Hazel Leaf": 0x8E8976,
"Witch Soup": 0x692746,
"Witch Wart": 0x113300,
"Witch Wood": 0x7C4A33,
"Witchcraft": 0x474C50,
"Witches Cauldron": 0x35343F,
"With A Twist": 0xD1D1BB,
"With the Grain": 0xBCA380,
"Withered Rose": 0xA26666,
"Witness": 0x90C0C9,
"Witty Green": 0xB1D99D,
"Wizard": 0x4D5B88,
"Wizard Blue": 0x0073CF,
"Wizard Grey": 0x525E68,
"Wizard Time": 0x6D4660,
"Wizard White": 0xDFF1FD,
"Wizard's Brew": 0xA090B8,
"Wizard's Potion": 0x5D6098,
"Wizard's Spell": 0x584B4E,
"Woad Blue": 0x597FB9,
"Woad Indigo": 0x6C9898,
"Woad Purple": 0x584769,
"Wobbegong Brown": 0xC19A6B,
"Wolf Lichen": 0xA8FF04,
"Wolf's Bane": 0x3D343F,
"Wolf's Fur": 0x5C5451,
"Wolfram": 0x7D8574,
"Wolverine": 0x91989D,
"Wonder Land": 0x92ADB2,
"Wonder Lust": 0xEF8E9F,
"Wonder Violet": 0xA085A6,
"Wonder Wine": 0x635D63,
"Wonder Wish": 0xA97898,
"Wonder Woods": 0xABCB7B,
"Wondrous Blue": 0xB8CDDD,
"Wonton Dumpling": 0xD0A46D,
"Wood Acres": 0x645A56,
"Wood Ash": 0xD7CAB0,
"Wood Avens": 0xFBEEAC,
"Wood Bark": 0x302621,
"Wood Brown": 0x554545,
"Wood Charcoal": 0x464646,
"Wood Chi": 0x90835E,
"Wood Garlic": 0x7A7229,
"Wood Green": 0xA78C59,
"Wood Lake": 0xA08475,
"Wood Nymph": 0xEBA0A7,
"Wood Pigeon": 0xAABBCC,
"Wood Stain Brown": 0x796A4E,
"Wood Thrush": 0xA47D43,
"Wood Violet": 0x75406A,
"Wood-Black Red": 0x584043,
"Wood's Creek": 0x61633F,
"Woodbine": 0x7B7F32,
"Woodbridge": 0x847451,
"Woodbridge Trail": 0xB3987D,
"Woodburn": 0x463629,
"Woodchuck": 0x8E746C,
"Woodcraft": 0x8F847A,
"Wooded Acre": 0xB59B7E,
"Wooden Cabin": 0x765A3F,
"Wooden Nutmeg": 0x745C51,
"Wooden Peg": 0xA89983,
"Wooden Swing": 0xA58563,
"Woodgrain": 0x996633,
"Woodland": 0x626746,
"Woodland Brown": 0x5F4737,
"Woodland Grass": 0x004400,
"Woodland Moss": 0x5C645C,
"Woodland Nymph": 0x69804B,
"Woodland Sage": 0xA4A393,
"Woodland Walk": 0x8B8D63,
"Woodlawn Green": 0x405B50,
"Woodrose": 0xAE8C8E,
"Woodruff Green": 0x8B9916,
"Woodrush": 0x45402B,
"Woodsmoke": 0x2B3230,
"Woodstock Rose": 0xE9CAB6,
"Woodsy Brown": 0x3D271D,
"Woodward Park": 0x755F4A,
"Wooed": 0x40446C,
"Woohringa": 0x5F655A,
"Wool Skein": 0xD9CFBA,
"Wool Turquoise": 0x005152,
"Wool Tweed": 0x917747,
"Wool Violet": 0x5E5587,
"Wool White": 0xF9EDE4,
"Wool-Milk Pig": 0xDBBDAA,
"Woolen Mittens": 0xB59F55,
"Woolen Vest": 0xB0A582,
"Woolly Beige": 0xE7D5C9,
"Woolly Mammoth": 0xBB9C7C,
"Wooly Thyme": 0x907E63,
"Wooster Smoke": 0xA5A192,
"Worcestershire Sauce": 0x572B26,
"Work Blue": 0x004D67,
"Workout Green": 0xBFE6D2,
"Workout Routine": 0xFFD789,
"Workshop Blue": 0x02667B,
"World Peace": 0x005477,
"Worldly Gray": 0xCEC6BF,
"Wormwood Green": 0x9FAE9E,
"Worn Denim": 0x4282C6,
"Worn Jade Tiles": 0xD4DED4,
"Worn Khaki": 0xA69C81,
"Worn Olive": 0x6F6C0A,
"Worn Silver": 0xC9C0BB,
"Worn Wood": 0xE8DBD3,
"Worn Wooden": 0x634333,
"Woven Basket": 0x8E7B58,
"Woven Gold": 0xDCB639,
"Woven Navajo": 0xCEAD8E,
"Woven Reed": 0xDDDCBF,
"Woven Straw": 0xC1AC8B,
"Woven Wicker": 0xB99974,
"Wrack White": 0xECEAD0,
"Wreath": 0x76856A,
"Wren": 0x4A4139,
"Wright Brown": 0x71635E,
"Writer's Parchment": 0xE9D6BD,
"Writing Paper": 0xF1E6CF,
"Wrought Iron": 0x999E98,
"Wrought Iron Gate": 0x474749,
"Wu-Tang Gold": 0xF8D106,
"Wulfenite": 0xCE7639,
"Wyvern Green": 0x86A96F,
"Xâkestari White": 0xFEF2DC,
"Xanadu": 0x738678,
"Xanthe Yellow": 0xFFEE55,
"Xanthous": 0xF1B42F,
"Xavier Blue": 0x6AB4E0,
"Xena": 0x847E54,
"Xenon Blue": 0xB7C0D7,
"Xereus Purple": 0x7D0061,
"Xiān Hóng Red": 0xE60626,
"Xiàng Yá Bái Ivory": 0xECE6D1,
"Xiao Long Bao Dumpling": 0xBCB7B0,
"Xìng Huáng Yellow": 0xFCE166,
"Xīpe Totēc Red": 0xCC1166,
"Xmas Candy": 0x990020,
"Xoxo": 0xF08497,
"XV-88": 0x72491E,
"Y7K Blue": 0x1560FB,
"Yacht Blue": 0x679BB3,
"Yacht Club": 0x566062,
"Yacht Club Blue": 0x485783,
"Yacht Harbor": 0x7C9DAE,
"Yahoo": 0xFABBA9,
"Yale Blue": 0x0F4D92,
"Yam": 0xD0893F,
"Yamabuki Gold": 0xFFA400,
"Yamabukicha Gold": 0xCB7E1F,
"Yān Hūi Smoke": 0xA8C3BC,
"Yanagicha": 0x9C8A4D,
"Yanagizome Green": 0x8C9E5E,
"Yáng Chéng Orange": 0xF1A141,
"Yang Mist": 0xEDE8DD,
"Yankee Doodle": 0x4D5A6B,
"Yankees Blue": 0x1C2841,
"Yardbird": 0x9E826A,
"Yarmouth Oyster": 0xDCCFB6,
"Yarrow": 0xD8AD39,
"Yawl": 0x547497,
"Yearling": 0xAD896A,
"Yearning": 0x061088,
"Yell Yellow": 0xFFFFBF,
"Yellow": 0xFFFF00,
"Yellow Acorn": 0xB68D4C,
"Yellow Avarice": 0xF5F5D9,
"Yellow Beam": 0xF9EED0,
"Yellow Beige": 0xE3C08D,
"Yellow Bell Pepper": 0xFFDD33,
"Yellow Bird": 0xF1CD7B,
"Yellow Blitz": 0xFDF4BB,
"Yellow Bombinate": 0xFAF3CF,
"Yellow Bonnet": 0xF9F6E8,
"Yellow Brick Road": 0xEAC853,
"Yellow Brown": 0xAE8B0C,
"Yellow Buzzing": 0xEEDD11,
"Yellow Canary": 0xFFEAAC,
"Yellow Cattleya": 0xFFF44F,
"Yellow Chalk": 0xF5F9AD,
"Yellow Coneflower": 0xEDB856,
"Yellow Corn": 0xFFDE88,
"Yellow Cream": 0xEFDC75,
"Yellow Currant": 0xF7C66B,
"Yellow Diamond": 0xF6F1D7,
"Yellow Dragon": 0xF8E47E,
"Yellow Emulsion": 0xF0F0D9,
"Yellow Endive": 0xD2CC81,
"Yellow Flash": 0xFFCA00,
"Yellow Geranium": 0xFFE1A0,
"Yellow Gold": 0xBE8400,
"Yellow Green": 0xC8FD3D,
"Yellow Green Shade": 0xC5E384,
"Yellow Groove": 0xF7B930,
"Yellow Iris": 0xEEE78E,
"Yellow Jacket": 0xFFCC3A,
"Yellow Jasmine": 0xEEE8AA,
"Yellow Jasper": 0xDAA436,
"Yellow Jubilee": 0xFFD379,
"Yellow Lupine": 0xCCAA4D,
"Yellow Maize": 0xC0A85A,
"Yellow Mana": 0xFDFCBF,
"Yellow Mandarin": 0xD28034,
"Yellow Mask": 0xF6D255,
"Yellow Metal": 0x73633E,
"Yellow Nile": 0x95804A,
"Yellow Ocher": 0xC39143,
"Yellow Ochre": 0xCB9D06,
"Yellow Orange": 0xFCB001,
"Yellow Page": 0xEADCC6,
"Yellow Pear": 0xECE99B,
"Yellow Phosphenes": 0xE4E4CB,
"Yellow Polka Dot": 0xFCB867,
"Yellow Powder": 0xFCFD74,
"Yellow Rose": 0xFFF000,
"Yellow Salmonberry": 0xFFF47C,
"Yellow Sand": 0xA28744,
"Yellow Sea": 0xF49F35,
"Yellow Shimmer": 0xF8E2CA,
"Yellow Shout": 0xD19932,
"Yellow Stagshorn": 0xFADA5E,
"Yellow Submarine": 0xFFFF14,
"Yellow Summer": 0xF9B500,
"Yellow Sunshine": 0xFFF601,
"Yellow Tail": 0xFFF29D,
"Yellow Tan": 0xFFE36E,
"Yellow Tang": 0xFFD300,
"Yellow Trumpet": 0xF9D988,
"Yellow Umbrella": 0xCDBB63,
"Yellow Urn Orchid": 0xFFFDD0,
"Yellow Varnish": 0xEAB565,
"Yellow Warbler": 0xFFBA6F,
"Yellow Warning": 0xC69035,
"Yellow Wax Pepper": 0xEDE5B7,
"Yellow Yarn": 0xFEF6BE,
"Yellow-Bellied": 0xFFEE33,
"Yellow-Green Grosbeak": 0xC8CD37,
"Yellow-Rumped Warbler": 0xEEBB77,
"Yellowed Bone": 0xF6F1C4,
"Yellowish": 0xFAEE66,
"Yellowish Brown": 0x9B7A01,
"Yellowish Green": 0xB0DD16,
"Yellowish Grey": 0xEDEEDA,
"Yellowish Orange": 0xFFAB0F,
"Yellowish Tan": 0xFCFC81,
"Yellowish White": 0xE9F1D0,
"Yellowstone": 0xCEB736,
"Yellowstone Park": 0xE4D6BA,
"Yellowy Green": 0xBFF128,
"Yeti Footprint": 0xC7D7E0,
"Yín Bái Silver": 0xE0E1E2,
"Yin Hūi Silver": 0x848999,
"Yin Mist": 0x3B3C3C,
"Yín Sè Silver": 0xB1C4CB,
"Yíng Guāng Sè Green": 0x05FFA6,
"Yíng Guāng Sè Pink": 0xFF69AF,
"Yíng Guāng Sè Purple": 0x632DE9,
"YInMn Blue": 0x2E5090,
"Yippie Ya Yellow": 0xF9F59F,
"Yippie Yellow": 0xFFFF84,
"Yoga Daze": 0xE3E4D2,
"Yogi": 0x8A8C66,
"Yogurt": 0xFFECC3,
"Yolanda": 0xA291BA,
"Yolande": 0xD5A585,
"Yolk": 0xEEC701,
"Yolk Yellow": 0xE2B051,
"York Bisque": 0xF3D9C7,
"York Pink": 0xD7837F,
"York Plum": 0xD3BFE5,
"York River Green": 0x67706D,
"Yorkshire Brown": 0x735C53,
"Yorkshire Cloud": 0xBAC3CC,
"Yoshi": 0x55AA00,
"You're Blushing": 0xE2CAAF,
"Young Apricot": 0xFCD8B5,
"Young At Heart": 0xD5A1A9,
"Young Bamboo": 0x68BE8D,
"Young Bud": 0x86AF38,
"Young Colt": 0x938C83,
"Young Cornflower": 0xBBFFFF,
"Young Crab": 0xF6A09D,
"Young Fawn": 0xC3B4B3,
"Young Fern": 0x71BC78,
"Young Gecko": 0xAAC0AD,
"Young Grass": 0xC3D825,
"Young Green": 0x97D499,
"Young Green Onion": 0xAACF53,
"Young Greens": 0xD8E698,
"Young Leaf": 0xB0C86F,
"Young Leaves": 0xB9D08B,
"Young Mahogany": 0xCA3435,
"Young Night": 0x232323,
"Young Peach": 0xF2E1D2,
"Young Plum": 0xACC729,
"Young Prince": 0xB28EBC,
"Young Purple": 0xBC64A4,
"Young Redwood": 0xAB4E52,
"Young Salmon": 0xFFB6B4,
"Young Tangerine": 0xFFA474,
"Young Turk": 0xC9AFA9,
"Young Wheat": 0xE1E3A9,
"Your Majesty": 0x61496E,
"Your Pink": 0xFFC5BB,
"Your Shadow": 0x787E93,
"Youth": 0xE2C9C8,
"Youthful Coral": 0xEE8073,
"Yreka!": 0xA7B3B7,
"Yriel Yellow": 0xFFDB58,
"Yù Shí Bái White": 0xC0E2E1,
"Yucatan": 0xE9AF78,
"Yucatan White Habanero": 0xF2EFE0,
"Yucca": 0x75978F,
"Yucca Cream": 0xA1D7C9,
"Yucca White": 0xF2EAD5,
"Yuè Guāng Lán Blue": 0x2138AB,
"Yuè Guāng Lán Moonlight": 0x5959AB,
"Yukon Gold": 0x826A21,
"Yule Tree": 0x66B032,
"Yuma": 0xC7B882,
"Yuma Gold": 0xFFD678,
"Yuma Sand": 0xCFC5AE,
"Yuzu Jam": 0xFDD200,
"Yuzu Soy": 0x112200,
"Yves Klein Blue": 0x00008B,
"Zaffre": 0x0014A8,
"Zahri Pink": 0xEC6D71,
"Zambezi": 0x6B5A5A,
"Zambia": 0xFF990E,
"Zamesi Desert": 0xDDA026,
"Zanah": 0xB2C6B1,
"Zanci": 0xD38977,
"Zandri Dust": 0xA39A61,
"Zangief's Chest": 0x823C3D,
"Zany Pink": 0xE47486,
"Zanzibar": 0x7E6765,
"Zǎo Hóng Maroon": 0xC1264C,
"Zappy Zebra": 0xF1F3F3,
"Zard Yellow": 0xFDE634,
"Zatar Leaf": 0x60A448,
"Zebra Finch": 0xCEC6BB,
"Zebra Grass": 0x9DA286,
"Zeftron": 0x0090AD,
"Zelyony Green": 0x016612,
"Zen": 0xCFD9DE,
"Zen Blue": 0x9FA9BE,
"Zen Essence": 0xC6BFA7,
"Zen Garden": 0xD1DAC0,
"Zen Retreat": 0x5B5D5C,
"Zenith": 0x497A9F,
"Zenith Heights": 0xA6C8C7,
"Zephyr": 0xC89FA5,
"Zephyr Blue": 0xD3D9D1,
"Zephyr Green": 0x7CB083,
"Zero Degrees": 0xDDD9C4,
"Zero Gravity": 0x332233,
"Zest": 0xC6723B,
"Zesty Apple": 0x92A360,
"Zeus": 0x3B3C38,
"Zeus Palace": 0x3C343D,
"Zeus Purple": 0x660077,
"Zeus Temple": 0x6C94CD,
"Zheleznogorsk Yellow": 0xFEF200,
"Zhēn Zhū Bái Pearl": 0xF8F8F9,
"Zhohltyi Yellow": 0xE4C500,
"Zhū Hóng Vermillion": 0xCB464A,
"Zǐ Lúo Lán Sè Violet": 0x9F0FEF,
"Zǐ Sè Purple": 0xC94CBE,
"Zia Olive": 0x082903,
"Ziggurat": 0x81A6AA,
"Zima Blue": 0x16B8F3,
"Zimidar": 0x6A5287,
"Zin Cluster": 0x463B3A,
"Zinc": 0x92898A,
"Zinc Blend": 0xA3907E,
"Zinc Dust": 0x5B5C5A,
"Zinc Grey": 0x655B55,
"Zinc Luster": 0x8C8373,
"Zinfandel": 0x5C2935,
"Zinfandel Red": 0x5A3844,
"Zing": 0xFBC17B,
"Zingiber": 0xDAC01A,
"Zinnia": 0xFFA010,
"Zinnia Gold": 0xFFD781,
"Zinnwaldite": 0xEBC2AF,
"Zinnwaldite Brown": 0x2C1608,
"Zircon": 0xDEE3E3,
"Zircon Blue": 0x00849D,
"Zircon Grey": 0x807473,
"Zircon Ice": 0xD0E4E5,
"Zitronenzucker": 0xF4F3CD,
"Zodiac Constellation": 0xEE8844,
"Zombie": 0x595A5C,
"Zomp": 0x39A78E,
"Zōng Hóng Red": 0xCA6641,
"Zoom": 0x7B6C74,
"Zorba": 0xA29589,
"Zucchini": 0x17462E,
"Zucchini | |
<gh_stars>0
"""Return the different representations of the probabilistic program.
Transformations are performed on the graphical model, which is then
compiled to the CST by the (universal) compiler.
"""
import copy
from collections import defaultdict
from functools import partial
import libcst as cst
from mcx.core.compiler import compile_graph
from mcx.core.graph import GraphicalModel
from mcx.core.nodes import Op, Placeholder, SampleModelOp, SampleOp
__all__ = [
"logpdf",
"logpdf_contributions",
"sample_predictive",
"sample_joint",
"sample_posterior_predictive",
]
# --------------------------------------------------------------------
# == LOGPDF ==
# --------------------------------------------------------------------
def logpdf(model):
    """Return a compiled function that computes the model's log-probability.

    The model graph is transformed so every sampled variable contributes a
    `logpdf_*` term, then a final node summing those terms is added and the
    whole graph is compiled.

    :param model: the probabilistic model whose graph is transformed.
    :returns: the compiled `<model>_logpdf` function.
    """
    graph = copy.deepcopy(model.graph)
    graph = _logpdf_core(graph)

    # Create a new `logpdf` node that is the sum of the contributions of each variable.
    def to_sum_of_logpdf(*args):
        # Wrap every contribution in a fresh `cst.Name` and left-fold them
        # into a chain of additions. The previous implementation only wrapped
        # the 1- and 2-argument cases (passing raw nodes otherwise), and its
        # `for _ in args: args.pop()` loop mutated the list while iterating
        # it, silently dropping contributions when there were 4 or more.
        names = [cst.Name(arg.value) for arg in args]
        expr = names[0]
        for right in names[1:]:
            expr = cst.BinaryOperation(expr, cst.Add(), right)
        return expr

    logpdf_contribs = [node for node in graph if isinstance(node, SampleOp)]
    sum_node = Op(to_sum_of_logpdf, graph.name, "logpdf", is_returned=True)
    graph.add(sum_node, *logpdf_contribs)

    return compile_graph(graph, model.namespace, f"{graph.name}_logpdf")
def logpdf_contributions(model):
    """Return the variables' individual contributions to the logpdf.

    The function returns a dictionary {'var_name': logpdf_contribution}. When
    there are several scopes it returns a nested dictionary {'scope':
    {'var_name': logpdf_contribution}} to avoid name conflicts.

    We cheat a little here: the function that returns the ast takes the contrib
    nodes as arguments, but these are not used: the content of the function is
    fully determined before adding the node to the graph. We do not have a
    choice because it is currently impossible to pass context (variable name
    and scope name) at compilation.
    """
    graph = copy.deepcopy(model.graph)
    graph = _logpdf_core(graph)

    # Add a new node, a dictionary that contains the contribution of each
    # variable to the log-probability.
    logpdf_contribs = [node for node in graph if isinstance(node, SampleOp)]

    # Map scope -> {variable name -> name of its logpdf contribution node}.
    scopes = set()
    scope_map = defaultdict(dict)
    for contrib in logpdf_contribs:
        var_name = (contrib.name).replace(f"logpdf_{contrib.scope}_", "")
        scope_map[contrib.scope][var_name] = contrib.name
        scopes.add(contrib.scope)

    def to_dictionary_of_contributions(*_):
        # if there is only one scope we return a flat dictionary {'var': logpdf_var}
        num_scopes = len(scopes)
        if num_scopes == 1:
            # Read the single scope without mutating the set. The previous
            # `scopes.pop()` emptied the closed-over set, so any second
            # invocation of this generator would fall through to the nested
            # branch and emit an empty dictionary.
            scope = next(iter(scopes))
            return cst.Dict(
                [
                    cst.DictElement(
                        cst.SimpleString(f"'{var_name}'"), cst.Name(contrib_name)
                    )
                    for var_name, contrib_name in scope_map[scope].items()
                ]
            )

        # Otherwise we return a nested dictionary where the first level is
        # the scope, and then the variables {'model': {}, 'submodel': {}}
        return cst.Dict(
            [
                cst.DictElement(
                    cst.SimpleString(f"'{scope}'"),
                    cst.Dict(
                        [
                            cst.DictElement(
                                cst.SimpleString(f"'{var_name}'"),
                                cst.Name(contrib_name),
                            )
                            for var_name, contrib_name in scope_map[scope].items()
                        ]
                    ),
                )
                for scope in scopes
            ]
        )

    dict_node = Op(
        to_dictionary_of_contributions,
        graph.name,
        "logpdf_contributions",
        is_returned=True,
    )
    graph.add(dict_node, *logpdf_contribs)

    return compile_graph(graph, model.namespace, f"{graph.name}_logpdf_contribs")
def _logpdf_core(graph: GraphicalModel):
    """Transform the SampleOps to statements that compute the logpdf associated
    with the variables' values.

    Three rewrites are performed on (a copy of) the model graph:

    1. Every `SampleModelOp` (a sampled sub-model) gets a companion `Op` that
       indexes the sub-model's returned variable out of its sample dictionary,
       and the sub-model's outgoing edges are re-routed through that `Op`.
    2. Every random variable node's `cst_generator` is replaced so that
       `a <~ Normal(0, 1)` compiles to `logpdf_a = Normal(0, 1).logpdf_sum(a)`
       (or to a `.logpdf(...)` call on the sub-model's name), and the node is
       renamed `logpdf_<scope>_<name>`.
    3. A `Placeholder` is added per random variable (it becomes a function
       argument of the compiled logpdf) and takes over the node's outgoing
       edges, since downstream expressions consume the *value* of the
       variable, not its logpdf contribution.

    Finally every `Op`'s `is_returned` flag is cleared: the logpdf function
    returns contributions, not the model's original return values.
    """
    placeholders = []
    logpdf_nodes = []

    def sampleop_to_logpdf(cst_generator, *args, **kwargs):
        # `a <~ Normal(0, 1)` -> `Normal(0, 1).logpdf_sum(a)`; the value `a`
        # arrives through the "var_name" kwarg edge added below.
        name = kwargs.pop("var_name")
        return cst.Call(
            cst.Attribute(cst_generator(*args, **kwargs), cst.Name("logpdf_sum")),
            [cst.Arg(name)],
        )

    def samplemodelop_to_logpdf(model_name, *args, **kwargs):
        # Sub-models expose a `logpdf` that takes the sampled values as
        # keyword arguments, hence the `**`-splatted argument.
        name = kwargs.pop("var_name")
        return cst.Call(
            cst.Attribute(cst.Name(model_name), cst.Name("logpdf")),
            list(args) + [cst.Arg(name, star="**")],
        )

    def placeholder_to_param(name: str):
        # Each random variable becomes a parameter of the compiled function.
        return cst.Param(cst.Name(name))

    for node in graph.random_variables:
        if not isinstance(node, SampleModelOp):
            continue
        rv_name = node.name
        returned_var_name = node.graph.returned_variables[0].name

        def sample_index(rv, returned_var, *_):
            # `rv["returned_var"]`: pick the returned variable out of the
            # sub-model's sample dictionary.
            return cst.Subscript(
                cst.Name(rv),
                [cst.SubscriptElement(cst.SimpleString(f"'{returned_var}'"))],
            )

        chosen_sample = Op(
            partial(sample_index, rv_name, returned_var_name),
            graph.name,
            f"{rv_name}_value",
        )
        # Re-route the sub-model's outgoing edges through `chosen_sample`.
        # Edges are collected first and removed afterwards: we cannot remove
        # edges while iterating over them.
        original_edges = []
        data = []
        out_nodes = []
        for e in graph.out_edges(node):
            datum = graph.get_edge_data(*e)
            data.append(datum)
            original_edges.append(e)
            out_nodes.append(e[1])
        for e in original_edges:
            graph.remove_edge(*e)
        graph.add(chosen_sample, node)
        for e, d in zip(out_nodes, data):
            graph.add_edge(chosen_sample, e, **d)

    # We need to loop through the nodes in reverse order because of the compilation
    # quirk which makes it that nodes added first to the graph appear first in the
    # functions arguments. This should be taken care of properly before merging.
    for node in reversed(list(graph.random_variables)):
        # Create a new placeholder node with the random variable's name.
        # It represents the value that will be passed to the logpdf.
        name = node.name
        rv_placeholder = Placeholder(
            partial(placeholder_to_param, name), name, is_random_variable=True
        )
        placeholders.append(rv_placeholder)
        # Transform the SampleOps from `a <~ Normal(0, 1)` into
        # `lopdf_a = Normal(0, 1).logpdf_sum(a)`
        if isinstance(node, SampleModelOp):
            node.cst_generator = partial(samplemodelop_to_logpdf, node.model_name)
        else:
            node.cst_generator = partial(sampleop_to_logpdf, node.cst_generator)
        node.name = f"logpdf_{node.scope}_{node.name}"
        logpdf_nodes.append(node)

    # `placeholders` and `logpdf_nodes` were appended in lockstep above, so
    # `zip` pairs each placeholder with its own logpdf expression.
    for placeholder, node in zip(placeholders, logpdf_nodes):
        # Add the placeholder to the graph and link it to the expression that
        # computes the logpdf. So far the expression looks like:
        #
        # >>> logpdf_a = Normal(0, 1).logpdf_sum(_)
        #
        # `a` is the placeholder and will appear into the arguments of
        # the function. Below we assign it to `_`.
        graph.add_node(placeholder)
        graph.add_edge(placeholder, node, type="kwargs", key=["var_name"])
        # Remove edges from the former SampleOp and replace by new placeholder
        # For instance, assume that part of our model is:
        #
        # >>> a <~ Normal(0, 1)
        # >>> x = jnp.log(a)
        #
        # Transformed to a logpdf this would look like:
        #
        # >>> logpdf_a = Normal(0, 1).logpdf_sum(a)
        # >>> x = jnp.log(a)
        #
        # Where a is now a placeholder, passed as an argument. The following
        # code links this placeholder to the expression `jnp.log(a)` and removes
        # the edge from `a <~ Normal(0, 1)`.
        #
        # We cannot remove edges while iterating over the graph, hence the two-step
        # process.
        successors = list(graph.successors(node))
        for s in successors:
            edge_data = graph.get_edge_data(node, s)
            graph.add_edge(placeholder, s, **edge_data)
        for s in successors:
            graph.remove_edge(node, s)

    # The original MCX model may return one or many variables. None of
    # these variables should be returned, so we turn the `is_returned` flag
    # to `False`.
    for node in graph.nodes():
        if isinstance(node, Op):
            node.is_returned = False

    return graph
# -------------------------------------------------------
# == PRIOR SAMPLING ==
# --------------------------------------------------------
def sample_predictive(model):
    """Compile the model into a function that draws predictive samples.

    Each random-variable statement ``a <~ Normal(0, 1)`` is rewritten into a
    draw, ``a = Normal(0, 1).sample(rng_key)``; a sampled sub-model is called
    directly with the rng key as its first argument. A ``rng_key``
    placeholder is added to the graph so the compiled function receives the
    key as an argument.
    """
    sampling_graph = copy.deepcopy(model.graph)
    key_placeholder = Placeholder(
        lambda: cst.Param(cst.Name(value="rng_key")), "rng_key"
    )

    def draw_from_distribution(cst_generator, *args, **kwargs):
        # `a <~ Normal(0, 1)` -> `a = Normal(0, 1).sample(rng_key)`
        key = kwargs.pop("rng_key")
        return cst.Call(
            func=cst.Attribute(cst_generator(*args, **kwargs), cst.Name("sample")),
            args=[cst.Arg(value=key)],
        )

    def draw_from_model(model_name, *args, **kwargs):
        # A sampled sub-model is called with the rng key first.
        key = kwargs.pop("rng_key")
        return cst.Call(
            func=cst.Name(value=model_name),
            args=[cst.Arg(value=key)] + list(args),
        )

    # Reverse iteration mirrors the rest of the compiler: nodes added first
    # appear first in the compiled function's argument list.
    rewritten = []
    for rv in reversed(list(sampling_graph.random_variables)):
        if isinstance(rv, SampleModelOp):
            rv.cst_generator = partial(draw_from_model, rv.model_name)
        else:
            rv.cst_generator = partial(draw_from_distribution, rv.cst_generator)
        rewritten.append(rv)

    # Make every sampling expression depend on the `rng_key` placeholder.
    sampling_graph.add(key_placeholder)
    for rv in rewritten:
        sampling_graph.add_edge(rv_u := rv, key=["rng_key"], type="kwargs", u_of_edge=key_placeholder) if False else sampling_graph.add_edge(key_placeholder, rv, type="kwargs", key=["rng_key"])

    return compile_graph(
        sampling_graph, model.namespace, f"{sampling_graph.name}_sample"
    )
def sample_joint(model):
"""Obtain forward samples from the joint distribution defined by the model."""
graph = copy.deepcopy(model.graph)
namespace = model.namespace
def to_dictionary_of_samples(random_variables, *_):
scopes = [rv.scope for rv in random_variables]
names = [rv.name for rv in random_variables]
scoped = defaultdict(dict)
for scope, var_name, var in zip(scopes, names, random_variables):
scoped[scope][var_name] = var
# if there is only one scope (99% of models) we return a flat dictionary
if len(set(scopes)) == 1:
scope = scopes[0]
return cst.Dict(
[
cst.DictElement(
cst.SimpleString(f"'{var_name}'"),
cst.Name(var.name),
)
for var_name, var in scoped[scope].items()
]
)
# Otherwise we return a nested dictionary where the first level is
# the scope, and then the variables.
return cst.Dict(
[
cst.DictElement(
cst.SimpleString(f"'{scope}'"),
cst.Dict(
[
cst.DictElement(
cst.SimpleString(f"'{var_name}'"),
cst.Name(var.name),
)
for var_name, var in scoped[scope].items()
]
),
)
for scope in scoped.keys()
]
)
# no node is returned anymore
for node in graph.nodes():
if isinstance(node, Op):
node.is_returned = False
rng_node = Placeholder(lambda: cst.Param(cst.Name(value="rng_key")), "rng_key")
# Update the SampleOps to return a sample from the distribution so that
# `a <~ Normal(0, 1)` becomes `a = Normal(0, 1).sample(rng_key)`.
def distribution_to_sampler(cst_generator, *args, **kwargs):
rng_key = kwargs.pop("rng_key")
return cst.Call(
func=cst.Attribute(cst_generator(*args, **kwargs), cst.Name("sample")),
args=[cst.Arg(value=rng_key)],
)
def model_to_sampler(model_name, *args, **kwargs):
rng_key = kwargs.pop("rng_key")
return cst.Call(
func=cst.Attribute(cst.Name(value=model_name), cst.Name("sample")),
args=[cst.Arg(value=rng_key)] + list(args),
)
random_variables = []
for node in reversed(list(graph.random_variables)):
if isinstance(node, SampleModelOp):
node.cst_generator = partial(model_to_sampler, node.model_name)
else:
node.cst_generator = partial(distribution_to_sampler, node.cst_generator)
random_variables.append(node)
# Link the `rng_key` placeholder to the sampling expressions
graph.add(rng_node)
for var in random_variables:
graph.add_edge(rng_node, var, type="kwargs", key=["rng_key"])
for node in graph.random_variables:
if not isinstance(node, SampleModelOp):
continue
rv_name = node.name
returned_var_name = node.graph.returned_variables[0].name
def sample_index(rv, returned_var, *_):
return cst.Subscript(
cst.Name(rv),
[cst.SubscriptElement(cst.SimpleString(f"'{returned_var}'"))],
)
chosen_sample = Op(
partial(sample_index, rv_name, returned_var_name),
graph.name,
rv_name + "_value",
)
original_edges = []
data = []
out_nodes = []
| |
request: :class:`tencentcloud.bmlb.v20180625.models.DescribeL7BackendsRequest`
:rtype: :class:`tencentcloud.bmlb.v20180625.models.DescribeL7BackendsResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeL7Backends", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeL7BackendsResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeL7ListenerInfo(self, request):
    """Find BM load balancer layer-7 listeners bound to a given host or
    having a given forwarding domain.

    :param request: Request structure for the DescribeL7ListenerInfo call.
    :type request: :class:`tencentcloud.bmlb.v20180625.models.DescribeL7ListenerInfoRequest`
    :rtype: :class:`tencentcloud.bmlb.v20180625.models.DescribeL7ListenerInfoResponse`
    """
    try:
        params = request._serialize()
        body = self.call("DescribeL7ListenerInfo", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.DescribeL7ListenerInfoResponse()
            model._deserialize(response["Response"])
            return model
        else:
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # Python 3 exceptions have no `.message` attribute; `e.message`
            # raised AttributeError and masked the real error.
            raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeL7Listeners(self, request):
    """Get the list of layer-7 listeners of a BM load balancer.

    :param request: Request structure for the DescribeL7Listeners call.
    :type request: :class:`tencentcloud.bmlb.v20180625.models.DescribeL7ListenersRequest`
    :rtype: :class:`tencentcloud.bmlb.v20180625.models.DescribeL7ListenersResponse`
    """
    try:
        params = request._serialize()
        body = self.call("DescribeL7Listeners", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.DescribeL7ListenersResponse()
            model._deserialize(response["Response"])
            return model
        else:
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # Python 3 exceptions have no `.message` attribute; `e.message`
            # raised AttributeError and masked the real error.
            raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeL7ListenersEx(self, request):
    """Get the layer-7 listeners under a given VPC (fuzzy matching supported).

    :param request: Request structure for the DescribeL7ListenersEx call.
    :type request: :class:`tencentcloud.bmlb.v20180625.models.DescribeL7ListenersExRequest`
    :rtype: :class:`tencentcloud.bmlb.v20180625.models.DescribeL7ListenersExResponse`
    """
    try:
        params = request._serialize()
        body = self.call("DescribeL7ListenersEx", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.DescribeL7ListenersExResponse()
            model._deserialize(response["Response"])
            return model
        else:
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # Python 3 exceptions have no `.message` attribute; `e.message`
            # raised AttributeError and masked the real error.
            raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeL7Rules(self, request):
    """Get the layer-7 forwarding rules of a BM load balancer.

    :param request: Request structure for the DescribeL7Rules call.
    :type request: :class:`tencentcloud.bmlb.v20180625.models.DescribeL7RulesRequest`
    :rtype: :class:`tencentcloud.bmlb.v20180625.models.DescribeL7RulesResponse`
    """
    try:
        params = request._serialize()
        body = self.call("DescribeL7Rules", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.DescribeL7RulesResponse()
            model._deserialize(response["Response"])
            return model
        else:
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # Python 3 exceptions have no `.message` attribute; `e.message`
            # raised AttributeError and masked the real error.
            raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeLoadBalancerPortInfo(self, request):
    """Get port-related information of a BM load balancer.

    :param request: Request structure for the DescribeLoadBalancerPortInfo call.
    :type request: :class:`tencentcloud.bmlb.v20180625.models.DescribeLoadBalancerPortInfoRequest`
    :rtype: :class:`tencentcloud.bmlb.v20180625.models.DescribeLoadBalancerPortInfoResponse`
    """
    try:
        params = request._serialize()
        body = self.call("DescribeLoadBalancerPortInfo", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.DescribeLoadBalancerPortInfoResponse()
            model._deserialize(response["Response"])
            return model
        else:
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # Python 3 exceptions have no `.message` attribute; `e.message`
            # raised AttributeError and masked the real error.
            raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeLoadBalancerTaskResult(self, request):
    """Query the execution status of an asynchronous load balancer task.

    :param request: Request structure for the DescribeLoadBalancerTaskResult call.
    :type request: :class:`tencentcloud.bmlb.v20180625.models.DescribeLoadBalancerTaskResultRequest`
    :rtype: :class:`tencentcloud.bmlb.v20180625.models.DescribeLoadBalancerTaskResultResponse`
    """
    try:
        params = request._serialize()
        body = self.call("DescribeLoadBalancerTaskResult", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.DescribeLoadBalancerTaskResultResponse()
            model._deserialize(response["Response"])
            return model
        else:
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # Python 3 exceptions have no `.message` attribute; `e.message`
            # raised AttributeError and masked the real error.
            raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeLoadBalancers(self, request):
    """Get the list of BM load balancer instances.

    :param request: Request structure for the DescribeLoadBalancers call.
    :type request: :class:`tencentcloud.bmlb.v20180625.models.DescribeLoadBalancersRequest`
    :rtype: :class:`tencentcloud.bmlb.v20180625.models.DescribeLoadBalancersResponse`
    """
    try:
        params = request._serialize()
        body = self.call("DescribeLoadBalancers", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.DescribeLoadBalancersResponse()
            model._deserialize(response["Response"])
            return model
        else:
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # Python 3 exceptions have no `.message` attribute; `e.message`
            # raised AttributeError and masked the real error.
            raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeTrafficMirrorListeners(self, request):
    """Get the list of listeners of a traffic mirror.

    :param request: Request structure for the DescribeTrafficMirrorListeners call.
    :type request: :class:`tencentcloud.bmlb.v20180625.models.DescribeTrafficMirrorListenersRequest`
    :rtype: :class:`tencentcloud.bmlb.v20180625.models.DescribeTrafficMirrorListenersResponse`
    """
    try:
        params = request._serialize()
        body = self.call("DescribeTrafficMirrorListeners", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.DescribeTrafficMirrorListenersResponse()
            model._deserialize(response["Response"])
            return model
        else:
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # Python 3 exceptions have no `.message` attribute; `e.message`
            # raised AttributeError and masked the real error.
            raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeTrafficMirrorReceiverHealthStatus(self, request):
    """Get the health status of traffic mirror receivers.

    :param request: Request structure for the DescribeTrafficMirrorReceiverHealthStatus call.
    :type request: :class:`tencentcloud.bmlb.v20180625.models.DescribeTrafficMirrorReceiverHealthStatusRequest`
    :rtype: :class:`tencentcloud.bmlb.v20180625.models.DescribeTrafficMirrorReceiverHealthStatusResponse`
    """
    try:
        params = request._serialize()
        body = self.call("DescribeTrafficMirrorReceiverHealthStatus", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.DescribeTrafficMirrorReceiverHealthStatusResponse()
            model._deserialize(response["Response"])
            return model
        else:
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # Python 3 exceptions have no `.message` attribute; `e.message`
            # raised AttributeError and masked the real error.
            raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeTrafficMirrorReceivers(self, request):
    """Get receiver information of a given traffic mirror instance.

    :param request: Request structure for the DescribeTrafficMirrorReceivers call.
    :type request: :class:`tencentcloud.bmlb.v20180625.models.DescribeTrafficMirrorReceiversRequest`
    :rtype: :class:`tencentcloud.bmlb.v20180625.models.DescribeTrafficMirrorReceiversResponse`
    """
    try:
        params = request._serialize()
        body = self.call("DescribeTrafficMirrorReceivers", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.DescribeTrafficMirrorReceiversResponse()
            model._deserialize(response["Response"])
            return model
        else:
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # Python 3 exceptions have no `.message` attribute; `e.message`
            # raised AttributeError and masked the real error.
            raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeTrafficMirrors(self, request):
    """Get the list of traffic mirror instances.

    :param request: Request structure for the DescribeTrafficMirrors call.
    :type request: :class:`tencentcloud.bmlb.v20180625.models.DescribeTrafficMirrorsRequest`
    :rtype: :class:`tencentcloud.bmlb.v20180625.models.DescribeTrafficMirrorsResponse`
    """
    try:
        params = request._serialize()
        body = self.call("DescribeTrafficMirrors", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.DescribeTrafficMirrorsResponse()
            model._deserialize(response["Response"])
            return model
        else:
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # Python 3 exceptions have no `.message` attribute; `e.message`
            # raised AttributeError and masked the real error.
            raise TencentCloudSDKException(type(e).__name__, str(e))
def ModifyL4BackendPort(self, request):
    """Modify the port of a backend instance bound to a layer-4 listener.

    :param request: Request structure for the ModifyL4BackendPort call.
    :type request: :class:`tencentcloud.bmlb.v20180625.models.ModifyL4BackendPortRequest`
    :rtype: :class:`tencentcloud.bmlb.v20180625.models.ModifyL4BackendPortResponse`
    """
    try:
        params = request._serialize()
        body = self.call("ModifyL4BackendPort", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.ModifyL4BackendPortResponse()
            model._deserialize(response["Response"])
            return model
        else:
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # Python 3 exceptions have no `.message` attribute; `e.message`
            # raised AttributeError and masked the real error.
            raise TencentCloudSDKException(type(e).__name__, str(e))
def ModifyL4BackendProbePort(self, request):
    """Modify the health-probe port of a backend bound to a layer-4 listener.

    :param request: Request structure for the ModifyL4BackendProbePort call.
    :type request: :class:`tencentcloud.bmlb.v20180625.models.ModifyL4BackendProbePortRequest`
    :rtype: :class:`tencentcloud.bmlb.v20180625.models.ModifyL4BackendProbePortResponse`
    """
    try:
        params = request._serialize()
        body = self.call("ModifyL4BackendProbePort", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.ModifyL4BackendProbePortResponse()
            model._deserialize(response["Response"])
            return model
        else:
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # Python 3 exceptions have no `.message` attribute; `e.message`
            # raised AttributeError and masked the real error.
            raise TencentCloudSDKException(type(e).__name__, str(e))
def ModifyL4BackendWeight(self, request):
    """Modify the weight of a backend instance bound to a layer-4 listener.

    :param request: Request structure for the ModifyL4BackendWeight call.
    :type request: :class:`tencentcloud.bmlb.v20180625.models.ModifyL4BackendWeightRequest`
    :rtype: :class:`tencentcloud.bmlb.v20180625.models.ModifyL4BackendWeightResponse`
    """
    try:
        params = request._serialize()
        body = self.call("ModifyL4BackendWeight", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.ModifyL4BackendWeightResponse()
            model._deserialize(response["Response"])
            return model
        else:
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # Python 3 exceptions have no `.message` attribute; `e.message`
            # raised AttributeError and masked the real error.
            raise TencentCloudSDKException(type(e).__name__, str(e))
def ModifyL4Listener(self, request):
    """Modify a BM load balancer layer-4 listener.

    :param request: Request structure for the ModifyL4Listener call.
    :type request: :class:`tencentcloud.bmlb.v20180625.models.ModifyL4ListenerRequest`
    :rtype: :class:`tencentcloud.bmlb.v20180625.models.ModifyL4ListenerResponse`
    """
    try:
        params = request._serialize()
        body = self.call("ModifyL4Listener", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.ModifyL4ListenerResponse()
            model._deserialize(response["Response"])
            return model
        else:
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # Python 3 exceptions have no `.message` attribute; `e.message`
            # raised AttributeError and masked the real error.
            raise TencentCloudSDKException(type(e).__name__, str(e))
def ModifyL7BackendPort(self, request):
    """Modify the port of a backend instance bound to a layer-7 forwarding path.

    :param request: Request structure for the ModifyL7BackendPort call.
    :type request: :class:`tencentcloud.bmlb.v20180625.models.ModifyL7BackendPortRequest`
    :rtype: :class:`tencentcloud.bmlb.v20180625.models.ModifyL7BackendPortResponse`
    """
    try:
        params = request._serialize()
        body = self.call("ModifyL7BackendPort", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.ModifyL7BackendPortResponse()
            model._deserialize(response["Response"])
            return model
        else:
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # Python 3 exceptions have no `.message` attribute; `e.message`
            # raised AttributeError and masked the real error.
            raise TencentCloudSDKException(type(e).__name__, str(e))
def ModifyL7BackendWeight(self, request):
    """Modify the weight of a backend instance bound to a layer-7 forwarding path.

    :param request: Request structure for the ModifyL7BackendWeight call.
    :type request: :class:`tencentcloud.bmlb.v20180625.models.ModifyL7BackendWeightRequest`
    :rtype: :class:`tencentcloud.bmlb.v20180625.models.ModifyL7BackendWeightResponse`
    """
    try:
        params = request._serialize()
        body = self.call("ModifyL7BackendWeight", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.ModifyL7BackendWeightResponse()
            model._deserialize(response["Response"])
            return model
        else:
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # Python 3 exceptions have no `.message` attribute; `e.message`
            # raised AttributeError and masked the real error.
            raise TencentCloudSDKException(type(e).__name__, str(e))
def ModifyL7Listener(self, request):
    """Modify a BM load balancer layer-7 listener.

    :param request: Request structure for the ModifyL7Listener call.
    :type request: :class:`tencentcloud.bmlb.v20180625.models.ModifyL7ListenerRequest`
    :rtype: :class:`tencentcloud.bmlb.v20180625.models.ModifyL7ListenerResponse`
    """
    try:
        params = request._serialize()
        body = self.call("ModifyL7Listener", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.ModifyL7ListenerResponse()
            model._deserialize(response["Response"])
            return model
        else:
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # Python 3 exceptions have no `.message` attribute; `e.message`
            # raised AttributeError and masked the real error.
            raise TencentCloudSDKException(type(e).__name__, str(e))
def ModifyL7Locations(self, request):
    """Modify layer-7 forwarding paths of a BM load balancer.

    :param request: Request structure for the ModifyL7Locations call.
    :type request: :class:`tencentcloud.bmlb.v20180625.models.ModifyL7LocationsRequest`
    :rtype: :class:`tencentcloud.bmlb.v20180625.models.ModifyL7LocationsResponse`
    """
    try:
        params = request._serialize()
        body = self.call("ModifyL7Locations", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.ModifyL7LocationsResponse()
            model._deserialize(response["Response"])
            return model
        else:
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # Python 3 exceptions have no `.message` attribute; `e.message`
            # raised AttributeError and masked the real error.
            raise TencentCloudSDKException(type(e).__name__, str(e))
def ModifyLoadBalancer(self, request):
    """Modify the basic configuration of a BM load balancer instance
    according to the input parameters (e.g. instance name, domain prefix).

    :param request: Request structure for the ModifyLoadBalancer call.
    :type request: :class:`tencentcloud.bmlb.v20180625.models.ModifyLoadBalancerRequest`
    :rtype: :class:`tencentcloud.bmlb.v20180625.models.ModifyLoadBalancerResponse`
    """
    try:
        params = request._serialize()
        body = self.call("ModifyLoadBalancer", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.ModifyLoadBalancerResponse()
            model._deserialize(response["Response"])
            return model
        else:
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            # Python 3 exceptions have no `.message` attribute; `e.message`
            # raised AttributeError and masked the real error.
            raise TencentCloudSDKException(type(e).__name__, str(e))
def ModifyLoadBalancerChargeMode(self, request):
"""更改黑石负载均衡的计费方式
:param request: 调用ModifyLoadBalancerChargeMode所需参数的结构体。
| |
ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(ResourceListOfWorkflow, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_workflows" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "ResourceListOfWorkflow",
}
return self.api_client.call_api(
'/api/v1/workflows', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def transit_workflow(self, workflow_instance_id, transit_workflow_request, **kwargs):  # noqa: E501
    """[EXPERIMENTAL] Transit workflow  # noqa: E501

    Move the workflow instance identified by `workflow_instance_id` to its
    next step, attaching the supplied data/context and action.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.transit_workflow(workflow_instance_id, transit_workflow_request, async_req=True)
    >>> result = thread.get()

    :param workflow_instance_id: Unique workflow instance identifier (required)
    :type workflow_instance_id: str
    :param transit_workflow_request: Data associated with the transit request (required)
    :type transit_workflow_request: TransitWorkflowRequest
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: WorkflowInstance
    """
    # Delegate to the *_with_http_info variant, forcing it to return only
    # the deserialized body rather than a (data, status, headers) tuple.
    call_kwargs = dict(kwargs)
    call_kwargs['_return_http_data_only'] = True
    return self.transit_workflow_with_http_info(
        workflow_instance_id, transit_workflow_request, **call_kwargs
    )  # noqa: E501
def transit_workflow_with_http_info(self, workflow_instance_id, transit_workflow_request, **kwargs):  # noqa: E501
    """[EXPERIMENTAL] Transit workflow  # noqa: E501
    This would transit the workflow instance to proceed to next step using WorkflowInstanceId with the attached data/context and action  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.transit_workflow_with_http_info(workflow_instance_id, transit_workflow_request, async_req=True)
    >>> result = thread.get()
    :param workflow_instance_id: Unique workflow instance identifier (required)
    :type workflow_instance_id: str
    :param transit_workflow_request: Data associated with the transit request (required)
    :type transit_workflow_request: TransitWorkflowRequest
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(WorkflowInstance, status_code(int), headers(HTTPHeaderDict))
    """
    # `locals()` captures the two required parameters plus the raw `kwargs`
    # dict; recognised kwargs are flattened into this mapping below.
    local_var_params = locals()

    # Names accepted by this endpoint: positional/path/body parameters first,
    # then the framework-level options shared by every generated method.
    all_params = [
        'workflow_instance_id',
        'transit_workflow_request'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )

    # Reject unknown keyword arguments early with an explicit error.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method transit_workflow" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'workflow_instance_id' is set
    if self.api_client.client_side_validation and ('workflow_instance_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['workflow_instance_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `workflow_instance_id` when calling `transit_workflow`")  # noqa: E501
    # verify the required parameter 'transit_workflow_request' is set
    if self.api_client.client_side_validation and ('transit_workflow_request' not in local_var_params or  # noqa: E501
                                                   local_var_params['transit_workflow_request'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `transit_workflow_request` when calling `transit_workflow`")  # noqa: E501

    collection_formats = {}

    # The instance id is interpolated into the URL path template below.
    path_params = {}
    if 'workflow_instance_id' in local_var_params:
        path_params['workflowInstanceId'] = local_var_params['workflow_instance_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The transit request object is serialized as the JSON request body.
    body_params = None
    if 'transit_workflow_request' in local_var_params:
        body_params = local_var_params['transit_workflow_request']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    header_params['Accept-Encoding'] = "gzip, deflate, br"

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json-patch+json', 'application/json', 'text/json', 'application/*+json'])  # noqa: E501

    # set the LUSID header
    header_params['X-LUSID-SDK-Language'] = 'Python'
    header_params['X-LUSID-SDK-Version'] = '0.1.25'

    # Authentication setting
    auth_settings = ['oauth2']  # noqa: E501

    # Maps HTTP status codes to the model class used for deserialization.
    response_types_map = {
        200: "WorkflowInstance",
        400: "LusidValidationProblemDetails",
        404: "str",
    }

    return self.api_client.call_api(
        '/api/v1/workflows/instances/{workflowInstanceId}', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def transit_workflow_by_correlation_id(self, correlation_id, transit_workflow_request, **kwargs): # noqa: E501
"""[EXPERIMENTAL] Transit workflow # noqa: E501
This would transit the workflow instance to proceed to next step using CorrelationId with the attached data/context and action # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.transit_workflow_by_correlation_id(correlation_id, transit_workflow_request, async_req=True)
>>> result = thread.get()
:param correlation_id: CorrelationId associated with the workflow instance (required)
:type correlation_id: str
:param transit_workflow_request: Data associated with the transit request (required)
:type transit_workflow_request: TransitWorkflowRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: WorkflowInstance
"""
kwargs['_return_http_data_only'] = True
return self.transit_workflow_by_correlation_id_with_http_info(correlation_id, transit_workflow_request, **kwargs) # noqa: E501
def transit_workflow_by_correlation_id_with_http_info(self, correlation_id, transit_workflow_request, **kwargs): # noqa: E501
"""[EXPERIMENTAL] Transit workflow # noqa: E501
This would transit the workflow instance to proceed to next step using CorrelationId with the attached data/context and action # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.transit_workflow_by_correlation_id_with_http_info(correlation_id, transit_workflow_request, async_req=True)
>>> result = thread.get()
:param correlation_id: CorrelationId associated with the workflow instance (required)
:type correlation_id: str
:param transit_workflow_request: Data associated with the transit request (required)
:type transit_workflow_request: TransitWorkflowRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(WorkflowInstance, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'correlation_id',
'transit_workflow_request'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method transit_workflow_by_correlation_id" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'correlation_id' is set
if self.api_client.client_side_validation and ('correlation_id' not in local_var_params or # noqa: E501
local_var_params['correlation_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `correlation_id` when calling `transit_workflow_by_correlation_id`") # noqa: E501
# verify the required parameter 'transit_workflow_request' is set
if self.api_client.client_side_validation and ('transit_workflow_request' not in local_var_params or # noqa: E501
local_var_params['transit_workflow_request'] is None): | |
import logging
import numpy as np
import scipy
import numba
import opt_einsum
import pyfftw
import mathx
from .. import bvar
from . import sa
logger = logging.getLogger(__name__)
# PEP 8 (E731): use named functions instead of lambdas bound to names, so the
# transforms get proper tracebacks and docstrings.  Signatures, names and
# behavior are unchanged; all transforms use unitary ('ortho') normalization.
def fft(Ar, axis=-1):
    """Unitary 1D FFT of Ar along axis (pyfftw, norm='ortho')."""
    return pyfftw.interfaces.numpy_fft.fft(Ar, norm='ortho', axis=axis)


def ifft(Ak, axis=-1):
    """Unitary 1D inverse FFT of Ak along axis (pyfftw, norm='ortho')."""
    return pyfftw.interfaces.numpy_fft.ifft(Ak, norm='ortho', axis=axis)


def fft2(Ar):
    """Unitary 2D FFT of Ar over its last two axes (pyfftw, norm='ortho')."""
    return pyfftw.interfaces.numpy_fft.fft2(Ar, norm='ortho')


def ifft2(Ak):
    """Unitary 2D inverse FFT of Ak over its last two axes (pyfftw, norm='ortho')."""
    return pyfftw.interfaces.numpy_fft.ifft2(Ak, norm='ortho')


def empty(shape):
    """Byte-aligned, uninitialized complex array suitable for FFTW transforms."""
    return pyfftw.empty_aligned(shape, complex)
def make_fft_matrix(num_points):
    """Return the num_points x num_points unitary DFT matrix.

    Built by transforming a byte-aligned identity matrix, so matrix @ v gives
    the same result as fft(v) for a length-num_points vector v.
    """
    identity = pyfftw.byte_align(np.eye(num_points))
    return fft(identity)
def make_ifft_arbitrary_matrix(r_support0, num_points0, q_center0, r1):
    """Build a matrix that inverse-transforms onto arbitrary real-space points r1.

    Warning: The matrix does not enforce a particular subset of the infinite
    periodic r1 domain. So if the range of r1 is greater than 2*pi/r_support0,
    then the output will be repeated.

    Args:
        r_support0: real-space support of the source sampling.
        num_points0: number of source sample points.
        q_center0: center of the source angular-frequency sampling.
        r1 (1D array): target real-space sample points.

    Returns:
        2D array with one row per element of r1 and one column per source
        frequency sample.
    """
    assert r1.ndim == 1
    q_grid = sa.calc_q(r_support0, num_points0, q_center0)
    normalization = len(q_grid)**0.5
    return mathx.expj(q_grid*r1[:, None])/normalization
# def make_ifft_arbitrary_matrix(r_support0, num_points0, q_center0, r_support1, num_points1, r_center1):
# q0 = sa.calc_q(r_support0, num_points0, q_center0)
# r1 = sa.calc_r(r_support1, num_points1, r_center1)[:, None]
# return mathx.expj(q0*r1)/num_points0**0.5
def expand_kz(k, kxc, kyc):
    """Expand z component of wavenumber to second order.

    kz is defined by the free-space dispersion relation
    kz = (k**2 - kx**2 - ky**2)**0.5, evaluated with its first and second
    derivatives at the expansion point (kxc, kyc).

    Args:
        k: Wavenumber.
        kxc: Expansion kx coordinate.
        kyc: Expansion ky coordinate.

    Returns:
        kz: Zeroth order term.
        gxkz: First derivative w.r.t x.
        gykz: First derivative w.r.t y.
        gxxkz: Second derivative w.r.t. x.
        gyykz: Second derivative w.r.t. y.
        gxykz: Mixed second derivative w.r.t. x and y.
    """
    kz = (k**2 - kxc**2 - kyc**2)**0.5
    gxkz = -kxc/kz
    gykz = -kyc/kz
    # (k**2 - kxc**2 - kyc**2)**(3/2) == kz**3; reuse kz rather than
    # recomputing the radicand.
    denominator = kz**3
    gxxkz = -(k**2 - kyc**2)/denominator
    gyykz = -(k**2 - kxc**2)/denominator
    gxykz = -kxc*kyc/denominator
    return kz, gxkz, gykz, gxxkz, gyykz, gxykz
def calc_quadratic_kz_correction(k, kxc, kyc):
    """Calculate correction that needs to be applied to propagation with quadratic dispersion relation.

    The propagator that should be applied is -(fx*kx**2 + fy*ky**2)*z/(2*k) (no zeroth order term).

    Args:
        k: wavenumber
        kxc: central x wavenumber
        kyc: central y wavenumber

    Returns:
        fx: factor by which propagation distance should be multiplied to give quadratic x propagation distance
        fy: same for y
        delta_kz: Zero order correction factor. The corresponding additional phase is delta_kz*z.
        delta_gxkz: first order x correction factor i.e. result should have phase of delta_gxkz*kx*z applied. Negative of
            the corresponding translation.
        delta_gykz: same for y.
    """
    # Get zeroth, first and second order (nonmixed) derivatives of k at (kxc, kyc).
    # gxykz (the mixed second derivative) is not used by this separable x/y correction.
    kz, gxkz, gykz, gxxkz, gyykz, gxykz = expand_kz(k, kxc, kyc)
    # Calculate first and second order derivatives of the quadratic approximation at (kxc, kyc).
    gxkz_p = -kxc/k
    gykz_p = -kyc/k
    gxxkz_p = -1/k
    gyykz_p = -1/k
    # Correction factors are ratios of correct second derivatives to the quadratic approximation ones.
    fx = gxxkz/gxxkz_p
    fy = gyykz/gyykz_p
    # Shift corrections are difference between correct first order terms and quadratic approximation ones.
    delta_gxkz = gxkz - fx*gxkz_p
    delta_gykz = gykz - fy*gykz_p
    # Evaluate zeroth order kz for x and y quadratic approximation.
    kz_px = -kxc**2/(2*k)
    kz_py = -kyc**2/(2*k)
    # Zeroth order correction is what is required to match k_z at (kxc, kyc) after quadratic approximation propagation
    # (with factors fx and fy) and shift correction has been applied.
    delta_kz = kz - fx*kz_px - fy*kz_py - delta_gxkz*kxc - delta_gykz*kyc
    return fx, fy, delta_kz, delta_gxkz, delta_gykz
@numba.vectorize([numba.float64(numba.float64, numba.float64, numba.float64)])
def calc_kz_exact(k, kx, ky):
    """Exact longitudinal wavenumber kz from the dispersion relation kz**2 = k**2 - kx**2 - ky**2."""
    return (k**2 - kx**2 - ky**2)**0.5
@numba.vectorize([numba.float64(numba.float64, numba.float64, numba.float64)])
def calc_kz_paraxial(k, kx, ky):
    """Paraxial (second order in kx, ky) approximation to kz, including the zeroth order term k."""
    return k - (kx**2 + ky**2)/(2*k)
@numba.vectorize([numba.float64(numba.float64, numba.float64, numba.float64)])
def calc_kz_quadratic(k, kx, ky):
    """Quadratic part of the paraxial kz; the zeroth order term k is NOT included."""
    return -(kx**2 + ky**2)/(2*k)
@numba.vectorize([numba.float64(numba.float64, numba.float64)])
def calc_kz_quadratic_1d(k, q):
    """1D quadratic dispersion term for transverse wavenumber q; the zeroth order term (kz) is not included."""
    return -q**2/(2*k)
@numba.vectorize([numba.complex64(numba.float64, numba.float64, numba.float64, numba.float64)])
def calc_propagator_exact(k, kx, ky, l):
    """Propagator mathx.expj(kz*l) over distance l using the exact kz.

    Note: the signature types the result as complex64 (single precision) from float64 inputs.
    """
    return mathx.expj(calc_kz_exact(k, kx, ky)*l)
@numba.vectorize([numba.complex64(numba.float64, numba.float64, numba.float64, numba.float64)])
def calc_propagator_paraxial(k, kx, ky, l):
    """Propagator mathx.expj(kz*l) over distance l using the paraxial kz (zeroth order k included)."""
    return mathx.expj(calc_kz_paraxial(k, kx, ky)*l)
@numba.vectorize([numba.complex64(numba.float64, numba.float64, numba.float64, numba.float64)])
def calc_propagator_quadratic(k, kx, ky, l):
    """Propagator mathx.expj(kz*l) over distance l using only the quadratic part of kz (zeroth order not included)."""
    return mathx.expj(calc_kz_quadratic(k, kx, ky)*l)
@numba.vectorize([numba.complex64(numba.float64, numba.float64, numba.float64)], nopython=True)
def calc_propagator_quadratic_1d(k, q, l):
    """1D propagator mathx.expj(kz*l) using the quadratic dispersion term; zeroth order kz not included.

    NOTE(review): this is the only vectorized function here passing nopython=True --
    confirm whether the siblings should do the same.
    """
    return mathx.expj(calc_kz_quadratic_1d(k, q)*l)
@numba.vectorize([numba.complex64(numba.float64, numba.float64, numba.float64)])
def calc_quadratic_phase_1d(k, r, roc):
    """Quadratic phase factor mathx.expj(k*r**2/(2*roc)) for radius of curvature roc at position r."""
    return mathx.expj(k*r**2/(2*roc))
# def prepare_curved_propagation_1d_tilted_shifted(k, r_support, Er, z, m, r_center = 0, q_center = 0, axis = -1):
# num_points = Er.shape[axis]
# r = calc_r(r_support, num_points, r_center)
# Er *= np.exp(-1j*q_center*r)
# propagator, post_factor = prepare_curved_propagation_1d(k, r_support, Er, z, m, axis)
# r_shift = q_center/k*z
# rp_center = r_center+r_shift
# rp = calc_r(r_support*m, num_points, rp_center)
# post_factor *= np.exp(1j*q_center*rp-1j*q_center**2*z/(2*k))
# return propagator, post_factor, rp_center
def calc_flat_plane(k, r_support, num_points, roc):
    """Magnification, distance and Rayleigh range of the flat plane of a beam.

    Args:
        k: wavenumber.
        r_support: real-space support.
        num_points: number of samples.
        roc: radius of curvature; an infinite roc means the beam is already flat.

    Returns:
        tuple (m_flat, z_flat, z_R): magnification to the flat plane, distance
        to it, and the Rayleigh range z_R computed at the flat plane.
    """
    typical_radius = r_support/(np.pi*num_points)**0.5
    if not np.isfinite(roc):
        # Infinite curvature: already at the flat plane.
        rayleighs = 0
        magnification = 1
    else:
        rayleighs = k*typical_radius**2/(2*roc)
        magnification = (1 + rayleighs**2)**0.5
    flat_radius = typical_radius/magnification
    rayleigh_range = k*flat_radius**2/2
    return magnification, rayleighs*rayleigh_range, rayleigh_range
def calc_curved_propagation(k, r_support, num_points, roc, z):
    """Flat-plane parameters and net magnification for propagating distance z.

    Returns:
        tuple (m_flat, z_flat, m): flat-plane magnification and distance (as
        returned by calc_flat_plane for curvature roc), and the magnification m
        after propagating an additional distance z from the input plane.
    """
    m_flat, z_flat, z_R = calc_flat_plane(k, r_support, num_points, roc)
    total_rayleighs = (z + z_flat)/z_R
    magnification = (1 + total_rayleighs**2)**0.5/m_flat
    return m_flat, z_flat, magnification
def calc_curved_propagation_m(k, r_support, num_points, roc, z):
    """Convenience wrapper: only the magnification from calc_curved_propagation."""
    _, _, magnification = calc_curved_propagation(k, r_support, num_points, roc, z)
    return magnification
def calc_kz(k, kx, ky, kz_mode: str = 'exact'):
    """Longitudinal wavenumber kz for transverse wavenumbers (kx, ky).

    Args:
        k: wavenumber.
        kx: transverse x wavenumber.
        ky: transverse y wavenumber.
        kz_mode: 'paraxial' (kz taken as k) or one of 'local_xy', 'local',
            'exact' (exact dispersion relation).

    Returns:
        kz.

    Raises:
        ValueError: if kz_mode is not one of the recognized modes.
    """
    if kz_mode == 'paraxial':
        kz = k
    elif kz_mode in ('local_xy', 'local', 'exact'):
        kz = (k**2 - kx**2 - ky**2)**0.5
    else:
        # Bug fix: the mode was previously passed logging-style as a second
        # positional argument to ValueError instead of being interpolated
        # into the message.
        raise ValueError('Unknown kz_mode %s.' % kz_mode)
    return kz
def adjust_r(k, ri, z, qs, kz_mode='local_xy'):
    """Shift transverse position(s) ri by the central-ray offset qs/kz*z.

    Args:
        k: wavenumber.
        ri: input transverse position(s).
        z: propagation distance.
        qs: transverse wavenumber(s); normalized to a pair by sa.to_scalar_pair.
        kz_mode: passed through to calc_kz.

    Returns:
        Adjusted transverse position(s).
    """
    qs_pair = sa.to_scalar_pair(qs)
    kz = calc_kz(k, *qs_pair, kz_mode)
    return ri + qs_pair/kz*z
# def refract_field(normal, k1, Er, gradEr, k2):
# I = mathx.abs_sqd(Er)
# Ik = [(component*Er.conj()).imag for component in gradEr]
# # Our k-vector is gradient of the eikonal (with scalar k included). In the short wavelength approximation (geometrical optics)
# # the length of the eikonal is k. (See Born & Wolf eq. 15b in sec. 3.1.)
# Ik[2] = np.maximum((I*k1)**2-mathx.dot(Ik[:2]), 0)**0.5
# Ik_tangent = mathx.project(Ik, normal)
# Ik_normal = np.maximum((I*k2)**2-mathx.dot(Ik_tangent), 0)**0.5
# Ik2 = [tc+nc*Ik_normal for tc, nc in zip(Ik_tangent, normal)]
# return Ik2, I
def calc_propagation_m_1d(k, r_support, var_r0, phi_c, var_q, z, num_points):
    """Calculate sensible magnification for Sziklas-Siegman propagation.

    The magnification is chosen so that the ratio of the RMS radius to the
    support will be equal in real and angular space. This has gone through
    various versions - basically the right answer depends a lot on context.
    The current version aims to keep it simple.

    The operations are all elementwise, so broadcasting any argument is allowed.

    Args:
        k: Wavenumber.
        r_support: real-space support.
        var_r0: real-space variance at the input plane (passed to bvar).
        phi_c: phase curvature parameter (passed to bvar).
        var_q: angular variance (passed to bvar).
        z: propagation distance.
        num_points: number of samples.

    Returns:
        Magnification (elementwise).
    """
    q_support = 2*np.pi*num_points/r_support
    # The real-space variance at z sets a lower limit on the magnification.
    var_rz, phi_cz, _ = bvar.calc_propagated_variance_1d(k, var_r0, phi_c, var_q, z)
    lower_bound = var_rz**0.5*12/r_support
    # The minimum angular variance at z does not change with propagation; it
    # sets an upper limit on the magnification.
    var_q_min = bvar.calc_minimum_angular_variance_1d(var_rz, phi_cz, var_q)
    upper_bound = q_support/(var_q_min**0.5*12)
    if np.any(lower_bound > upper_bound):
        logger.warning('Magnification lower bound greater than upper bound.')
    # The lower bound is used as the magnification.
    return lower_bound
def calc_propagation_ms(k, rs_support, var_r0s, phi_cs, var_qs, zs, num_pointss, f_nexts=(np.inf, np.inf), qfds=(1, 1)):
    """Calculate Sziklas-Siegman propagation magnifications for both axes.

    Each per-axis argument is normalized to a pair with sa.to_scalar_pair and
    handed to calc_propagation_m_1d.

    Args:
        k: Wavenumber.
        rs_support: real-space supports (scalar or pair).
        var_r0s: real-space variances at the input plane (scalar or pair).
        phi_cs: phase curvature parameters (scalar or pair).
        var_qs: angular variances (scalar or pair).
        zs: propagation distances (scalar or pair).
        num_pointss: numbers of samples (scalar or pair).
        f_nexts: deprecated and ignored; kept for backward compatibility.
        qfds: deprecated and ignored; kept for backward compatibility.

    Returns:
        Array of the two magnifications (x, y).
    """
    rs_support = sa.to_scalar_pair(rs_support)
    var_r0s = sa.to_scalar_pair(var_r0s)
    phi_cs = sa.to_scalar_pair(phi_cs)
    var_qs = sa.to_scalar_pair(var_qs)
    zs = sa.to_scalar_pair(zs)
    num_pointss = sa.to_scalar_pair(num_pointss)
    ms = []
    for r_support, var_r0, phi_c, var_q, z, num_points in zip(
            rs_support, var_r0s, phi_cs, var_qs, zs, num_pointss):
        # Bug fix: calc_propagation_m_1d accepts 7 arguments; f_next and qfd
        # were previously forwarded as extra positional arguments, which raised
        # TypeError on every call.
        ms.append(calc_propagation_m_1d(k, r_support, var_r0, phi_c, var_q, z, num_points))
    return np.asarray(ms)
def calc_spherical_post_factor(k, rs_support, num_pointss, z, ms, rs_center=(0, 0), qs_center=(0, 0), ro_centers=None,
                               kz_mode='local_xy'):
    """Quadratic ('spherical') post factor for Sziklas-Siegman propagation.

    Args:
        k: wavenumber.
        rs_support: input real-space supports (pair).
        num_pointss: numbers of samples (pair).
        z (scalar): propagation distance.
        ms: magnifications (pair).
        rs_center: input real-space center positions (pair).
        qs_center: center transverse wavenumbers; normalized to a pair.
        ro_centers: output center positions; if None, inferred from the
            central-ray offset qs_center/kz_center*z.
        kz_mode (str): 'paraxial' or 'local_xy'.

    Returns:
        Qo: array of phase factors to apply after propagation.

    Raises:
        ValueError: if kz_mode is not 'paraxial' or 'local_xy'.
    """
    assert np.isscalar(z)
    ro_supports = rs_support*ms
    qs_center = sa.to_scalar_pair(qs_center)
    if kz_mode == 'paraxial':
        zx = z
        zy = z
        kz_center = k
        delta_kz = k
    elif kz_mode == 'local_xy':
        fx, fy, delta_kz, delta_gxkz, delta_gykz = calc_quadratic_kz_correction(k, *qs_center)
        zx = fx*z
        zy = fy*z
        kz_center = (k**2 - (qs_center**2).sum())**0.5
    else:
        # Bug fix: the message had a stray space ('kz_ mode') and the mode was
        # passed logging-style as a second positional argument to ValueError
        # instead of being interpolated.
        raise ValueError('Unknown kz_mode %s.' % kz_mode)
    if ro_centers is None:
        ro_centers = rs_center + qs_center/kz_center*z
    xo, yo = sa.calc_xy(ro_supports, num_pointss, ro_centers)
    if kz_mode == 'local_xy':
        # First-order kz correction shows up as a translation of the output grid.
        xo += delta_gxkz*z
        yo += delta_gykz*z
    roc_x = zx/(ms[0] - 1)
    roc_y = zy/(ms[1] - 1)
    roc_xo = roc_x + zx
    roc_yo = roc_y + zy
    # See derivation page 114 Dane's logbook 2.
    Qo = calc_quadratic_phase_1d(k, xo, roc_xo)*calc_quadratic_phase_1d(k, yo, roc_yo)*mathx.expj(delta_kz*z + k*(
        rs_center[0]**2/(2*roc_x) + rs_center[1]**2/(2*roc_y) - rs_center[0]*xo/(roc_x*ms[0]) - rs_center[
        1]*yo/(roc_y*ms[1])))/(ms[0]*ms[1])**0.5
    return Qo
def calc_plane_to_curved_flat_factors(k, rs_support, num_pointss, z, qs_center=(0, 0), kz_mode='local_xy'):
"""Calculate factors for propagation from plane to uniformly sampled curved surface.
Args:
k:
rs_support:
num_pointss:
z:
qs_center:
kz_mode (str): 'paraxial' or 'local_xy'.
Returns:
invTx (2D array):
gradxinvTx (2D array):
invTy (2D array):
gradyinvTy (2D array):
Px (3D array):
Py (3D array):
Tx (2D array):
Ty (2D array):
"""
assert kz_mode in ('local_xy', 'paraxial')
Tx = make_fft_matrix(num_pointss[0])
if num_pointss[1] == num_pointss[0]:
Ty = Tx
else:
Ty = make_fft_matrix(num_pointss[1])
if kz_mode == 'local_xy':
fx, fy, delta_kz, delta_gxkz, delta_gykz = calc_quadratic_kz_correction(k, *qs_center)
zx = z*fx
zy = z*fy
else:
zx = z
zy = z
delta_kz = k
kx, ky = sa.calc_kxky(rs_support, num_pointss, qs_center)
# kx | |
3.1217e-01,
1318.0: 3.3328e-01,
1319.0: 2.6855e-01,
1320.0: 2.5872e-01,
1321.0: 2.9866e-01,
1322.0: 3.0217e-01,
1323.0: 2.3279e-01,
1324.0: 2.6249e-01,
1325.0: 3.2224e-01,
1326.0: 2.8051e-01,
1327.0: 2.6625e-01,
1328.0: 2.3450e-01,
1329.0: 1.7759e-01,
1330.0: 2.2923e-01,
1331.0: 1.4480e-01,
1332.0: 1.4579e-01,
1333.0: 2.0304e-01,
1334.0: 1.6925e-01,
1335.0: 2.3117e-01,
1336.0: 1.8348e-01,
1337.0: 1.6454e-01,
1338.0: 1.7804e-01,
1339.0: 1.7681e-01,
1340.0: 1.6831e-01,
1341.0: 1.7039e-01,
1342.0: 1.7798e-01,
1343.0: 1.2711e-01,
1344.0: 7.5645e-02,
1345.0: 1.0904e-01,
1346.0: 5.8186e-02,
1347.0: 6.0119e-02,
1348.0: 4.7451e-03,
1349.0: 1.6159e-02,
1350.0: 1.6025e-02,
1351.0: 4.6298e-03,
1352.0: 1.5164e-03,
1353.0: 9.6096e-05,
1354.0: 2.9009e-04,
1355.0: 3.6034e-06,
1356.0: 4.8070e-05,
1357.0: 7.1786e-05,
1358.0: 4.1948e-06,
1359.0: 7.3439e-07,
1360.0: 2.1404e-06,
1361.0: 4.8133e-09,
1362.0: 1.8076e-11,
1363.0: 3.1563e-06,
1364.0: 1.3589e-06,
1365.0: 9.0764e-12,
1366.0: 1.2791e-05,
1367.0: 4.9764e-06,
1368.0: 1.4810e-13,
1369.0: 5.1667e-07,
1370.0: 2.9200e-07,
1371.0: 1.9731e-08,
1372.0: 2.7498e-06,
1373.0: 4.4401e-05,
1374.0: 1.7917e-04,
1375.0: 3.2332e-04,
1376.0: 2.5748e-04,
1377.0: 1.2270e-04,
1378.0: 1.1089e-03,
1379.0: 5.2164e-05,
1380.0: 8.1587e-05,
1381.0: 2.3716e-06,
1382.0: 2.5672e-06,
1383.0: 4.4017e-08,
1384.0: 6.1689e-07,
1385.0: 2.0899e-06,
1386.0: 2.5215e-06,
1387.0: 1.9896e-04,
1388.0: 4.0262e-06,
1389.0: 5.8098e-04,
1390.0: 4.9328e-04,
1391.0: 3.4384e-04,
1392.0: 2.3782e-05,
1393.0: 1.1586e-04,
1394.0: 7.5526e-05,
1395.0: 6.7136e-07,
1396.0: 6.3215e-09,
1397.0: 4.9057e-05,
1398.0: 1.2704e-03,
1399.0: 8.1226e-04,
1400.0: 3.2466e-09,
1401.0: 1.0528e-08,
1402.0: 1.8353e-03,
1403.0: 2.3800e-03,
1404.0: 7.3892e-04,
1405.0: 3.6444e-07,
1406.0: 2.0448e-03,
1407.0: 1.7457e-04,
1408.0: 1.6493e-03,
1409.0: 6.1919e-04,
1410.0: 4.6653e-04,
1411.0: 2.1142e-03,
1412.0: 2.6396e-03,
1413.0: 2.3353e-02,
1414.0: 3.6378e-04,
1415.0: 1.8366e-04,
1416.0: 3.5565e-02,
1417.0: 1.1759e-02,
1418.0: 1.3559e-02,
1419.0: 2.1442e-03,
1420.0: 8.2718e-03,
1421.0: 9.1637e-03,
1422.0: 4.6314e-02,
1423.0: 9.2198e-03,
1424.0: 1.6975e-02,
1425.0: 2.5850e-02,
1426.0: 2.7792e-02,
1427.0: 4.9546e-02,
1428.0: 4.5588e-03,
1429.0: 3.8020e-02,
1430.0: 6.1601e-02,
1431.0: 5.0156e-02,
1432.0: 2.5194e-03,
1433.0: 3.5834e-02,
1434.0: 2.0962e-02,
1435.0: 2.1416e-02,
1436.0: 3.8351e-02,
1437.0: 2.9880e-02,
1438.0: 1.3263e-02,
1439.0: 5.1039e-02,
1440.0: 3.9601e-02,
1441.0: 3.1800e-02,
1442.0: 3.6317e-02,
1443.0: 4.5063e-02,
1444.0: 6.1791e-02,
1445.0: 4.9751e-02,
1446.0: 2.3095e-02,
1447.0: 3.6215e-02,
1448.0: 1.1569e-01,
1449.0: 1.0213e-01,
1450.0: 2.7412e-02,
1451.0: 1.1271e-02,
1452.0: 6.2361e-02,
1453.0: 8.1978e-02,
1454.0: 1.3759e-01,
1455.0: 6.6150e-02,
1456.0: 8.8509e-02,
1457.0: 1.1700e-01,
1458.0: 1.3643e-01,
1459.0: 1.6307e-01,
1460.0: 8.5421e-02,
1461.0: 9.0276e-02,
1462.0: 1.3060e-01,
1463.0: 4.3225e-02,
1464.0: 1.5184e-01,
1465.0: 9.3383e-02,
1466.0: 6.5197e-02,
1467.0: 3.6054e-02,
1468.0: 7.6942e-02,
1469.0: 9.4845e-02,
1470.0: 4.9678e-02,
1471.0: 1.7848e-02,
1472.0: 4.6771e-02,
1473.0: 7.0198e-02,
1474.0: 9.7339e-02,
1475.0: 1.8463e-01,
1476.0: 6.8778e-02,
1477.0: 6.9736e-02,
1478.0: 6.3480e-02,
1479.0: 1.2001e-01,
1480.0: 6.0637e-02,
1481.0: 1.1529e-01,
1482.0: 5.8490e-02,
1483.0: 1.4859e-01,
1484.0: 1.3747e-01,
1485.0: 1.2503e-01,
1486.0: 1.2340e-01,
1487.0: 6.0629e-02,
1488.0: 9.4180e-02,
1489.0: 1.8973e-01,
1490.0: 1.7478e-01,
1491.0: 1.9778e-01,
1492.0: 1.6441e-01,
1493.0: 1.8157e-01,
1494.0: 2.0367e-01,
1495.0: 1.8253e-01,
1496.0: 1.6852e-01,
1497.0: 2.2850e-01,
1498.0: 1.8968e-01,
1499.0: 2.1759e-01,
1500.0: 2.5061e-01,
1501.0: 2.6552e-01,
1502.0: 2.3356e-01,
1503.0: 1.8493e-01,
1504.0: 1.6029e-01,
1505.0: 1.8402e-01,
1506.0: 2.5773e-01,
1507.0: 2.5514e-01,
1508.0: 2.4302e-01,
1509.0: 1.8690e-01,
1510.0: 2.7052e-01,
1511.0: 2.6474e-01,
1512.0: 2.6068e-01,
1513.0: 2.4239e-01,
1514.0: 2.2571e-01,
1515.0: 2.6573e-01,
1516.0: 2.5683e-01,
1517.0: 2.4929e-01,
1518.0: 2.5211e-01,
1519.0: 2.4437e-01,
1520.0: 2.6450e-01,
1521.0: 2.7505e-01,
1522.0: 2.6378e-01,
1523.0: 2.8004e-01,
1524.0: 2.7539e-01,
1525.0: 2.5884e-01,
1526.0: 2.6745e-01,
1527.0: 2.6220e-01,
1528.0: 2.7928e-01,
1529.0: 2.7244e-01,
1530.0: 2.5522e-01,
1531.0: 2.6973e-01,
1532.0: 2.7839e-01,
1533.0: 2.7714e-01,
1534.0: 2.6892e-01,
1535.0: 2.6686e-01,
1536.0: 2.7464e-01,
1537.0: 2.7336e-01,
1538.0: 2.7202e-01,
1539.0: 2.7295e-01,
1540.0: 2.6491e-01,
1541.0: 2.6904e-01,
1542.0: 2.6927e-01,
1543.0: 2.7208e-01,
1544.0: 2.7210e-01,
1545.0: 2.7705e-01,
1546.0: 2.7481e-01,
1547.0: 2.7309e-01,
1548.0: 2.6675e-01,
1549.0: 2.7342e-01,
1550.0: 2.6990e-01,
1551.0: 2.7058e-01,
1552.0: 2.7182e-01,
1553.0: 2.7132e-01,
1554.0: 2.6474e-01,
1555.0: 2.6759e-01,
1556.0: 2.6310e-01,
1557.0: 2.7062e-01,
1558.0: 2.6848e-01,
1559.0: 2.6808e-01,
1560.0: 2.6568e-01,
1561.0: 2.7002e-01,
1562.0: 2.6756e-01,
1563.0: 2.6667e-01,
1564.0: 2.6264e-01,
1565.0: 2.6728e-01,
1566.0: 2.6245e-01,
1567.0: 2.6308e-01,
1568.0: 2.5722e-01,
1569.0: 2.5452e-01,
1570.0: 2.4175e-01,
1571.0: 2.3507e-01,
1572.0: 2.3775e-01,
1573.0: 2.3407e-01,
1574.0: 2.4145e-01,
1575.0: 2.3974e-01,
1576.0: 2.4678e-01,
1577.0: 2.1602e-01,
1578.0: 2.3516e-01,
1579.0: 2.3672e-01,
1580.0: 2.4464e-01,
1581.0: 2.4870e-01,
1582.0: 2.4195e-01,
1583.0: 2.4755e-01,
1584.0: 2.4904e-01,
1585.0: 2.5874e-01,
1586.0: 2.5569e-01,
1587.0: 2.5303e-01,
1588.0: 2.5107e-01,
1589.0: 2.3233e-01,
1590.0: 2.4179e-01,
1591.0: 2.4197e-01,
1592.0: 2.5225e-01,
1593.0: 2.5833e-01,
1594.0: 2.5624e-01,
1595.0: 2.5823e-01,
1596.0: 2.4452e-01,
1597.0: 2.4692e-01,
1598.0: 2.5421e-01,
1599.0: 2.4202e-01,
1600.0: 2.3810e-01,
1601.0: 2.2323e-01,
1602.0: 2.2413e-01,
1603.0: 2.2397e-01,
1604.0: 2.2842e-01,
1605.0: 2.3683e-01,
1606.0: 2.4140e-01,
1607.0: 2.3296e-01,
1608.0: 2.2990e-01,
1609.0: 2.2727e-01,
1610.0: 2.1760e-01,
1611.0: 2.2680e-01,
1612.0: 2.3076e-01,
1613.0: 2.3719e-01,
1614.0: 2.3838e-01,
1615.0: 2.4104e-01,
1616.0: 2.3050e-01,
1617.0: 2.3465e-01,
1618.0: 2.4352e-01,
1619.0: 2.4100e-01,
1620.0: 2.3449e-01,
1621.0: 2.3430e-01,
1622.0: 2.3754e-01,
1623.0: 2.4246e-01,
1624.0: 2.4269e-01,
1625.0: 2.3782e-01,
1626.0: 2.3971e-01,
1627.0: 2.4078e-01,
1628.0: 2.4126e-01,
1629.0: 2.4137e-01,
1630.0: 2.3651e-01,
1631.0: 2.3806e-01,
1632.0: 2.3821e-01,
1633.0: 2.3267e-01,
1634.0: 2.3282e-01,
1635.0: 2.3367e-01,
1636.0: 2.3539e-01,
1637.0: 2.2700e-01,
1638.0: 2.2007e-01,
1639.0: 2.2026e-01,
1640.0: 2.1511e-01,
1641.0: 2.1960e-01,
1642.0: 2.2082e-01,
1643.0: 2.1535e-01,
1644.0: 2.2355e-01,
1645.0: 2.1822e-01,
1646.0: 2.1749e-01,
1647.0: 2.2768e-01,
1648.0: 2.1655e-01,
1649.0: 2.1867e-01,
1650.0: 2.2526e-01,
1651.0: 2.0855e-01,
1652.0: 2.2373e-01,
1653.0: 2.2277e-01,
1654.0: 2.1583e-01,
1655.0: 2.2231e-01,
1656.0: 2.2101e-01,
1657.0: 2.2223e-01,
1658.0: 2.2487e-01,
1659.0: 2.2120e-01,
1660.0: 2.2332e-01,
1661.0: 2.2384e-01,
1662.0: 2.1908e-01,
1663.0: 2.2235e-01,
1664.0: 2.2098e-01,
1665.0: 2.1178e-01,
1666.0: 1.7884e-01,
1667.0: 2.1068e-01,
1668.0: 2.1459e-01,
1669.0: 2.1516e-01,
1670.0: 2.2168e-01,
1671.0: 2.1879e-01,
1672.0: 2.1147e-01,
1673.0: 2.1629e-01,
1674.0: 2.1575e-01,
1675.0: 2.1360e-01,
1676.0: 2.1145e-01,
1677.0: 2.1229e-01,
1678.0: 2.0915e-01,
1679.0: 2.1303e-01,
1680.0: 2.0558e-01,
1681.0: 1.9447e-01,
1682.0: 2.0366e-01,
1683.0: 2.0906e-01,
1684.0: 1.9797e-01,
1685.0: 2.1321e-01,
1686.0: 2.1026e-01,
1687.0: 2.0484e-01,
1688.0: 2.1013e-01,
1689.0: 2.0718e-01,
1690.0: 2.0523e-01,
1691.0: 1.9303e-01,
1692.0: 2.0708e-01,
1693.0: 2.1134e-01,
1694.0: 2.0477e-01,
1695.0: 2.0968e-01,
1696.0: 2.0922e-01,
1697.0: 1.8107e-01,
1698.0: 2.0739e-01,
1699.0: 2.0551e-01,
1700.0: 1.9975e-01,
1702.0: 2.0396e-01,
1705.0: 1.9778e-01,
1710.0: 1.8790e-01,
1715.0: 1.8965e-01,
1720.0: 1.8698e-01,
1725.0: 1.7808e-01,
1730.0: 1.7407e-01,
1735.0: 1.6154e-01,
1740.0: 1.6818e-01,
1745.0: 1.5481e-01,
1750.0: 1.6566e-01,
1755.0: 1.5301e-01,
1760.0: 1.5998e-01,
1765.0: 1.3284e-01,
1770.0: 1.4172e-01,
1775.0: 1.1484e-01,
1780.0: 1.0050e-01,
1785.0: 7.6981e-02,
1790.0: 8.8904e-02,
1795.0: 4.6931e-02,
1800.0: 3.1828e-02,
1805.0: 1.4815e-02,
1810.0: 9.6911e-03,
1815.0: 3.2816e-03,
1820.0: 9.8755e-04,
1825.0: 1.2744e-03,
1830.0: 5.2041e-06,
1835.0: 6.4190e-06,
1840.0: 6.2703e-08,
1845.0: 6.2658e-06,
1850.0: 2.9993e-06,
1855.0: 2.8396e-07,
1860.0: 1.1151e-05,
1865.0: 1.6982e-05,
1870.0: 2.6662e-10,
1875.0: 4.5130e-10,
1880.0: 7.7505e-05,
1885.0: 4.3890e-05,
1890.0: 2.2333e-04,
1895.0: 1.2947e-04,
1900.0: 8.6221e-07,
1905.0: 5.6667e-07,
1910.0: 2.3045e-05,
1915.0: 1.9947e-05,
1920.0: 4.5069e-04,
1925.0: 9.3615e-04,
1930.0: 5.5242e-04,
1935.0: 3.5935e-03,
1940.0: 3.2821e-03,
1945.0: 1.0863e-02,
1950.0: 1.6727e-02,
1955.0: 1.0036e-02,
1960.0: 2.1906e-02,
1965.0: 2.8563e-02,
1970.0: 4.8847e-02,
1975.0: 6.7857e-02,
1980.0: 7.5512e-02,
1985.0: 8.3063e-02,
1990.0: 8.5613e-02,
1995.0: 8.1190e-02,
2000.0: 3.8156e-02,
2005.0: 1.5001e-02,
2010.0: 3.9748e-02,
2015.0: 2.6648e-02,
2020.0: 4.4981e-02,
2025.0: 7.4010e-02,
2030.0: 8.4856e-02,
2035.0: 9.6386e-02,
2040.0: 8.9781e-02,
2045.0: 9.1074e-02,
2050.0: 6.7927e-02,
2055.0: 5.4906e-02,
2060.0: 6.9193e-02,
2065.0: 6.1875e-02,
2070.0: 6.5676e-02,
2075.0: 7.7443e-02,
2080.0: 8.6812e-02,
2085.0: 8.5102e-02,
2090.0: 8.9100e-02,
2095.0: 8.9747e-02,
2100.0: 8.6133e-02,
2105.0: 9.3153e-02,
2110.0: 8.9654e-02,
2115.0: 9.1673e-02,
2120.0: 8.7588e-02,
2125.0: 8.8632e-02,
2130.0: 8.9774e-02,
2135.0: 9.0044e-02,
2140.0: 9.0767e-02,
2145.0: 8.9486e-02,
2150.0: 8.4639e-02,
2155.0: 8.4840e-02,
2160.0: 8.4170e-02,
2165.0: 7.6310e-02,
2170.0: 8.1996e-02,
2175.0: 8.0448e-02,
2180.0: 8.1808e-02,
2185.0: 7.4550e-02,
2190.0: 7.9068e-02,
2195.0: 7.8992e-02,
2200.0: 7.1202e-02,
2205.0: 7.4010e-02,
2210.0: 7.9315e-02,
2215.0: 7.6273e-02,
2220.0: 7.7730e-02,
2225.0: 7.5453e-02,
2230.0: 7.5773e-02,
2235.0: 7.4299e-02,
2240.0: 7.3118e-02,
2245.0: 7.0838e-02,
2250.0: 7.1937e-02,
2255.0: 6.7690e-02,
2260.0: 6.6929e-02,
2265.0: 6.8137e-02,
2270.0: 6.4867e-02,
2275.0: 6.4021e-02,
2280.0: 6.6288e-02,
2285.0: 6.3080e-02,
2290.0: 6.3220e-02,
2295.0: 6.1265e-02,
2300.0: 5.8824e-02,
2305.0: 5.9171e-02,
2310.0: 6.3870e-02,
2315.0: 5.8141e-02,
2320.0: 5.2031e-02,
2325.0: 5.6215e-02,
2330.0: 5.6824e-02,
2335.0: 5.7967e-02,
2340.0: 4.5836e-02,
2345.0: 5.1400e-02,
2350.0: 4.1536e-02,
2355.0: 4.7473e-02,
2360.0: 5.0237e-02,
2365.0: 4.9409e-02,
2370.0: 3.0817e-02,
2375.0: 4.4147e-02,
2380.0: 4.2552e-02,
2385.0: 3.0826e-02,
2390.0: 3.7109e-02,
2395.0: 4.0594e-02,
2400.0: 4.4150e-02,
2405.0: 3.3599e-02,
2410.0: 3.3813e-02,
2415.0: 2.7300e-02,
2420.0: 2.6590e-02,
2425.0: 3.3078e-02,
2430.0: 4.5099e-02,
2435.0: 1.4878e-02,
2440.0: 4.3249e-02,
2445.0: 2.0798e-02,
2450.0: 1.3611e-02,
2455.0: 2.4853e-02,
2460.0: 3.3363e-02,
2465.0: 2.4148e-02,
2470.0: 1.6727e-02,
2475.0: 1.6455e-02,
2480.0: 8.0395e-03,
2485.0: 5.6102e-03,
2490.0: 3.5113e-03,
2495.0: 2.8772e-03,
2500.0: 7.0642e-03,
2505.0: 1.5191e-03,
2510.0: 2.2163e-03,
2515.0: 5.1880e-04,
2520.0: 3.7054e-04,
2525.0: 4.1393e-05,
2530.0: 6.3593e-07,
2535.0: 1.7502e-07,
2540.0: 3.7716e-07,
2545.0: 5.3758e-11,
2550.0: 2.8222e-13,
2555.0: 1.0435e-09,
2560.0: 3.1020e-11,
2565.0: 1.5955e-14,
2570.0: 1.5258e-18,
2575.0: 1.0786e-27,
2580.0: 3.8214e-22,
2585.0: 1.7194e-34,
2590.0: 5.4793e-31,
2595.0: 2.2838e-33,
2600.0: 4.4912e-28,
2605.0: 5.8053e-35,
2610.0: 5.9447e-34,
2615.0: 1.1196e-37,
2620.0: 5.6505e-29,
2625.0: 3.8687e-28,
2630.0: 2.8026e-45,
2635.0: 3.9027e-16,
2640.0: 1.1750e-16,
2645.0: 8.9988e-19,
2650.0: 1.4295e-19,
2655.0: 1.3133e-27,
2660.0: 2.6068e-25,
2665.0: 1.1123e-37,
2670.0: 0.0000e+00,
2675.0: 0.0000e+00,
2680.0: 0.0000e+00,
2685.0: 0.0000e+00,
2690.0: 1.0226e-29,
2695.0: 7.1284e-33,
2700.0: 0.0000e+00,
2705.0: 2.9315e-42,
2710.0: 1.1250e-35,
2715.0: 3.8557e-26,
2720.0: 5.6052e-45,
2725.0: 7.2935e-22,
2730.0: 6.0734e-19,
2735.0: 5.4888e-21,
2740.0: 2.3314e-27,
2745.0: 1.3146e-23,
2750.0: 1.6648e-28,
2755.0: 6.7262e-44,
2760.0: 0.0000e+00,
2765.0: 2.6777e-27,
2770.0: 8.3791e-24,
2775.0: 3.9990e-38,
2780.0: 4.8067e-34,
2785.0: 3.8866e-27,
2790.0: 1.2170e-16,
2795.0: 3.6205e-16,
2800.0: 1.6484e-12,
2805.0: 6.7478e-14,
2810.0: 4.0233e-10,
2815.0: 2.8685e-10,
2820.0: 2.0548e-11,
2825.0: 1.7605e-07,
2830.0: 3.9008e-06,
2835.0: 2.1276e-10,
2840.0: 1.9609e-07,
2845.0: 4.0575e-05,
2850.0: 1.1566e-06,
2855.0: 4.4867e-07,
2860.0: 2.5356e-05,
2865.0: 1.6763e-04,
2870.0: 6.3129e-06,
2875.0: 3.9170e-04,
2880.0: 2.4724e-04,
2885.0: 4.5332e-04,
2890.0: 1.8623e-04,
2895.0: 2.6643e-03,
2900.0: 8.1152e-04,
2905.0: 1.1096e-04,
2910.0: 2.7220e-03,
2915.0: 1.2581e-03,
2920.0: 2.8948e-03,
2925.0: 1.0835e-03,
2930.0: 5.8858e-03,
2935.0: 6.4903e-03,
2940.0: 1.6273e-03,
2945.0: 1.4489e-03,
2950.0: 5.2276e-03,
2955.0: 2.3361e-03,
2960.0: 4.5971e-03,
2965.0: 7.4379e-03,
2970.0: 3.5233e-04,
2975.0: 8.5429e-04,
2980.0: | |
import logging
from datetime import timedelta as td
import numpy as np
from dateutil.parser import parse
import dateutil.tz
import gevent
from volttron.platform.agent.math_utils import mean, stdev
from volttron.platform.agent.base_market_agent import MarketAgent
from volttron.platform.agent.base_market_agent.poly_line import PolyLine
from volttron.platform.agent.base_market_agent.point import Point
from volttron.platform.agent.base_market_agent.buy_sell import BUYER
from volttron.platform.agent.utils import setup_logging, format_timestamp, get_aware_utc_now
from volttron.platform.vip.agent import Agent, Core
from volttron.platform.messaging import topics, headers as headers_mod
from volttron.platform.jsonrpc import RemoteError
from volttron.platform.vip.agent import errors
_log = logging.getLogger(__name__)
setup_logging()
__version__ = '0.3'
class TransactiveBase(MarketAgent):
    def __init__(self, config, **kwargs):
        """Initialize common transactive-agent state from ``config``.

        Args:
            config (dict): agent configuration.  Keys read here include
                "campus", "building", "actuation_enable_topic",
                "actuation_enabled_onstart", "actuation_method",
                "control_interval", "price_multiplier", "fallback_price",
                "input_data_timezone", "inputs", "outputs", "schedule",
                "market_name", "tns" and "single_market_contol_interval".
            **kwargs: forwarded to MarketAgent.__init__.
        """
        super(TransactiveBase, self).__init__(**kwargs)
        # Actuation state flags.
        self.actuation_enabled = False
        self.actuation_disabled = False
        # Runtime market/schedule context; populated during operation.
        self.current_datetime = None
        self.current_schedule = None
        self.current_hour = None
        self.current_price = None
        self.actuation_obj = None
        # Flexibility ranges and setpoints; set per output in init_outputs.
        self.flexibility = None
        self.ct_flexibility = None
        self.off_setpoint = None
        self.occupied = False
        self.mapped = None
        # Price and forecast caches.
        self.oat_predictions = []
        self.market_prices = {}
        self.day_ahead_prices = []
        self.last_24_hour_prices = []
        # Input/output point mappings; populated by init_inputs/init_outputs.
        self.input_topics = []
        self.inputs = {}
        self.outputs = {}
        self.schedule = {}
        self.commodity = "Electricity"
        campus = config.get("campus", "")
        building = config.get("building", "")
        # Record topic is "tnc/<campus>/<building>" with empty parts dropped.
        base_record_list = ["tnc", campus, building]
        base_record_list = list(filter(lambda a: a != "", base_record_list))
        self.record_topic = '/'.join(base_record_list)
        # set actuation parameters for device control
        actuate_topic = config.get("actuation_enable_topic", "default")
        if actuate_topic == "default":
            self.actuate_topic = '/'.join([campus, building, 'actuate'])
        else:
            self.actuate_topic = actuate_topic
        self.actuate_onstart = config.get("actuation_enabled_onstart", False)
        self.actuation_method = config.get("actuation_method", "periodic")
        self.actuation_rate = config.get("control_interval", 300)  # presumably seconds -- TODO confirm
        self.price_multiplier = config.get("price_multiplier", 1.0)
        self.default_min_price = 0.01
        self.default_max_price = 0.1
        self.default_price = config.get("fallback_price", 0.05)
        input_data_tz = config.get("input_data_timezone", "UTC")
        self.input_data_tz = dateutil.tz.gettz(input_data_tz)
        inputs = config.get("inputs", [])
        self._outputs = config.get("outputs", [])
        schedule = config.get("schedule", {})
        self.init_inputs(inputs)
        self.init_schedule(schedule)
        market_name = config.get("market_name", "electric")
        # With TNS integration 24 markets are used (presumably one per hour);
        # otherwise a single market with a periodic control interval.
        tns_integration = config.get("tns", True)
        if tns_integration:
            self.market_number = 24
            self.single_market_contol_interval = None
        else:
            self.market_number = 1
            # NOTE(review): "contol" is a typo, but it is also the config key
            # and attribute name used elsewhere - renaming it would break
            # existing configurations.
            self.single_market_contol_interval = config.get("single_market_contol_interval", 15)
        self.market_name = []
        for i in range(self.market_number):
            self.market_name.append('_'.join([market_name, str(i)]))
        # Per-market bookkeeping; filled in by init_markets.
        self.update_flag = []
        self.demand_curve = []
        self.actuation_price_range = None
        self.prices = []
    @Core.receiver('onstart')
    def setup(self, sender, **kwargs):
        """
        On start: initialize outputs and actuation state, set up input
        subscriptions, and subscribe update_prices to the mix-market
        new-cycle topic.

        :param sender:
        :param kwargs:
        :return:
        """
        self.init_outputs(self._outputs)
        self.init_actuation_state(self.actuate_topic, self.actuate_onstart)
        self.init_input_subscriptions()
        # update_prices (defined elsewhere in this class) is invoked whenever a
        # new market cycle is announced on this topic.
        self.vip.pubsub.subscribe(peer='pubsub',
                                  prefix='mixmarket/start_new_cycle',
                                  callback=self.update_prices)
def init_markets(self):
for market in self.market_name:
self.join_market(market, BUYER, None, self.offer_callback,
None, self.price_callback, self.error_callback)
self.update_flag.append(False)
self.demand_curve.append(PolyLine())
def init_inputs(self, inputs):
for input_info in inputs:
point = input_info.pop("point")
mapped = input_info.pop("mapped")
topic = input_info.pop("topic")
value = input_info.pop("inital_value")
self.inputs[mapped] = {point: value}
if topic not in self.input_topics:
self.input_topics.append(topic)
    def init_outputs(self, outputs):
        """
        Build ``self.outputs`` from the configured output list.

        For each configured output this reads the current device value via the
        actuator RPC (falling back to the configured fallback on failure) and
        stores the metadata needed for actuation and market offers, keyed by
        the output's mapped name.

        :param outputs: list of output configuration dicts; consumed keys are
            popped from each dict.
        """
        for output_info in outputs:
            # Topic to subscribe to for data (currently the data format must
            # be consistent with a MasterDriverAgent "all" publish).
            topic = output_info["topic"]
            # Point name as published by the MasterDriverAgent.
            point = output_info.pop("point")
            mapped = output_info.pop("mapped")
            # Release options: None or "default".
            #   None    -> BACnet-style release via the priority array.
            #   default -> save the value read at agent start and restore it
            #              when actuation is disabled.
            # TODO: Update release value anytime agent has state transition
            # for actuation_enable.
            release = output_info.get("release", None)
            # Constant offset added to the determined actuation value.
            offset = output_info.get("offset", 0.0)
            # VIP identity of the Actuator agent called via RPC to control
            # the device.
            actuator = output_info.get("actuator", "platform.actuator")
            # Flexibility range for the market commodity this transactive
            # agent trades on.
            flex = output_info["flexibility_range"]
            # Flexibility of the control point itself; defaults to the market
            # commodity's range but may differ.
            ct_flex = output_info.get("control_flexibility", flex)
            # Discretize both ranges (see set_control).
            ct_flex, flex = self.set_control(ct_flex, flex)
            # Value to command when the live read fails.
            fallback = output_info.get("fallback", mean(ct_flex))
            # TODO: Use condition to determine multiple output scenario.
            condition = output_info.get("condition", True)
            try:
                # Read the device's current value so "default" release can
                # restore it later.
                value = self.vip.rpc.call(actuator,
                                          'get_point',
                                          topic).get(timeout=10)
            except (RemoteError, gevent.Timeout, errors.VIPError) as ex:
                _log.warning("Failed to get {} - ex: {}".format(topic, str(ex)))
                value = fallback
            # Only a successful live read can serve as the "default" release
            # value; otherwise fall back to priority-array release (None).
            if isinstance(release, str) and release.lower() == "default" and value is not None:
                release_value = value
            else:
                release_value = None
            # Setpoint used when the schedule marks the space unoccupied.
            off_setpoint = output_info.get("off_setpoint", value)
            self.outputs[mapped] = {
                "point": point,
                "topic": topic,
                "actuator": actuator,
                "release": release_value,
                "value": value,
                "off_setpoint": off_setpoint,
                "offset": offset,
                "flex": flex,
                "ct_flex": ct_flex,
                "condition": condition
            }
def set_control(self, ct_flex, flex):
ct_flex = np.linspace(ct_flex[0], ct_flex[1], 11)
flex = np.linspace(flex[0], flex[1], 11)
return ct_flex, flex
def init_input_subscriptions(self):
for topic in self.input_topics:
_log.debug('Subscribing to: ' + topic)
self.vip.pubsub.subscribe(peer='pubsub',
prefix=topic,
callback=self.update_input_data)
def init_schedule(self, schedule):
if schedule:
for day_str, schedule_info in schedule.items():
_day = parse(day_str).weekday()
if schedule_info not in ["always_on", "always_off"]:
start = parse(schedule_info["start"]).time()
end = parse(schedule_info["end"]).time()
self.schedule[_day] = {"start": start, "end": end}
else:
self.schedule[_day] = schedule_info
def init_actuation_state(self, actuate_topic, actuate_onstart):
if self._outputs:
self.vip.pubsub.subscribe(peer='pubsub',
prefix=actuate_topic,
callback=self.update_actuation_state)
if actuate_onstart:
self.update_actuation_state(None, None, None, None, None, True)
self.actuation_disabled = False
else:
_log.info("{} - cannot initialize actuation state, no configured outputs.".format(self.agent_name))
def check_schedule(self, dt):
current_schedule = self.schedule[dt.weekday()]
if "always_on" in current_schedule:
self.occupied = True
if not self.actuation_enabled:
self.update_actuation_state(None, None, None, None, None, True)
return
if "always_off" in current_schedule:
self.occupied = False
if self.actuation_enabled:
self.update_actuation_state(None, None, None, None, None, False)
return
_start = current_schedule["start"]
_end = current_schedule["end"]
if _start < self.current_datetime.time() < _end:
self.occupied = True
if not self.actuation_enabled:
self.update_actuation_state(None, None, None, None, None, True)
else:
self.occupied = False
if self.actuation_enabled:
self.update_actuation_state(None, None, None, None, None, False)
def check_future_schedule(self, dt):
current_schedule = self.schedule[dt.weekday()]
if "always_on" in current_schedule:
return True
if "always_off" in current_schedule:
return False
_start = current_schedule["start"]
_end = current_schedule["end"]
if _start < dt.time() < _end:
return True
else:
return False
    def update_actuation_state(self, peer, sender, bus, topic, headers, message):
        """
        Pubsub callback (and internal entry point) that enables or disables
        actuation.

        ``message`` is the desired state (truthy = enable).  A ``sender`` of
        None marks an internal call (e.g. from schedule checks or onstart);
        external disables latch ``self.actuation_disabled`` until an external
        re-enable arrives.

        On disable: kills the periodic actuation greenlet and commands each
        output's release value.  On enable: refreshes "default" release
        values via RPC and (for the "periodic" method) schedules periodic
        actuation.
        """
        state = message
        if self.actuation_disabled:
            # Internal callers may not override an external disable.
            if sender is None:
                _log.debug("{} is disabled not change in actuation state".format(self.agent_name))
                return
            elif bool(state):
                _log.debug("{} is re-enabled for actuation.".format(self.agent_name))
                self.actuation_disabled = False
        if not self.actuation_disabled:
            # An external disable request latches the disabled flag.
            if sender is not None and not bool(state):
                _log.debug("{} is disabled for actuation.".format(self.agent_name))
                self.actuation_disabled = True
        _log.debug("update actuation {}".format(state))
        if self.actuation_enabled and not bool(state):
            # Transition enabled -> disabled: stop periodic actuation and
            # release every output to its stored release value.
            for output_info in self.outputs.values():
                topic = output_info["topic"]
                release = output_info["release"]
                actuator = output_info["actuator"]
                if self.actuation_obj is not None:
                    self.actuation_obj.kill()
                    self.actuation_obj = None
                self.actuate(topic, release, actuator)
        elif not self.actuation_enabled and bool(state):
            # Transition disabled -> enabled: snapshot fresh "default"
            # release values before taking control.
            for name, output_info in self.outputs.items():
                offset = output_info.get("offset", 0.0)
                actuator = output_info.get("actuator", "platform.actuator")
                topic = output_info["topic"]
                release = output_info.get("release", None)
                if isinstance(release, str) and release.lower() == "default":
                    try:
                        release_value = self.vip.rpc.call(actuator,
                                                          'get_point',
                                                          topic).get(timeout=10)
                    # NOTE(review): if this RPC fails, release_value is left
                    # unbound here and the assignment below raises
                    # UnboundLocalError on the first "default" output whose
                    # read fails — confirm and consider a fallback.
                    except (RemoteError, gevent.Timeout, errors.VIPError) as ex:
                        _log.warning("Failed to get {} - ex: {}".format(topic, str(ex)))
                else:
                    release_value = None
                self.outputs[name]["release"] = release_value
            if self.actuation_method == "periodic":
                self.actuation_obj = self.core.periodic(self.actuation_rate, self.do_actuation, wait=self.actuation_rate)
        # The raw message (not coerced to bool) becomes the new state.
        self.actuation_enabled = state
def update_outputs(self, name, price):
if price is None:
if self.current_price is None:
return
price = self.current_price
sets = self.outputs[name]["ct_flex"]
if self.actuation_price_range is not None:
prices = self.actuation_price_range
else:
prices = self.determine_prices()
value = self.determine_control(sets, prices, price)
self.outputs[name]["value"] = value
point = self.outputs.get("point", name)
topic_suffix = "/".join([self.agent_name, "Actuate"])
message = {point: value, "Price": price}
self.publish_record(topic_suffix, message)
def do_actuation(self, price=None):
_log.debug("actuation {}".format(self.outputs))
for name, output_info in self.outputs.items():
if not output_info["condition"]:
continue
self.update_outputs(name, price)
topic = output_info["topic"]
point = output_info["point"]
actuator = output_info["actuator"]
value = output_info.get("value")
offset = output_info["offset"]
if value is not None:
value = value + offset
self.actuate(topic, value, actuator)
def actuate(self, point_topic, value, actuator):
try:
self.vip.rpc.call(actuator,
'set_point',
self.agent_name,
point_topic,
value).get(timeout=10)
except (RemoteError, gevent.Timeout, errors.VIPError) as ex:
_log.warning("Failed to set {} - ex: {}".format(point_topic, str(ex)))
    def offer_callback(self, timestamp, market_name, buyer_seller):
        """
        Market callback: build this hour's demand curve and submit the offer.

        Selects the first output whose "condition" is truthy as the active
        output (falling back to the last iterated output when none match —
        NOTE(review): raises NameError if ``self.outputs`` is empty; confirm
        outputs are always configured before markets join).

        Markets are processed in hour order: for market i > 0 this blocks
        (gevent.sleep) until market i-1 has set its update flag, and the last
        market clears all flags for the next cycle.
        """
        for name, output in self.outputs.items():
            output_info = output
            self.mapped = name
            if output["condition"]:
                break
        self.flexibility = output_info["flex"]
        self.ct_flexibility = output_info["ct_flex"]
        self.off_setpoint = output_info["off_setpoint"]
        market_index = self.market_name.index(market_name)
        if market_index > 0:
            # Wait for the previous hour's market to finish updating.
            while not self.update_flag[market_index - 1]:
                gevent.sleep(1)
        if market_index == len(self.market_name) - 1:
            # Last market of the cycle resets the flags for the next cycle.
            for i in range(len(self.market_name)):
                self.update_flag[i] = False
        if market_index == 0 and self.current_datetime is not None:
            # First market of the cycle refreshes load predictions.
            self.init_predictions(output_info)
        sched_index = self.determine_sched_index(market_index)
        # Market i clears for the hour (now + i + 1).
        market_time = self.current_datetime + td(hours=market_index + 1)
        occupied = self.check_future_schedule(market_time)
        demand_curve = self.create_demand_curve(market_index, sched_index, occupied)
        self.demand_curve[market_index] = demand_curve
        result, message = self.make_offer(market_name, buyer_seller, demand_curve)
def create_demand_curve(self, market_index, sched_index, occupied):
_log.debug("{} debug demand_curve - index: {} - sched: {}".format(self.agent_name,
market_index,
sched_index))
demand_curve = PolyLine()
prices = self.determine_prices()
ct_flx = []
for i in range(len(prices)):
if occupied:
_set = self.ct_flexibility[i]
else:
_set = self.off_setpoint
ct_flx.append(_set)
q = self.get_q(_set, sched_index, market_index, occupied)
demand_curve.add(Point(price=prices[i], quantity=q))
ct_flx = [min(ct_flx), max(ct_flx)] if ct_flx else []
topic_suffix = "/".join([self.agent_name, "DemandCurve"])
message = {"MarketIndex": market_index, "Curve": demand_curve.tuppleize(), "Commodity": self.commodity}
_log.debug("{} debug demand_curve - curve: {}".format(self.agent_name, demand_curve.points))
self.publish_record(topic_suffix, message)
return demand_curve
def price_callback(self, timestamp, market_name, buyer_seller, price, quantity):
market_index = self.market_name.index(market_name)
if price is None:
if self.market_prices:
try:
price = self.market_prices[market_index]
_log.warn("{} - market {} did not clear, using market_prices!".format(self.agent_name, market_name))
except IndexError:
_log.warn("{} | |
(detection_all[3][0], detection_all[3][1])
#self.target_height = target_height
self.target_height = detection_all[4][3]
target_width = detection_all[4][2]
target = (detection_all[4][0], detection_all[4][1])
ref_x = int(w/2)
ref_y = int(h*0.35)
self.axis_speed = self.cmd_axis_speed.copy()
#Is there a Picture Command ?
if self.picture_approach:
cls_number = int(self.classes_dict[self.picture_target])
print (str(self.picture_target) + 'is' + str(cls_number))
print (self.picture_target + ' values:' + str(detection_all[cls_number]))
# If no pic target found --> rotate
if (detection_all[cls_number][0] + detection_all[cls_number][1]) == 0:
log.info(f'searching for {self.picture_target}')
if time.time() - self.search_start_time < 8:
self.axis_speed["yaw"] = 60
else:
print('stopped searching after 8 seconds')
self.axis_speed["yaw"] = 0
self.picture_approach = False
# If pic target found set as new tracking target
else:
print('pic target found')
self.axis_speed["yaw"] = 0
if self.timestamp_pic_target_found is None:
self.timestamp_pic_target_found = time.time()
log.info(f'found {self.picture_target}')
target = (detection_all[cls_number][0], detection_all[cls_number][1])
self.target_height = detection_all[cls_number][3]
#If Human Head:
if cls_number == 4:
self.keep_distance = pic_proximity*0.75
else:
self.keep_distance = pic_proximity
self.pid_pitch = PID(0.15,0.0,0.1,setpoint=0,output_limits=(-30,30))
self.tracking = True
# If voice command 'come home' activate RTH
if self.rth:
self.target_height = detection_all[4][3]
target_width = detection_all[4][2]
target = (detection_all[4][0], detection_all[4][1])
self.keep_distance = proximity*0.75
self.toggle_tracking(tracking=True)
if self.timestamp_take_picture:
if time.time() - self.timestamp_take_picture > 2:
self.timestamp_take_picture = None
self.drone.take_picture()
else:
if self.tracking:
if target != (0,0):
if self.distance_mode:
# Locked distance mode
if self.keep_distance is None:
self.keep_distance = self.target_height
#self.graph_distance = RollingGraph(window_name="Distance", y_max=500, threshold=self.keep_distance, waitKey=False)
if self.palm_landing_approach:
self.keep_distance = proximity
self.timestamp_keep_distance = time.time()
log.info("APPROACHING on pose")
self.pid_pitch = PID(0.2,0.0,0.1,setpoint=0,output_limits=(-30,30))
#self.graph_distance = RollingGraph(window_name="Distance", y_max=500, threshold=self.keep_distance, waitKey=False)
self.body_in_prev_frame = True
xoff = int(target[0]-ref_x)
yoff = int(ref_y-target[1])
#We draw an arrow from the reference point to the head we are targeting
color = (0,0,255)
cv2.circle(frame, (ref_x, ref_y), 10, color, 1,cv2.LINE_AA)
cv2.line(frame, (ref_x, ref_y), target, color, 4)
cv2.rectangle(frame, (target[0]-target_width//2, target[1]-self.target_height//2),
(target[0]+target_width//2, target[1]+self.target_height//2),color,4)
# The PID controllers calculate the new speeds for yaw and throttle
self.axis_speed["yaw"] = int(-self.pid_yaw(xoff))
#log.debug(f"xoff: {xoff} - speed_yaw: {self.axis_speed['yaw']}")
self.last_rotation_is_cw = self.axis_speed["yaw"] > 0
self.axis_speed["throttle"] = int(-self.pid_throttle(yoff))
#log.debug(f"yoff: {yoff} - speed_throttle: {self.axis_speed['throttle']}")
#If in locked distance mode
if self.keep_distance and self.target_height:
# Check RTH
if self.rth and self.target_height>=self.keep_distance:
self.rth = False
elif self.palm_landing_approach and self.target_height>self.keep_distance:
# The drone is now close enough to the body
# Let's do the palm landing
log.info("PALM LANDING after approaching")
self.palm_landing_approach = False
self.toggle_tracking(tracking=False)
self.palm_land()
elif self.picture_approach and \
abs(self.target_height-self.keep_distance) < 15 and \
xoff < 12 and yoff < 15:
# The drone is now close enough to the pic target
# Let's take a picture
self.toggle_tracking(tracking=False)
print('take a picture')
self.drone.take_picture()
self.picture_approach = False
self.timestamp_pic_target_found = None
self.pid_pitch = PID(0.3,0.0,0.1,setpoint=0,output_limits=(-70,70))
else:
self.axis_speed["pitch"] = int(self.pid_pitch(self.target_height-self.keep_distance))
log.debug(f"Target distance: {self.keep_distance} - cur: {self.target_height} -speed_pitch: {self.axis_speed['pitch']}")
if abs(head_pos[1] - hand_pos[1])<30:
if self.timestamp_hand_ctrl is None:
self.timestamp_hand_ctrl = time.time()
if time.time() - self.timestamp_hand_ctrl > 1:
if self.head_hand_x_dist is None:
self.head_hand_x_ref = head_pos[0]-hand_pos[0]
self.hand_ctrl = True
self.head_hand_x_dist = head_pos[0]-hand_pos[0]
self.axis_speed["roll"] = int(-self.pid_roll(self.head_hand_x_ref - self.head_hand_x_dist))
#print (f'head hand X distance: {abs(head_pos[0]-hand_pos[0])}')
else:
self.hand_ctrl = False
self.timestamp_hand_ctrl = None
self.head_hand_x_dist = None
else: # Tracking but no body detected
if self.body_in_prev_frame:
self.timestamp_no_body = time.time()
self.body_in_prev_frame = False
self.axis_speed["throttle"] = self.prev_axis_speed["throttle"]
self.axis_speed["yaw"] = self.prev_axis_speed["yaw"]
else:
if time.time() - self.timestamp_no_body < 1:
print("NO BODY SINCE < 1", self.axis_speed, self.prev_axis_speed)
self.axis_speed["throttle"] = self.prev_axis_speed["throttle"]
self.axis_speed["yaw"] = self.prev_axis_speed["yaw"]
else:
log.debug("NO BODY detected for 1s -> rotate")
self.axis_speed["yaw"] = self.def_speed["yaw"] * (1 if self.last_rotation_is_cw else -1)
# Send axis commands to the drone
for axis, command in self.axis_command.items():
if self.axis_speed[axis]is not None and self.axis_speed[axis] != self.prev_axis_speed[axis]:
#log.debug(f"COMMAND {axis} : {self.axis_speed[axis]}")
command(self.axis_speed[axis])
self.prev_axis_speed[axis] = self.axis_speed[axis]
else:
# This line is necessary to display current values in 'self.write_hud'
self.axis_speed[axis] = self.prev_axis_speed[axis]
# Write the HUD on the frame
frame = self.write_hud(frame)
return frame
def write_hud(self, frame):
"""
Draw drone info on frame
"""
class HUD:
def __init__(self, def_color=(255, 170, 0)):
self.def_color = def_color
self.infos = []
def add(self, info, color=None):
if color is None: color = self.def_color
self.infos.append((info, color))
def draw(self, frame):
i=0
for (info, color) in self.infos:
cv2.putText(frame, info, (0, 30 + (i * 30)),
cv2.FONT_HERSHEY_SIMPLEX,
1.0, color, 2) #lineType=30)
i+=1
hud = HUD()
tello_color = (0,255,0)
if self.debug: hud.add(datetime.datetime.now().strftime('%H:%M:%S'))
hud.add(f"FPS {self.fps.get():.2f}")
if self.debug: hud.add(f"VR {self.video_encoder_rate}")
hud.add(f"BAT {self.battery}")
if self.is_flying:
hud.add("FLYING", (0,255,0))
else:
hud.add("NOT FLYING", (0,0,255))
hud.add(f"TRACKING {'ON' if self.tracking else 'OFF'}", (0,255,0) if self.tracking else (0,0,255) )
hud.add(f"EXPO {self.exposure}")
#hud.add(f"ALT {self.ref_pos_x}")
if self.hand_ctrl:
hud.add(f"HAND Ctrl {self.ref_pos_x}",(0,0,255))
hud.add(f"HEAD_HAND_DIST {self.head_hand_x_ref - self.head_hand_x_dist}")
if self.axis_speed['yaw'] > 0:
hud.add(f"CW {self.axis_speed['yaw']}", tello_color)
elif self.axis_speed['yaw'] < 0:
hud.add(f"CCW {-self.axis_speed['yaw']}", tello_color)
else:
hud.add(f"CW 0")
if self.axis_speed['roll'] > 0:
hud.add(f"RIGHT {self.axis_speed['roll']}", tello_color)
elif self.axis_speed['roll'] < 0:
hud.add(f"LEFT {-self.axis_speed['roll']}", tello_color)
else:
hud.add(f"RIGHT 0")
if self.axis_speed['pitch'] > 0:
hud.add(f"FORWARD {self.axis_speed['pitch']}", tello_color)
elif self.axis_speed['pitch'] < 0:
hud.add(f"BACKWARD {-self.axis_speed['pitch']}", tello_color)
else:
hud.add(f"FORWARD 0")
if self.axis_speed['throttle'] > 0:
hud.add(f"UP {self.axis_speed['throttle']}", tello_color)
elif self.axis_speed['throttle'] < 0:
hud.add(f"DOWN {-self.axis_speed['throttle']}",tello_color)
else:
hud.add(f"UP 0")
if self.keep_distance:
hud.add(f"Target distance: {self.keep_distance} - curr: {self.target_height}", (0,255,0))
#if self.target_height: self.graph_distance.new_iter([self.target_height])
if self.timestamp_take_picture: hud.add("Taking a picture",tello_color)
if self.palm_landing:
hud.add("Palm landing...", tello_color)
if self.palm_landing_approach:
hud.add("In approach for palm landing...", tello_color)
if self.tracking and not self.body_in_prev_frame and time.time() - self.timestamp_no_body > 0.5:
hud.add("Searching...", tello_color)
if self.manual_control:
hud.add("Manual Control...", tello_color)
if self.throw_ongoing:
hud.add("Throw ongoing...", tello_color)
if self.scheduled_takeoff:
seconds_left = int(self.scheduled_takeoff - time.time())
hud.add(f"Takeoff in {seconds_left}s")
hud.draw(frame)
return frame
def take_picture(self):
"""
Tell drone to take picture, image sent to file handler
"""
self.drone.take_picture()
def set_exposure(self, expo):
"""
Change exposure of drone camera
"""
if expo == 0:
self.exposure = 0
elif expo == 1:
self.exposure = min(9, self.exposure+1)
elif expo == -1:
self.exposure = max(-9, self.exposure-1)
self.drone.set_exposure(self.exposure)
log.info(f"EXPOSURE {self.exposure}")
def palm_land_approach(self):
self.palm_landing_approach = True
print('Palm Landing Approach')
def palm_land(self):
"""
Tell drone to land
"""
self.palm_landing = True
self.drone.palm_land()
def throw_and_go(self, tracking=False):
"""
Tell drone to start a 'throw and go'
"""
self.drone.throw_and_go()
self.tracking_after_takeoff = tracking
def delayed_takeoff(self, delay=5):
self.scheduled_takeoff = time.time()+delay
self.tracking_after_takeoff = True
def clockwise_degrees(self, degrees):
self.yaw_to_consume = degrees
self.yaw_consumed = 0
self.prev_yaw = self.yaw
def toggle_distance_mode(self):
self.distance_mode = not self.distance_mode
if not self.distance_mode:
self.keep_distance = None
log.info('distance_mode '+("ON" if self.distance_mode else "OFF"))
def toggle_use_voice(self):
self.use_voice = not self.use_voice
log.info('use_voice '+("ON" if self.use_voice else "OFF"))
def toogle_manual_control(self):
self.manual_control = not self.manual_control
if self.manual_control:
self.axis_speed = { "yaw":0, "roll":0, "pitch":0, "throttle":0}
self.keep_distance = None
log.info('manual_control '+("ON" if self.manual_control else "OFF"))
def toggle_tracking(self, tracking=None):
"""
If tracking is None, toggle value of self.tracking
Else self.tracking take the same value as tracking
"""
if tracking is None:
self.tracking = not self.tracking
else:
self.tracking = tracking
if self.tracking:
log.info("ACTIVATE TRACKING")
# Start an explarotary 360
#self.clockwise_degrees(360)
# Init a PID controller for the yaw
#self.pid_yaw = PID(0.25,0,0,setpoint=0,output_limits=(-100,100))
# ... and one for the throttle
#self.pid_throttle = PID(0.4,0,0,setpoint=0,output_limits=(-80,100))
# self.init_tracking = True
else:
self.axis_speed = { "yaw":0, "roll":0, "pitch":0, "throttle":0}
self.keep_distance = None
return
def flight_data_handler(self, event, sender, data):
"""
Listener to flight data from the drone.
"""
self.battery = data.battery_percentage
self.fly_mode = data.fly_mode
self.throw_fly_timer = data.throw_fly_timer
self.throw_ongoing = data.throw_fly_timer > 0
# print("fly_mode",data.fly_mode)
# print("throw_fly_timer",data.throw_fly_timer)
# print("em_ground",data.em_ground)
# print("em_sky",data.em_sky)
# print("electrical_machinery_state",data.electrical_machinery_state)
#print("em_sky",data.em_sky,"em_ground",data.em_ground,"em_open",data.em_open)
#print("height",data.height,"imu_state",data.imu_state,"down_visual_state",data.down_visual_state)
if self.is_flying != data.em_sky:
self.is_flying = data.em_sky
log.debug(f"FLYING : {self.is_flying}")
if not self.is_flying:
self.reset()
else:
if self.tracking_after_takeoff:
log.info("Tracking on after takeoff")
self.toggle_tracking(True)
log.debug(f"MODE: {self.fly_mode} - Throw fly timer: {self.throw_fly_timer}")
def log_data_handler(self, event, sender, data):
"""
Listener to log data from the drone.
"""
pos_x = -data.mvo.pos_x
pos_y = -data.mvo.pos_y
pos_z = -data.mvo.pos_z
self.ref_pos_x = pos_x
#print(f'pos x = {pos_x}, pos y = {pos_y}, pos z = {pos_z}')
# if abs(pos_x)+abs(pos_y)+abs(pos_z) > 0.07:
# if self.ref_pos_x == -1: # First time we have meaningful values, we store them as reference
# self.ref_pos_x = pos_x
# self.ref_pos_y = pos_y
# self.ref_pos_z = pos_z
# else:
# self.pos_x = pos_x - self.ref_pos_x
# self.pos_y = pos_y - self.ref_pos_y
# self.pos_z = pos_z - self.ref_pos_z
qx = data.imu.q1
qy = data.imu.q2
qz = data.imu.q3
qw = data.imu.q0
self.yaw = quat_to_yaw_deg(qx,qy,qz,qw)
#print(f'yaw = {self.yaw}')
if | |
<gh_stars>1-10
import pandas as pd
from glob import glob
from os.path import join
from .derived import *
from pvlib.solarposition import get_solarposition
import datetime
def process_cabauw_data(csv_path, out_file, nan_column=("soil_water", "TH03"), cabauw_lat=51.971, cabauw_lon=4.926,
                        elevation=-0.7, reflect_counter_gradient=False, average_period=None):
    """
    This function loads all of the cabauw data files and then calculates the relevant derived quantities necessary
    to build the machine learning parameterization.

    Columns in derived data should follow the convention "name of variable_level_units". Underscores separate
    different components, and spaces separate words in each subsection. `df.columns.str.split("_").str[0]` extracts
    the variable names, `df.columns.str.split("_").str[1]` extracts levels, and `df.columns.str.split("_").str[2]`
    extracts units.

    Args:
        csv_path: Path to all csv files.
        out_file: Where derived data are written to.
        nan_column: (file type, column) pair used to filter bad examples.
        cabauw_lat: Latitude of tower site in degrees.
        cabauw_lon: Longitude of tower site in degrees.
        elevation: Elevation of site in meters.
        reflect_counter_gradient: Change the sign of counter gradient sensible and latent heat flux values.
        average_period: Window obs are averaged over.

    Returns:
        `pandas.DataFrame` containing derived data.
    """
    csv_files = sorted(glob(join(csv_path, "*.csv")))
    # File type is the middle of the file name (e.g. ".../cesar_tower_2018.csv" -> "tower").
    file_types = ["_".join(csv_file.split("/")[-1].split("_")[1:-1]) for csv_file in csv_files]
    data = dict()
    for c, csv_file in enumerate(csv_files):
        print(csv_file)
        data[file_types[c]] = pd.read_csv(csv_file, na_values=[-9999.0])
        data[file_types[c]].index = pd.to_datetime(data[file_types[c]]["TimeStr"], format="%Y%m%d.%H:%M")
    print("combine data")
    # Inner join keeps only timestamps present in every file type; the result
    # has a (file type, column) MultiIndex on columns.
    combined_data = pd.concat(data, axis=1, join="inner")
    # Drop rows where the quality-filter column is missing.
    combined_data = combined_data.loc[~pd.isna(combined_data[nan_column])]
    # List of columns included in data
    derived_columns = ["global horizontal irradiance_0 m_W m-2",
                       "zenith_0 m_degrees",
                       "azimuth_0 m_degrees",
                       "temperature_2 m_K",
                       "temperature_10 m_K",
                       "temperature_20 m_K",
                       "temperature_40 m_K",
                       "pressure_2 m_hPa",
                       "potential temperature_2 m_K",
                       "potential temperature_10 m_K",
                       "potential temperature_20 m_K",
                       "potential temperature_40 m_K",
                       "virtual potential temperature_2 m_K",
                       "virtual potential temperature_10 m_K",
                       "virtual potential temperature_20 m_K",
                       "virtual potential temperature_40 m_K",
                       "mixing ratio_2 m_g kg-1",
                       "mixing ratio_10 m_g kg-1",
                       "mixing ratio_20 m_g kg-1",
                       "mixing ratio_40 m_g kg-1",
                       "relative humidity_2 m_%",
                       "relative humidity_10 m_%",
                       "relative humidity_20 m_%",
                       "relative humidity_40 m_%",
                       "temperature change_4 m_K m-1",
                       "temperature change_15 m_K m-1",
                       "temperature change_30 m_K m-1",
                       "mixing ratio change_4 m_g kg-1 m-1",
                       "mixing ratio change_15 m_g kg-1 m-1",
                       "mixing ratio change_30 m_g kg-1 m-1",
                       "upward longwave irradiance_0 m_W m-2",
                       "downward longwave irradiance_0 m_W m-2",
                       "upward shortwave irradiance_0 m_W m-2",
                       "skin temperature_0 m_K",
                       "skin potential temperature_0 m_K",
                       "skin saturation mixing ratio_0 m_g kg-1",
                       "skin virtual potential temperature_0 m_K",
                       "potential temperature skin change_2 m_K m-1",
                       "potential temperature skin change_10 m_K m-1",
                       "potential temperature skin change_20 m_K m-1",
                       "potential temperature skin change_40 m_K m-1",
                       "virtual potential temperature skin change_2 m_K m-1",
                       "virtual potential temperature skin change_10 m_K m-1",
                       "virtual potential temperature skin change_20 m_K m-1",
                       "virtual potential temperature skin change_40 m_K m-1",
                       "mixing ratio skin change_2 m_g kg-1 m-1",
                       "mixing ratio skin change_10 m_g kg-1 m-1",
                       "mixing ratio skin change_20 m_g kg-1 m-1",
                       "mixing ratio skin change_40 m_g kg-1 m-1",
                       "air density_10 m_kg m-3",
                       "air density_2 m_kg m-3",
                       "wind speed_10 m_m s-1",
                       "wind direction_10 m_degrees",
                       "wind speed_20 m_m s-1",
                       "wind direction_20 m_degrees",
                       "wind speed_40 m_m s-1",
                       "wind direction_40 m_degrees",
                       "u wind_10 m_m s-1",
                       "v wind_10 m_m s-1",
                       "u wind_20 m_m s-1",
                       "v wind_20 m_m s-1",
                       "u wind_40 m_m s-1",
                       "v wind_40 m_m s-1",
                       "soil temperature_0 cm_K",
                       "soil temperature_4 cm_K",
                       "soil potential temperature_0 cm_K",
                       "soil potential temperature_4 cm_K",
                       "soil water content_3 cm_m3 m-3",
                       # "soil water content_8 cm_m3 m-3",
                       "moisture availability_soil_",
                       # "moisture availability_8 cm_",
                       "bulk richardson_10 m_",
                       "bulk richardson_2 m_",
                       "bulk richardson_10-2 m_",
                       "obukhov length_surface_m",
                       "sensible heat flux_surface_W m-2",
                       "latent heat flux_surface_W m-2",
                       "friction velocity_surface_m s-1",
                       "temperature scale_surface_K",
                       "soil heat flux_surface_W m-2",
                       "moisture scale_surface_g kg-1",
                       "kinematic sensible heat flux_surface_K m s-1",
                       "kinematic latent heat flux_surface_g kg-1 m s-1"
                       ]
    derived_data = pd.DataFrame(index=combined_data.index, columns=derived_columns, dtype=float)
    # Solar geometry for every timestamp (numba-accelerated NREL SPA).
    solar_data = get_solarposition(combined_data.index, cabauw_lat, cabauw_lon, altitude=elevation, method="nrel_numba")
    print("calculate derived variables")
    derived_data["global horizontal irradiance_0 m_W m-2"] = combined_data[("surface", "SWD")]
    derived_data["zenith_0 m_degrees"] = solar_data["zenith"]
    derived_data["azimuth_0 m_degrees"] = solar_data["azimuth"]
    derived_data["pressure_2 m_hPa"] = combined_data[("surface", "P0")]
    # Tower levels: copy observed values and derive (virtual) potential
    # temperature at each height.  NOTE(review): the 2 m surface pressure is
    # used for all levels — confirm this approximation is intended.
    for height in [2, 10, 20, 40]:
        derived_data[f"temperature_{height:d} m_K"] = combined_data[("tower", f"TA_{height:d}m")]
        derived_data[f"mixing ratio_{height:d} m_g kg-1"] = combined_data[("tower", f"Q_{height:d}m")]
        derived_data[f"relative humidity_{height:d} m_%"] = combined_data[("tower", f"RH_{height:d}m")]
        derived_data[f"potential temperature_{height:d} m_K"] = potential_temperature(
            derived_data[f"temperature_{height:d} m_K"],
            derived_data["pressure_2 m_hPa"])
        derived_data[f"virtual potential temperature_{height:d} m_K"] = virtual_temperature(
            derived_data[f"potential temperature_{height:d} m_K"], derived_data[f"mixing ratio_{height:d} m_g kg-1"])
    heights = [2, 10, 20, 40]
    # Layer gradients between consecutive tower levels (2-10, 10-20, 20-40 m),
    # labelled by the diff heights 4, 15 and 30 m.
    for dh, diff_height in enumerate([4, 15, 30]):
        derived_data[f"temperature change_{diff_height:d} m_K m-1"] = (derived_data[
                                                                          f"temperature_{heights[dh + 1]:d} m_K"] -
                                                                      derived_data[
                                                                          f"temperature_{heights[dh]:d} m_K"]) / (
                                                                          heights[dh + 1] - heights[dh])
        derived_data[f"mixing ratio change_{diff_height:d} m_g kg-1 m-1"] = (derived_data[
                                                                                f"mixing ratio_{heights[dh + 1]:d} m_g kg-1"] -
                                                                            derived_data[
                                                                                f"mixing ratio_{heights[dh]:d} m_g kg-1"]) / (
                                                                                heights[dh + 1] - heights[dh])
    derived_data["virtual potential temperature_2 m_K"] = virtual_temperature(
        derived_data["potential temperature_2 m_K"], derived_data["mixing ratio_2 m_g kg-1"])
    derived_data["air density_10 m_kg m-3"] = air_density(virtual_temperature(derived_data["temperature_10 m_K"],
                                                                              derived_data["mixing ratio_10 m_g kg-1"]),
                                                          derived_data["pressure_2 m_hPa"])
    derived_data["air density_2 m_kg m-3"] = air_density(virtual_temperature(derived_data["temperature_2 m_K"],
                                                                             derived_data["mixing ratio_2 m_g kg-1"]),
                                                         derived_data["pressure_2 m_hPa"])
    # Wind is only observed at 10, 20 and 40 m; decompose speed/direction
    # into u/v components.
    for height in [10, 20, 40]:
        derived_data["wind speed_{0:d} m_m s-1".format(height)] = combined_data[("tower", "F_{0:d}m".format(height))]
        derived_data["wind direction_{0:d} m_degrees".format(height)] = combined_data[
            ("tower", "D_{0:d}m".format(height))]
        derived_data["u wind_{0:d} m_m s-1".format(height)], derived_data["v wind_{0:d} m_m s-1".format(height)] = \
            wind_components(derived_data["wind speed_{0:d} m_m s-1".format(height)],
                            derived_data["wind direction_{0:d} m_degrees".format(height)])
    derived_data["soil temperature_0 cm_K"] = celsius_to_kelvin(combined_data[("soil", "TS00")])
    derived_data["soil temperature_4 cm_K"] = celsius_to_kelvin(combined_data[("soil", "TS04")])
    derived_data["soil potential temperature_0 cm_K"] = potential_temperature(derived_data["soil temperature_0 cm_K"],
                                                                              derived_data["pressure_2 m_hPa"])
    derived_data["soil potential temperature_4 cm_K"] = potential_temperature(derived_data["soil temperature_4 cm_K"],
                                                                              derived_data["pressure_2 m_hPa"])
    derived_data["soil water content_3 cm_m3 m-3"] = combined_data[("soil_water", "TH03")]
    # derived_data["soil water content_8 cm_m3 m-3"] = combined_data[("soil_water", "TH08")]
    # NOTE(review): field_capacity=0.82 differs from the helper's default —
    # confirm this site-specific value.
    derived_data["moisture availability_soil_"] = moisture_availability(derived_data["soil water content_3 cm_m3 m-3"],
                                                                        field_capacity=0.82)
    # derived_data["moisture availability_8 cm_"] = moisture_availability(derived_data["soil water content_8 cm_m3 m-3"])
    derived_data["upward longwave irradiance_0 m_W m-2"] = combined_data[("irrad", "LWU")]
    derived_data["downward longwave irradiance_0 m_W m-2"] = combined_data[("irrad", "LWD")]
    derived_data["upward shortwave irradiance_0 m_W m-2"] = combined_data[("irrad", "SWU")]
    # Skin (surface) state inferred from upward longwave irradiance.
    derived_data["skin temperature_0 m_K"] = skin_temperature(derived_data["upward longwave irradiance_0 m_W m-2"])
    derived_data["skin potential temperature_0 m_K"] = potential_temperature(derived_data["skin temperature_0 m_K"],
                                                                             derived_data["pressure_2 m_hPa"])
    derived_data["skin saturation mixing ratio_0 m_g kg-1"] = saturation_mixing_ratio(
        derived_data["skin temperature_0 m_K"],
        derived_data["pressure_2 m_hPa"])
    derived_data["skin virtual potential temperature_0 m_K"] = virtual_temperature(
        derived_data["skin potential temperature_0 m_K"],
        derived_data["skin saturation mixing ratio_0 m_g kg-1"])
    # Skin-to-level gradients at each tower height.
    for height in [2, 10, 20, 40]:
        derived_data[f"potential temperature skin change_{height:d} m_K m-1"] = \
            (derived_data[f"potential temperature_{height:d} m_K"] - derived_data["skin potential temperature_0 m_K"]) \
            / height
        derived_data[f"virtual potential temperature skin change_{height:d} m_K m-1"] = \
            (derived_data[f"virtual potential temperature_{height:d} m_K"] - derived_data["skin virtual potential temperature_0 m_K"]) \
            / height
        # Moisture gradient is scaled by moisture availability.
        derived_data[f"mixing ratio skin change_{height:d} m_g kg-1 m-1"] = \
            derived_data["moisture availability_soil_"] * (derived_data[f"mixing ratio_{height:d} m_g kg-1"] -
                                                           derived_data["skin saturation mixing ratio_0 m_g kg-1"]) \
            / height
    # Floor friction velocity to avoid division by zero in derived scales.
    derived_data["friction velocity_surface_m s-1"] = np.maximum(combined_data[("flux", "UST")], 0.001)
    derived_data["sensible heat flux_surface_W m-2"] = combined_data[("flux", "H")]
    derived_data["latent heat flux_surface_W m-2"] = combined_data[("flux", "LE")]
    derived_data["soil heat flux_surface_W m-2"] = combined_data[("flux", "G0")]
    derived_data["kinematic sensible heat flux_surface_K m s-1"] = kinematic_sensible_heat_flux(derived_data["sensible heat flux_surface_W m-2"], derived_data["air density_10 m_kg m-3"])
    derived_data["kinematic latent heat flux_surface_g kg-1 m s-1"] = kinematic_latent_heat_flux(derived_data["latent heat flux_surface_W m-2"], derived_data["air density_10 m_kg m-3"])
    if reflect_counter_gradient:
        # Flip the sign of fluxes whose direction opposes the skin-air
        # gradient (flux and gradient products below zero).
        sh_counter_gradient = (derived_data[f"potential temperature skin change_10 m_K m-1"] *
                               derived_data["sensible heat flux_surface_W m-2"]) < 0
        lh_counter_gradient = (derived_data[f"mixing ratio skin change_10 m_g kg-1 m-1"] *
                               derived_data["latent heat flux_surface_W m-2"]) < 0
        derived_data.loc[sh_counter_gradient,
                         "sensible heat flux_surface_W m-2"] = -derived_data.loc[sh_counter_gradient,
                                                                                 "sensible heat flux_surface_W m-2"]
        derived_data.loc[lh_counter_gradient,
                         "latent heat flux_surface_W m-2"] = -derived_data.loc[lh_counter_gradient,
                                                                               "latent heat flux_surface_W m-2"]
    derived_data["temperature scale_surface_K"] = temperature_scale(derived_data["sensible heat flux_surface_W m-2"],
                                                                    derived_data["air density_10 m_kg m-3"],
                                                                    derived_data["friction velocity_surface_m s-1"])
    derived_data["moisture scale_surface_g kg-1"] = moisture_scale(derived_data["latent heat flux_surface_W m-2"],
                                                                   derived_data["air density_10 m_kg m-3"],
                                                                   derived_data["friction velocity_surface_m s-1"])
    derived_data["bulk richardson_10 m_"] = bulk_richardson_number(derived_data["potential temperature_10 m_K"],
                                                                   10,
                                                                   derived_data["mixing ratio_10 m_g kg-1"],
                                                                   derived_data[
                                                                       "skin potential temperature_0 m_K"],
                                                                   derived_data["wind speed_10 m_m s-1"])
    # NOTE(review): the 2 m Richardson number is computed against the skin
    # *virtual* potential temperature while the 10 m one uses the plain skin
    # potential temperature — confirm this asymmetry is intentional.
    derived_data["bulk richardson_2 m_"] = bulk_richardson_number(derived_data["potential temperature_2 m_K"],
                                                                  2,
                                                                  derived_data["mixing ratio_2 m_g kg-1"],
                                                                  derived_data[
                                                                      "skin virtual potential temperature_0 m_K"],
                                                                  derived_data["wind speed_10 m_m s-1"])
    derived_data["bulk richardson_10-2 m_"] = bulk_richardson_number(derived_data["potential temperature_10 m_K"],
                                                                     10,
                                                                     derived_data["mixing ratio_10 m_g kg-1"],
                                                                     derived_data[
                                                                         "virtual potential temperature_2 m_K"],
                                                                     derived_data["wind speed_10 m_m s-1"])
    derived_data["obukhov length_surface_m"] = obukhov_length(derived_data["potential temperature_10 m_K"],
                                                              derived_data["temperature scale_surface_K"],
                                                              derived_data["friction velocity_surface_m s-1"])
    if average_period is not None:
        # Optional temporal smoothing before export.
        derived_data = derived_data.rolling(window=average_period).mean()
        derived_data = derived_data.dropna()
    """
    """
    # Create the header with updated column names
    # ("variable:level:unit" with spaces replaced by underscores).
    header_names = []
    for col in derived_columns:
        parts = col.split("_")
        var = parts[0].replace(" ", "_")
        level = parts[1].replace(" ", "_")
        unit = parts[2].replace(" ", "_")
        if unit == '':
            unit = 'None'
        name = "%s:%s:%s" % (var, level, unit)
        # print ("%s --> %s" % (col, name))
        header_names.append(name)
    derived_data.to_csv(out_file, columns=derived_columns, header=header_names, index_label="Time")
    return derived_data
def load_derived_data(filename,
train_test_split_date, dropna=True, filter_counter_gradient=False):
"""
Load derived | |
# noqa: D100
from typing import Optional
import numpy as np
import xarray
from xclim.core.calendar import resample_doy
from xclim.core.units import (
convert_units_to,
declare_units,
pint2cfunits,
rate2amount,
str2pint,
to_agg_units,
)
from . import run_length as rl
from ._conversion import rain_approximation, snowfall_approximation
from .generic import select_resample_op, threshold_count
# Frequencies : YS: year start, QS-DEC: seasons starting in december, MS: month start
# See http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
# -------------------------------------------------- #
# ATTENTION: ASSUME ALL INDICES WRONG UNTIL TESTED ! #
# -------------------------------------------------- #
# Names exported by ``from xclim.indices import *`` — this module's public
# API. Several entries (e.g. blowing_snow, tg90p) are defined elsewhere in
# the module.
__all__ = [
    "blowing_snow",
    "cold_spell_duration_index",
    "cold_and_dry_days",
    "daily_freezethaw_cycles",
    "daily_temperature_range",
    "daily_temperature_range_variability",
    "days_over_precip_thresh",
    "extreme_temperature_range",
    "fraction_over_precip_thresh",
    "heat_wave_frequency",
    "heat_wave_max_length",
    "heat_wave_total_length",
    "high_precip_low_temp",
    "liquid_precip_ratio",
    "precip_accumulation",
    "rain_on_frozen_ground_days",
    "tg90p",
    "tg10p",
    "tn90p",
    "tn10p",
    "tx90p",
    "tx10p",
    "tx_tn_days_above",
    "warm_spell_duration_index",
    "winter_rain_ratio",
]
@declare_units(tasmin="[temperature]", tn10="[temperature]")
def cold_spell_duration_index(
    tasmin: xarray.DataArray, tn10: xarray.DataArray, window: int = 6, freq: str = "YS"
) -> xarray.DataArray:
    r"""Cold spell duration index.

    Count of days belonging to runs of at least ``window`` consecutive days on
    which the daily minimum temperature falls below its calendar-day 10th
    percentile.

    Parameters
    ----------
    tasmin : xarray.DataArray
      Minimum daily temperature.
    tn10 : xarray.DataArray
      10th percentile of daily minimum temperature with a `dayofyear` coordinate.
    window : int
      Minimum number of consecutive cold days to qualify as a cold spell.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [time]
      Count of days taking part in qualifying cold spells within each period.

    Notes
    -----
    Let :math:`TN_i` be the minimum daily temperature for day of the year
    :math:`i` and :math:`TN10_i` the 10th percentile of minimum daily
    temperature (1961-1990) for that day. The index over period :math:`\phi` is:

    .. math::

       \sum_{i \in \phi} \prod_{j=i}^{i+6} \left[ TN_j < TN10_j \right]

    where :math:`[P]` is 1 if :math:`P` is true, and 0 if false.

    References
    ----------
    From the Expert Team on Climate Change Detection, Monitoring and Indices (ETCCDMI).

    Examples
    --------
    # Note that this example does not use a proper 1961-1990 reference period.
    >>> from xclim.core.calendar import percentile_doy
    >>> from xclim.indices import cold_spell_duration_index

    >>> tasmin = xr.open_dataset(path_to_tasmin_file).tasmin.isel(lat=0, lon=0)
    >>> tn10 = percentile_doy(tasmin, per=10).sel(percentiles=10)
    >>> cold_spell_duration_index(tasmin, tn10)
    """
    # Map the day-of-year percentiles onto the time axis of tasmin, after
    # harmonising units.
    per_day_thresh = resample_doy(convert_units_to(tn10, tasmin), tasmin)
    is_cold = tasmin < per_day_thresh
    spell_days = is_cold.resample(time=freq).map(
        rl.windowed_run_count, window=window, dim="time"
    )
    return to_agg_units(spell_days, tasmin, "count")
def cold_and_dry_days(
    tas: xarray.DataArray, tgin25, pr, wet25, freq: str = "YS"
) -> xarray.DataArray:
    r"""Cold and dry days.

    Total number of days on which "cold" and "dry" conditions coincide.

    Parameters
    ----------
    tas : xarray.DataArray
      Mean daily temperature values.
    tgin25 : xarray.DataArray
      First quartile of daily mean temperature computed by month.
    pr : xarray.DataArray
      Daily precipitation.
    wet25 : xarray.DataArray
      First quartile of daily total precipitation computed by month.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray
      The total number of days where cold and dry conditions coincide.

    Notes
    -----
    Formula to be written [cold_dry_days]_.

    References
    ----------
    .. [cold_dry_days] Beniston, M. (2009). Trends in joint quantiles of temperature and precipitation in Europe
        since 1901 and projected for 2100. Geophysical Research Letters, 36(7). https://doi.org/10.1029/2008GL037119
    """
    # Not implemented: the fixed 1 mm wet-day threshold is questionable — it
    # makes no sense to assume a day with < 1 mm of precipitation is not dry.
    #
    # c1 = tas < convert_units_to(tgin25, tas)
    # c2 = (pr > convert_units_to('1 mm', pr)) * (pr < convert_units_to(wet25, pr))
    # c = (c1 * c2) * 1
    # return c.resample(time=freq).sum(dim='time')
    raise NotImplementedError
@declare_units(
    tasmax="[temperature]",
    tasmin="[temperature]",
    thresh_tasmax="[temperature]",
    thresh_tasmin="[temperature]",
)
def daily_freezethaw_cycles(
    tasmin: xarray.DataArray,
    tasmax: xarray.DataArray,
    thresh_tasmax: str = "0 degC",
    thresh_tasmin: str = "0 degC",
    freq: str = "YS",
) -> xarray.DataArray:  # noqa: D401
    r"""Number of days with a diurnal freeze-thaw cycle.

    Counts the days where Tmax > thresh_tasmax while Tmin <= thresh_tasmin.

    Parameters
    ----------
    tasmin : xarray.DataArray
      Minimum daily temperature.
    tasmax : xarray.DataArray
      Maximum daily temperature.
    thresh_tasmax : str
      The temperature threshold needed to trigger a thaw event.
    thresh_tasmin : str
      The temperature threshold needed to trigger a freeze event.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [time]
      Number of days with a diurnal freeze-thaw cycle.

    Notes
    -----
    With :math:`TX_{i}` the maximum and :math:`TN_{i}` the minimum temperature
    at day :math:`i`, the count over period :math:`\phi` is:

    .. math::

        \sum_{i \in \phi} [ TX_{i} > 0℃ ] [ TN_{i} < 0℃ ]

    where :math:`[P]` is 1 if :math:`P` is true, and 0 if false.
    """
    # Convert both string thresholds into the units of the matching series.
    freeze = tasmin <= convert_units_to(thresh_tasmin, tasmin)
    thaw = tasmax > convert_units_to(thresh_tasmax, tasmax)
    cycles = freeze * thaw * 1
    counts = cycles.resample(time=freq).sum(dim="time")
    return to_agg_units(counts, tasmin, "count")
@declare_units(tasmax="[temperature]", tasmin="[temperature]")
def daily_temperature_range(
    tasmin: xarray.DataArray,
    tasmax: xarray.DataArray,
    freq: str = "YS",
    op: str = "mean",
) -> xarray.DataArray:
    r"""Statistics of daily temperature range.

    Reduces the day-by-day difference between maximum and minimum temperature
    with the requested operator over each resampling period.

    Parameters
    ----------
    tasmin : xarray.DataArray
      Minimum daily temperature.
    tasmax : xarray.DataArray
      Maximum daily temperature.
    freq : str
      Resampling frequency.
    op : {'min', 'max', 'mean', 'std'} or func
      Reduce operation. Can either be a DataArray method or a function that can be applied to a DataArray.

    Returns
    -------
    xarray.DataArray, [same units as tasmin]
      The reduced daily temperature range for each period.

    Notes
    -----
    For the default ``op='mean'``, with :math:`TX_{ij}` and :math:`TN_{ij}` the
    daily maximum and minimum temperature at day :math:`i` of period :math:`j`:

    .. math::

        DTR_j = \frac{ \sum_{i=1}^I (TX_{ij} - TN_{ij}) }{I}
    """
    tasmax = convert_units_to(tasmax, tasmin)
    out = select_resample_op(tasmax - tasmin, op=op, freq=freq)
    # A temperature difference carries "delta" units: u - u produces them.
    delta = str2pint(tasmax.units)
    out.attrs["units"] = pint2cfunits(delta - delta)
    return out
@declare_units(tasmax="[temperature]", tasmin="[temperature]")
def daily_temperature_range_variability(
    tasmin: xarray.DataArray, tasmax: xarray.DataArray, freq: str = "YS"
) -> xarray.DataArray:
    r"""Mean absolute day-to-day variation in daily temperature range.

    Parameters
    ----------
    tasmin : xarray.DataArray
      Minimum daily temperature.
    tasmax : xarray.DataArray
      Maximum daily temperature.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [same units as tasmin]
      The average day-to-day variation in daily temperature range for the given time period.

    Notes
    -----
    With :math:`TX_{ij}` and :math:`TN_{ij}` the daily maximum and minimum
    temperature at day :math:`i` of period :math:`j`:

    .. math::

       vDTR_j = \frac{ \sum_{i=2}^{I} |(TX_{ij}-TN_{ij})-(TX_{i-1,j}-TN_{i-1,j})| }{I}
    """
    tasmax = convert_units_to(tasmax, tasmin)
    day_to_day = abs((tasmax - tasmin).diff(dim="time"))
    out = day_to_day.resample(time=freq).mean(dim="time")
    # Differences of like units carry "delta" units: u - u produces them.
    delta = str2pint(tasmax.units)
    out.attrs["units"] = pint2cfunits(delta - delta)
    return out
@declare_units(tasmax="[temperature]", tasmin="[temperature]")
def extreme_temperature_range(
    tasmin: xarray.DataArray, tasmax: xarray.DataArray, freq: str = "YS"
) -> xarray.DataArray:
    r"""Extreme intra-period temperature range.

    The maximum of max temperature (TXx) minus the minimum of min temperature
    (TNn) for the given time period.

    Parameters
    ----------
    tasmin : xarray.DataArray
      Minimum daily temperature.
    tasmax : xarray.DataArray
      Maximum daily temperature.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [same units as tasmin]
      Extreme intra-period temperature range for the given time period.

    Notes
    -----
    With :math:`TX_{ij}` and :math:`TN_{ij}` the daily maximum and minimum
    temperature at day :math:`i` of period :math:`j`:

    .. math::

        ETR_j = max(TX_{ij}) - min(TN_{ij})
    """
    tasmax = convert_units_to(tasmax, tasmin)
    period_max = tasmax.resample(time=freq).max(dim="time")
    period_min = tasmin.resample(time=freq).min(dim="time")
    out = period_max - period_min
    # A temperature difference carries "delta" units: u - u produces them.
    delta = str2pint(tasmax.units)
    out.attrs["units"] = pint2cfunits(delta - delta)
    return out
@declare_units(
tasmin="[temperature]",
tasmax="[temperature]",
thresh_tasmin="[temperature]",
thresh_tasmax="[temperature]",
)
def heat_wave_frequency(
tasmin: xarray.DataArray,
tasmax: xarray.DataArray,
thresh_tasmin: str = "22.0 degC",
thresh_tasmax: str = "30 degC",
window: int = 3,
freq: str = "YS",
) -> xarray.DataArray:
r"""Heat wave frequency.
Number of heat waves over a given period. A heat wave is defined as an event
where the minimum and maximum daily temperature both exceeds specific thresholds
over a minimum number of days.
Parameters
----------
tasmin : xarray.DataArray
Minimum daily temperature.
tasmax : xarray.DataArray
Maximum daily temperature.
thresh_tasmin : str
The minimum temperature threshold needed to trigger a heatwave event.
thresh_tasmax : str
The maximum temperature threshold needed to trigger a heatwave event.
window : int
Minimum number of days with temperatures above thresholds to qualify as a heatwave.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [dimensionless]
Number of heatwave at | |
CJK
# Unified Ideographs area, followed by U+3051 HIRAGANA LETTER KE)
self.assertGdbRepr(u'\u6587\u5b57\u5316\u3051')
# Test a character outside the BMP:
# U+1D121 MUSICAL SYMBOL C CLEF
# This is:
# UTF-8: 0xF0 0x9D 0x84 0xA1
# UTF-16: 0xD834 0xDD21
# This will only work on wide-unicode builds:
self.assertGdbRepr(u"\U0001D121")
def test_sets(self):
    """Verify the pretty-printing of sets."""
    self.assertGdbRepr(set())
    # Sets are unordered, so eval the rendering back and compare as sets.
    for command, expected in (("print set(['a', 'b'])", {'a', 'b'}),
                              ("print set([4, 5])", {4, 5})):
        rep = self.get_gdb_repr(command)[0]
        self.assertTrue(rep.startswith("set(["))
        self.assertTrue(rep.endswith("])"))
        self.assertEqual(eval(rep), expected)

    # Ensure that we handled sets containing the "dummy" key value,
    # which happens on deletion:
    gdb_repr, gdb_output = self.get_gdb_repr('''s = set(['a','b'])
s.pop()
print s''')
    self.assertEqual(gdb_repr, "set(['b'])")
def test_frozensets(self):
    """Verify the pretty-printing of frozensets."""
    self.assertGdbRepr(frozenset())
    # Frozensets are unordered; eval the rendering and compare as sets.
    for command, expected in (("print frozenset(['a', 'b'])", {'a', 'b'}),
                              ("print frozenset([4, 5])", {4, 5})):
        rep = self.get_gdb_repr(command)[0]
        self.assertTrue(rep.startswith("frozenset(["))
        self.assertTrue(rep.endswith("])"))
        self.assertEqual(eval(rep), expected)
def test_exceptions(self):
    # Verify the pretty-printing of exception instances in the inferior
    # (rendered with their "exceptions." module prefix under Python 2).
    # Test a RuntimeError
    gdb_repr, gdb_output = self.get_gdb_repr('''
try:
    raise RuntimeError("I am an error")
except RuntimeError, e:
    print e
''')
    self.assertEqual(gdb_repr,
                     "exceptions.RuntimeError('I am an error',)")

    # Test division by zero:
    gdb_repr, gdb_output = self.get_gdb_repr('''
try:
    a = 1 / 0
except ZeroDivisionError, e:
    print e
''')
    self.assertEqual(gdb_repr,
                     "exceptions.ZeroDivisionError('integer division or modulo by zero',)")
def test_classic_class(self):
    """Verify the pretty-printing of classic class instances."""
    source = '''
class Foo:
    pass
foo = Foo()
foo.an_int = 42
print foo'''
    gdb_repr, gdb_output = self.get_gdb_repr(source)
    # Instances render as <ClassName(attr=value, ...) at remote 0xADDR>.
    m = re.match(r'<Foo\(an_int=42\) at remote 0x[0-9a-f]+>', gdb_repr)
    self.assertTrue(m,
                    msg='Unexpected classic-class rendering %r' % gdb_repr)
def test_modern_class(self):
    """Verify the pretty-printing of new-style class instances."""
    source = '''
class Foo(object):
    pass
foo = Foo()
foo.an_int = 42
print foo'''
    gdb_repr, gdb_output = self.get_gdb_repr(source)
    # New-style instances use the same <ClassName(...) at remote 0xADDR> form.
    m = re.match(r'<Foo\(an_int=42\) at remote 0x[0-9a-f]+>', gdb_repr)
    self.assertTrue(m,
                    msg='Unexpected new-style class rendering %r' % gdb_repr)
def test_subclassing_list(self):
    'Verify the pretty-printing of an instance of a list subclass'
    # Subclass instances are rendered via their attribute dict, like plain
    # instances, rather than via the underlying list contents.
    gdb_repr, gdb_output = self.get_gdb_repr('''
class Foo(list):
    pass
foo = Foo()
foo += [1, 2, 3]
foo.an_int = 42
print foo''')
    m = re.match(r'<Foo\(an_int=42\) at remote 0x[0-9a-f]+>', gdb_repr)
    self.assertTrue(m,
                    msg='Unexpected new-style class rendering %r' % gdb_repr)
def test_subclassing_tuple(self):
    'Verify the pretty-printing of an instance of a tuple subclass'
    # This should exercise the negative tp_dictoffset code in the
    # new-style class support
    gdb_repr, gdb_output = self.get_gdb_repr('''
class Foo(tuple):
    pass
foo = Foo((1, 2, 3))
foo.an_int = 42
print foo''')
    m = re.match(r'<Foo\(an_int=42\) at remote 0x[0-9a-f]+>', gdb_repr)
    self.assertTrue(m,
                    msg='Unexpected new-style class rendering %r' % gdb_repr)
def assertSane(self, source, corruption, expvalue=None, exptype=None):
    """Run Python under gdb, corrupting variables in the inferior process
    immediately before taking a backtrace.

    Verify that the variable's representation is the expected failsafe
    representation.
    """
    cmds_after_breakpoint = ['backtrace']
    if corruption:
        cmds_after_breakpoint.insert(0, corruption)

    gdb_repr, gdb_output = self.get_gdb_repr(
        source, cmds_after_breakpoint=cmds_after_breakpoint)

    if expvalue and gdb_repr == repr(expvalue):
        # gdb managed to print the value in spite of the corruption;
        # this is good (see http://bugs.python.org/issue8330)
        return

    if exptype:
        pattern = '<' + exptype + ' at remote 0x[0-9a-f]+>'
    else:
        # Match anything for the type name; 0xDEADBEEF could point to
        # something arbitrary (see http://bugs.python.org/issue8330)
        pattern = '<.* at remote 0x[0-9a-f]+>'

    if not re.match(pattern, gdb_repr):
        self.fail('Unexpected gdb representation: %r\n%s'
                  % (gdb_repr, gdb_output))
def test_NULL_ptr(self):
    """Ensure that a NULL PyObject* is handled gracefully."""
    commands = ['set variable op=0', 'backtrace']
    gdb_repr, gdb_output = self.get_gdb_repr('print 42',
                                             cmds_after_breakpoint=commands)
    # A NULL pointer renders as its raw address instead of crashing gdb.
    self.assertEqual(gdb_repr, '0x0')
def test_NULL_ob_type(self):
    'Ensure that a PyObject* with NULL ob_type is handled gracefully'
    # Zero the object's type pointer just before the backtrace; the printer
    # must fall back to the failsafe representation instead of crashing.
    self.assertSane('print 42',
                    'set op->ob_type=0')
def test_corrupt_ob_type(self):
    'Ensure that a PyObject* with a corrupt ob_type is handled gracefully'
    # expvalue=42 lets the test also pass if gdb still manages to print the
    # value despite the bogus type pointer (see bugs.python.org/issue8330).
    self.assertSane('print 42',
                    'set op->ob_type=0xDEADBEEF',
                    expvalue=42)
def test_corrupt_tp_flags(self):
    'Ensure that a PyObject* with a type with corrupt tp_flags is handled'
    # Clearing tp_flags confuses type dispatch in the pretty-printer; a sane
    # fallback (or the correct value) is still expected.
    self.assertSane('print 42',
                    'set op->ob_type->tp_flags=0x0',
                    expvalue=42)
def test_corrupt_tp_name(self):
    'Ensure that a PyObject* with a type with corrupt tp_name is handled'
    # tp_name points at garbage here; reading the type name must not crash.
    self.assertSane('print 42',
                    'set op->ob_type->tp_name=0xDEADBEEF',
                    expvalue=42)
def test_NULL_instance_dict(self):
    'Ensure that a PyInstanceObject with a NULL in_dict is handled'
    # Classic-class instances store attributes in in_dict; NULL it out and
    # verify the failsafe rendering still names the class ('Foo').
    self.assertSane('''
class Foo:
    pass
foo = Foo()
foo.an_int = 42
print foo''',
                    'set ((PyInstanceObject*)op)->in_dict = 0',
                    exptype='Foo')
def test_builtins_help(self):
    'Ensure that the new-style class _Helper in site.py can be handled'
    # (this was the issue causing tracebacks in
    #  http://bugs.python.org/issue8032#msg100537 )
    # import_site=True is required so site.py installs __builtins__.help.
    gdb_repr, gdb_output = self.get_gdb_repr('print __builtins__.help', import_site=True)
    m = re.match(r'<_Helper at remote 0x[0-9a-f]+>', gdb_repr)
    self.assertTrue(m,
                    msg='Unexpected rendering %r' % gdb_repr)
def test_selfreferential_list(self):
    """Ensure that a reference loop involving a list doesn't lead proxyval
    into an infinite loop."""
    # The cycle must be rendered as an ellipsis marker, both for a direct
    # self-reference and for an indirect one through a second list.
    cases = (
        ("a = [3, 4, 5] ; a.append(a) ; print a", '[3, 4, 5, [...]]'),
        ("a = [3, 4, 5] ; b = [a] ; a.append(b) ; print a", '[3, 4, 5, [[...]]]'),
    )
    for command, expected in cases:
        gdb_repr, gdb_output = self.get_gdb_repr(command)
        self.assertEqual(gdb_repr, expected)
def test_selfreferential_dict(self):
    '''Ensure that a reference loop involving a dict doesn't lead proxyval
    into an infinite loop:'''
    # The cycle is rendered as {...} rather than recursing forever.
    gdb_repr, gdb_output = \
        self.get_gdb_repr("a = {} ; b = {'bar':a} ; a['foo'] = b ; print a")
    self.assertEqual(gdb_repr, "{'foo': {'bar': {...}}}")
def test_selfreferential_old_style_instance(self):
    # A classic-class instance whose attribute refers back to itself must be
    # rendered with an <...> cycle marker instead of recursing forever.
    gdb_repr, gdb_output = \
        self.get_gdb_repr('''
class Foo:
    pass
foo = Foo()
foo.an_attr = foo
print foo''')
    self.assertTrue(re.match('<Foo\(an_attr=<\.\.\.>\) at remote 0x[0-9a-f]+>',
                             gdb_repr),
                    'Unexpected gdb representation: %r\n%s' % \
                        (gdb_repr, gdb_output))
def test_selfreferential_new_style_instance(self):
    # Direct self-reference: foo.an_attr is foo itself; rendered as <...>.
    gdb_repr, gdb_output = \
        self.get_gdb_repr('''
class Foo(object):
    pass
foo = Foo()
foo.an_attr = foo
print foo''')
    self.assertTrue(re.match('<Foo\(an_attr=<\.\.\.>\) at remote 0x[0-9a-f]+>',
                             gdb_repr),
                    'Unexpected gdb representation: %r\n%s' % \
                        (gdb_repr, gdb_output))

    # Mutual reference: a -> b -> a; the cycle is cut at the second level.
    gdb_repr, gdb_output = \
        self.get_gdb_repr('''
class Foo(object):
    pass
a = Foo()
b = Foo()
a.an_attr = b
b.an_attr = a
print a''')
    self.assertTrue(re.match('<Foo\(an_attr=<Foo\(an_attr=<\.\.\.>\) at remote 0x[0-9a-f]+>\) at remote 0x[0-9a-f]+>',
                             gdb_repr),
                    'Unexpected gdb representation: %r\n%s' % \
                        (gdb_repr, gdb_output))
def test_truncation(self):
    'Verify that very long output is truncated'
    # The pretty-printer caps renderings at 1024 characters and appends an
    # explicit "...(truncated)" marker.
    gdb_repr, gdb_output = self.get_gdb_repr('print range(1000)')
    self.assertEqual(gdb_repr,
                     "[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, "
                     "14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, "
                     "27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, "
                     "40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, "
                     "53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, "
                     "66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, "
                     "79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, "
                     "92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, "
                     "104, 105, 106, 107, 108, 109, 110, 111, 112, 113, "
                     "114, 115, 116, 117, 118, 119, 120, 121, 122, 123, "
                     "124, 125, 126, 127, 128, 129, 130, 131, 132, 133, "
                     "134, 135, 136, 137, 138, 139, 140, 141, 142, 143, "
                     "144, 145, 146, 147, 148, 149, 150, 151, 152, 153, "
                     "154, 155, 156, 157, 158, 159, 160, 161, 162, 163, "
                     "164, 165, 166, 167, 168, 169, 170, 171, 172, 173, "
                     "174, 175, 176, 177, 178, 179, 180, 181, 182, 183, "
                     "184, 185, 186, 187, 188, 189, 190, 191, 192, 193, "
                     "194, 195, 196, 197, 198, 199, 200, 201, 202, 203, "
                     "204, 205, 206, 207, 208, 209, 210, 211, 212, 213, "
                     "214, 215, 216, 217, 218, 219, 220, 221, 222, 223, "
                     "224, 225, 226...(truncated)")
    # Exactly 1024 characters of payload plus the truncation marker.
    self.assertEqual(len(gdb_repr),
                     1024 + len('...(truncated)'))
def test_builtin_function(self):
    """Verify the rendering of a built-in function."""
    expected = '<built-in function len>'
    gdb_repr, gdb_output = self.get_gdb_repr('print len')
    self.assertEqual(gdb_repr, expected)
def test_builtin_method(self):
    # Bound built-in methods name the owning object in their rendering.
    gdb_repr, gdb_output = self.get_gdb_repr('import sys; print sys.stdout.readlines')
    self.assertTrue(re.match('<built-in method readlines of file object at remote 0x[0-9a-f]+>',
                             gdb_repr),
                    'Unexpected gdb representation: %r\n%s' % \
                        (gdb_repr, gdb_output))
def test_frames(self):
    # Verify the rendering of PyFrameObject*: reached here through the code
    # object's co_zombieframe, it should print as
    # "Frame 0xADDR, for file <string>, line N, in <func> ()".
    gdb_output = self.get_stack_trace('''
def foo(a, b, c):
    pass
foo(3, 4, 5)
print foo.__code__''',
                                      breakpoint='PyObject_Print',
                                      cmds_after_breakpoint=['print (PyFrameObject*)(((PyCodeObject*)op)->co_zombieframe)']
                                      )
    self.assertTrue(re.match(r'.*\s+\$1 =\s+Frame 0x[0-9a-f]+, for file <string>, line 3, in foo \(\)\s+.*',
                             gdb_output,
                             re.DOTALL),
                    'Unexpected gdb representation: %r\n%s' % (gdb_output, gdb_output))
@unittest.skipIf(python_is_optimized(),
                 "Python was compiled with optimizations")
class PyListTests(DebuggerTests):
    """Tests of the 'py-list' gdb command, which lists Python source lines
    around the current frame of the inferior process."""

    def assertListing(self, expected, actual):
        # py-list output appears at the end of gdb's output.
        self.assertEndsWith(actual, expected)

    def test_basic_command(self):
        'Verify that the "py-list" command works'
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-list'])
        # The current line (10) is marked with '>'.
        self.assertListing('   5    \n'
                           '   6    def bar(a, b, c):\n'
                           '   7        baz(a, b, c)\n'
                           '   8    \n'
                           '   9    def baz(*args):\n'
                           ' >10        print(42)\n'
                           '  11    \n'
                           '  12    foo(1, 2, 3)\n',
                           bt)

    def test_one_abs_arg(self):
        'Verify the "py-list" command with one absolute argument'
        # "py-list 9" lists from line 9 onwards.
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-list 9'])
        self.assertListing('   9    def baz(*args):\n'
                           ' >10        print(42)\n'
                           '  11    \n'
                           '  12    foo(1, 2, 3)\n',
                           bt)

    def test_two_abs_args(self):
        'Verify the "py-list" command with two absolute arguments'
        # "py-list 1,3" lists the inclusive range of lines 1-3.
        bt = self.get_stack_trace(script=self.get_sample_script(),
                                  cmds_after_breakpoint=['py-list 1,3'])
        self.assertListing('   1    # Sample script for use by test_gdb.py\n'
                           '   2    \n'
                           '   3    def foo(a, b, c):\n',
                           bt)
class StackNavigationTests(DebuggerTests):
@unittest.skipUnless(HAS_PYUP_PYDOWN, "test requires py-up/py-down commands")
@unittest.skipIf(python_is_optimized(),
"Python was compiled | |
# Copyright (c) 2020 @ FBK - Fondazione B<NAME>
# Author: <NAME>
# Project: LUCID: A Practical, Lightweight Deep Learning Solution for DDoS Attack Detection
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import sys
import csv
import glob
import h5py
import time
import pyshark
import socket
import pickle
import random
import hashlib
import argparse
import ipaddress
import numpy as np
from lxml import etree
from collections import OrderedDict
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.utils import shuffle as sklearn_shuffle
from multiprocessing import Process, Manager, Value, Queue
from util_functions import *
# Sample commands
# split a pcap file into smaller chunks to leverage multi-core CPUs: tcpdump -r dataset.pcap -w dataset-chunk -C 1000
# dataset parsing (first step): python3 lucid_dataset_parser.py --dataset_type SYN2020 --dataset_folder ./sample-dataset/ --packets_per_flow 10 --dataset_id SYN2020 --traffic_type all --time_window 10
# dataset parsing (second step): python3 lucid_dataset_parser.py --preprocess_folder ./sample-dataset/
# Known attacker/victim IP addresses for the supported datasets; a flow is
# labelled DDoS when its (src, dst) pair matches one of these specs.
# NOTE(review): the IDS2018 addresses contain duplicates and mixed subnets —
# they look placeholder-sanitized; verify against the CSE-CIC-IDS2018
# documentation before relying on them.
IDS2018_DDOS_FLOWS = {'attackers': ['192.168.127.12', '192.168.3.11','192.168.127.12','192.168.127.12','192.168.3.11','172.16.17.32','192.168.127.12','172.16.17.32','192.168.3.11','192.168.127.12'],
                      'victims': ['172.16.17.32','172.31.69.28']}

IDS2017_DDOS_FLOWS = {'attackers': ['172.16.0.1'],
                      'victims': ['192.168.10.50']}

CUSTOM_DDOS_SYN = {'attackers': ['11.0.0.' + str(x) for x in range(1,255)],
                   'victims': ['10.42.0.2']}

# Maps the --dataset_type CLI value to the address spec used for labelling.
DDOS_ATTACK_SPECS = {
    'IDS2017' : IDS2017_DDOS_FLOWS,
    'IDS2018' : IDS2018_DDOS_FLOWS,
    'SYN2020' : CUSTOM_DDOS_SYN
}

# Vocabulary over protocol names (``protocols`` comes from util_functions);
# parse_packet() encodes each packet's protocol stack against it.
vector_proto = CountVectorizer()
vector_proto.fit_transform(protocols).todense()

# Seed both RNGs (SEED from util_functions) for reproducible shuffling.
random.seed(SEED)
np.random.seed(SEED)
def get_pkt_direction(srcIP, dstIP):
    """Classify a packet's direction relative to the 192.168.0.0/16 private LAN.

    Parameters
    ----------
    srcIP, dstIP : str
        Dotted-quad IPv4 addresses of the packet's endpoints.

    Returns
    -------
    int
        0 if both endpoints are internal, 1 if only the source is internal,
        2 if only the destination is internal, 3 if neither is.
    """
    # Bug fix: use a prefix test instead of the original substring test
    # (`"192.168" in ip`), which also matched addresses such as
    # 10.192.168.5 and misclassified external traffic as internal.
    internal_prefix = "192.168."
    src_internal = srcIP.startswith(internal_prefix)
    dst_internal = dstIP.startswith(internal_prefix)
    if src_internal and dst_internal:
        return 0
    if src_internal:
        return 1
    if dst_internal:
        return 2
    print("No private address in this flow!!!!")
    return 3
class packet_features:
    """Feature vector of a single packet plus its bidirectional flow IDs."""

    def __init__(self):
        # 5-tuples (src_ip, src_port, dst_ip, dst_port, protocol), forward
        # and reversed, so a packet can be matched to a flow either way.
        zero_id = (0, 0, 0, 0, 0)
        self.id_fwd = zero_id
        self.id_bwd = zero_id
        self.features_list = []

    def __str__(self):
        return "{} -> {}".format(self.id_fwd, self.features_list)
def get_ddos_flows(attackers, victims):
    """Expand attacker/victim specs into explicit address lists.

    Each spec is either a single IPv4 address or a CIDR subnet; subnets are
    expanded to every host address they contain.
    """
    def expand(spec):
        if '/' in spec:  # CIDR subnet
            return [str(ip) for ip in ipaddress.IPv4Network(spec).hosts()]
        return [str(ipaddress.IPv4Address(spec))]  # single address

    return {'attackers': expand(attackers), 'victims': expand(victims)}
# Builds the flow-labelling table for a given dataset type or explicit spec.
def parse_labels(dataset_type=None, attackers=None, victims=None):
    """Return a {(src_ip, dst_ip): 1} lookup for DDoS flows, or None.

    Explicit attacker/victim specs take precedence over dataset_type; if
    neither identifies a spec, None is returned (all flows stay benign).
    Both directions of every attacker/victim pair are labelled.
    """
    if attackers is not None and victims is not None:
        ddos_flows = get_ddos_flows(attackers, victims)
    elif dataset_type is not None and dataset_type in DDOS_ATTACK_SPECS:
        ddos_flows = DDOS_ATTACK_SPECS[dataset_type]
    else:
        return None

    output_dict = {}
    for attacker in ddos_flows['attackers']:
        for victim in ddos_flows['victims']:
            ip_src, ip_dst = str(attacker), str(victim)
            output_dict.setdefault((ip_src, ip_dst), 1)
            output_dict.setdefault((ip_dst, ip_src), 1)
    return output_dict
def parse_packet(pkt):
    """Extract the feature vector and flow identifiers from one pyshark packet.

    Returns a packet_features instance, or None when a required field is
    missing (e.g. no IPv4 layer — the AttributeError path below).
    """
    pf = packet_features()
    tmp_id = [0, 0, 0, 0, 0]  # (src_ip, src_port, dst_ip, dst_port, protocol)
    try:
        pf.features_list.append(float(pkt.sniff_timestamp))  # timestampchild.find('Tag').text
        pf.features_list.append(int(pkt.ip.len))  # packet length
        # Highest protocol layer, hashed and folded into 8 decimal digits.
        pf.features_list.append(int(hashlib.sha256(str(pkt.highest_layer).encode('utf-8')).hexdigest(),
                                    16) % 10 ** 8)  # highest layer in the packet
        pf.features_list.append(int(int(pkt.ip.flags, 16)))  # IP flags
        tmp_id[0] = str(pkt.ip.src)  # int(ipaddress.IPv4Address(pkt.ip.src))
        tmp_id[2] = str(pkt.ip.dst)  # int(ipaddress.IPv4Address(pkt.ip.dst))
        # One-hot encode the packet's protocol stack against the module-level
        # CountVectorizer vocabulary, then collapse to a single integer.
        protocols = vector_proto.transform([pkt.frame_info.protocols]).toarray().tolist()[0]
        protocols = [1 if i >= 1 else 0 for i in
                     protocols]  # we do not want the protocols counted more than once (sometimes they are listed twice in pkt.frame_info.protocols)
        protocols_value = int(np.dot(np.array(protocols), powers_of_two))
        pf.features_list.append(protocols_value)

        protocol = int(pkt.ip.proto)
        tmp_id[4] = protocol
        if pkt.transport_layer != None:
            if protocol == socket.IPPROTO_TCP:
                tmp_id[1] = int(pkt.tcp.srcport)
                tmp_id[3] = int(pkt.tcp.dstport)
                pf.features_list.append(int(pkt.tcp.len))  # TCP length
                pf.features_list.append(int(pkt.tcp.ack))  # TCP ack
                pf.features_list.append(int(pkt.tcp.flags, 16))  # TCP flags
                pf.features_list.append(int(pkt.tcp.window_size_value))  # TCP window size
                pf.features_list = pf.features_list + [0, 0]  # UDP + ICMP positions
            elif protocol == socket.IPPROTO_UDP:
                pf.features_list = pf.features_list + [0, 0, 0, 0]  # TCP positions
                tmp_id[1] = int(pkt.udp.srcport)
                pf.features_list.append(int(pkt.udp.length))  # UDP length
                tmp_id[3] = int(pkt.udp.dstport)
                pf.features_list = pf.features_list + [0]  # ICMP position
        elif protocol == socket.IPPROTO_ICMP:
            pf.features_list = pf.features_list + [0, 0, 0, 0, 0]  # TCP and UDP positions
            pf.features_list.append(int(pkt.icmp.type))  # ICMP type
        else:
            # Layer-3-only packet: pad all transport-layer feature slots and
            # clear the protocol field of the flow ID.
            pf.features_list = pf.features_list + [0, 0, 0, 0, 0, 0]  # padding for layer3-only packets
            tmp_id[4] = 0

        pf.id_fwd = (tmp_id[0], tmp_id[1], tmp_id[2], tmp_id[3], tmp_id[4])
        pf.id_bwd = (tmp_id[2], tmp_id[3], tmp_id[0], tmp_id[1], tmp_id[4])

        return pf

    except AttributeError as e:
        # ignore packets that aren't TCP/UDP or IPv4
        return None
# Offline preprocessing of pcap files for model training, validation and testing
def process_pcap(pcap_file,dataset_type,in_labels,max_flow_len,labelled_flows,traffic_type='all',time_window=TIME_WINDOW):
    """Parse one pcap file into labelled flow fragments.

    Appends (5-tuple, flow dict) entries to labelled_flows (mutated in place).
    NOTE(review): dataset_type appears unused here — labels come pre-built in
    in_labels; confirm before removing.
    """
    start_time = time.time()
    temp_dict = OrderedDict()
    start_time_window = -1  # sentinel: the first packet opens the first window
    pcap_name = pcap_file.split("/")[-1]
    print("Processing file: ", pcap_name)

    cap = pyshark.FileCapture(pcap_file)
    for i, pkt in enumerate(cap):
        if i % 1000 == 0:
            print(pcap_name + " packet #", i)

        # start_time_window is used to group packets/flows captured in a time-window
        if start_time_window == -1 or float(pkt.sniff_timestamp) > start_time_window + time_window:
            start_time_window = float(pkt.sniff_timestamp)

        pf = parse_packet(pkt)
        temp_dict = store_packet(pf, temp_dict, start_time_window, max_flow_len)

    apply_labels(temp_dict, labelled_flows, in_labels, traffic_type)
    print('Completed file {} in {} seconds.'.format(pcap_name, time.time() - start_time))
# Transforms live traffic into input samples for inference
def process_live_traffic(cap, dataset_type, in_labels, max_flow_len, traffic_type='all', time_window=TIME_WINDOW):
    """Capture packets for one time window and return them as labelled flows."""
    start_time = time.time()
    temp_dict = OrderedDict()
    labelled_flows = []
    start_time_window = start_time
    deadline = start_time_window + time_window

    if isinstance(cap, pyshark.LiveCapture):
        for pkt in cap.sniff_continuously():
            if time.time() >= deadline:
                break
            pf = parse_packet(pkt)
            temp_dict = store_packet(pf, temp_dict, start_time_window, max_flow_len)
    elif isinstance(cap, pyshark.FileCapture):
        while time.time() < deadline:
            pkt = cap.next()
            pf = parse_packet(pkt)
            temp_dict = store_packet(pf, temp_dict, start_time_window, max_flow_len)

    apply_labels(temp_dict, labelled_flows, in_labels, traffic_type)
    return labelled_flows
def store_packet(pf, temp_dict, start_time_window, max_flow_len):
    """Insert one parsed packet into the per-flow, per-time-window dictionary.

    A packet is appended to an existing fragment (forward or backward
    direction) while it holds fewer than max_flow_len packets; otherwise a
    new fragment — or a whole new flow entry with label 0 — is created.
    Full fragments silently drop further packets. Returns temp_dict.
    """
    if pf is None:
        return temp_dict

    fwd_slot = temp_dict.get(pf.id_fwd)
    bwd_slot = temp_dict.get(pf.id_bwd)

    if fwd_slot is not None and start_time_window in fwd_slot and \
            fwd_slot[start_time_window].shape[0] < max_flow_len:
        fwd_slot[start_time_window] = np.vstack(
            [fwd_slot[start_time_window], pf.features_list])
    elif bwd_slot is not None and start_time_window in bwd_slot and \
            bwd_slot[start_time_window].shape[0] < max_flow_len:
        bwd_slot[start_time_window] = np.vstack(
            [bwd_slot[start_time_window], pf.features_list])
    elif fwd_slot is None and bwd_slot is None:
        temp_dict[pf.id_fwd] = {start_time_window: np.array([pf.features_list]), 'label': 0}
    elif fwd_slot is not None and start_time_window not in fwd_slot:
        fwd_slot[start_time_window] = np.array([pf.features_list])
    elif bwd_slot is not None and start_time_window not in bwd_slot:
        bwd_slot[start_time_window] = np.array([pf.features_list])
    return temp_dict
def apply_labels(flows, labelled_flows, labels, traffic_type):
    """Label each flow, rebase packet timestamps, and filter by traffic type.

    Mutates the flow dicts in place and appends the kept (5-tuple, flow)
    pairs to labelled_flows.
    """
    for five_tuple, flow in flows.items():
        if labels is not None:
            # IDS2017/IDS2018 label tables are keyed on (src_ip, dst_ip) only.
            flow['label'] = labels.get((five_tuple[0], five_tuple[2]), 0)

        for fragment_key, packet_list in flow.items():
            if fragment_key == 'label':
                continue
            # Make timestamps relative to the first packet of the fragment.
            first_timestamp = np.amin(packet_list, axis=0)[0]
            packet_list[:, 0] = packet_list[:, 0] - first_timestamp

        if traffic_type == 'ddos' and flow['label'] == 0:  # keep only malicious flows
            continue
        if traffic_type == 'benign' and flow['label'] > 0:  # keep only benign flows
            continue
        labelled_flows.append((five_tuple, flow))
# Flow/fragment statistics over a preprocessed flow list.
def count_flows(preprocessed_flows):
    """Return ((total, ddos, benign) flows, (total, ddos, benign) fragments)."""
    total_flows = len(preprocessed_flows)
    ddos_flows = 0
    total_fragments = 0
    ddos_fragments = 0
    for _, flow in preprocessed_flows:
        fragments = len(flow) - 1  # every key except 'label' is a fragment
        total_fragments += fragments
        if flow['label'] > 0:
            ddos_flows += 1
            ddos_fragments += fragments
    return ((total_flows, ddos_flows, total_flows - ddos_flows),
            (total_fragments, ddos_fragments, total_fragments - ddos_fragments))
# balance the dataset based on the number of benign and malicious fragments of flows
def balance_dataset(flows, total_fragments=float('inf')):
    """Subsample ``flows`` so benign and DDoS fragment counts are balanced.

    ``min_fragments`` is the per-class fragment budget: half the requested
    ``total_fragments`` cap, bounded by whichever class is smaller. When
    only one class is present no balancing is possible and the cap alone
    applies. The input list is shuffled in place before sampling.

    Returns ``(new_flow_list, new_benign_fragments, new_ddos_fragments)``.
    """
    new_flow_list = []
    _, (_, ddos_fragments, benign_fragments) = count_flows(flows)
    if ddos_fragments == 0 or benign_fragments == 0:
        # Only one class present: keep flows up to the overall cap.
        min_fragments = total_fragments
    else:
        min_fragments = min(total_fragments / 2, ddos_fragments, benign_fragments)

    random.shuffle(flows)
    new_benign_fragments = 0
    new_ddos_fragments = 0
    for flow in flows:
        fragments = len(flow[1]) - 1  # the 'label' entry is not a fragment
        if flow[1]['label'] == 0 and new_benign_fragments < min_fragments:
            new_benign_fragments += fragments
            new_flow_list.append(flow)
        # Any positive label counts as attack traffic, consistent with
        # count_flows() above (the original tested `== 1`, silently dropping
        # flows with labels > 1 even though they were counted as DDoS when
        # computing min_fragments).
        elif flow[1]['label'] > 0 and new_ddos_fragments < min_fragments:
            new_ddos_fragments += fragments
            new_flow_list.append(flow)
    return new_flow_list, new_benign_fragments, new_ddos_fragments
# convert the dataset from dictionaries with 5-tuples keys into a list of flow fragments and another list of labels
def dataset_to_list_of_fragments(dataset):
    """Flatten labelled flows into parallel lists of fragments and labels.

    ``dataset`` is a list of ``(five_tuple, flow_data)`` pairs where
    ``flow_data`` maps time-window keys to fragment arrays plus a 'label'
    entry. Every fragment contributes one entry to each output list, with
    the flow's label and 5-tuple repeated per fragment.

    Returns ``(X, y, keys)``: fragments, labels, and their flow 5-tuples.
    """
    keys = []
    X = []
    y = []
    for five_tuple, flow_data in dataset:
        # NOTE: renamed the original loop variable `tuple`, which shadowed
        # the builtin of the same name.
        label = flow_data['label']
        for key, fragment in flow_data.items():
            if key != 'label':
                X.append(fragment)
                y.append(label)
                keys.append(five_tuple)
    return X, y, keys
def train_test_split(flow_list, train_size=TRAIN_SIZE, shuffle=True):
    """Split flows into train and test lists by *fragment* count.

    The split target is expressed in fragments (count_flows' second triple),
    not in flows: whole flows are moved to the test list until at least
    ``(1 - train_size)`` of the total fragments are covered. ``flow_list``
    is mutated in place (shuffled and truncated) and also returned.

    Returns ``(train_list, test_list)``.
    """
    test_list = []
    _, (total_examples, _, _) = count_flows(flow_list)
    # Number of fragments that belong in the test split (may be fractional).
    test_examples = total_examples - total_examples * train_size
    if shuffle:
        random.shuffle(flow_list)
    current_test_examples = 0
    split_index = 0
    for flow in flow_list:
        if current_test_examples >= test_examples:
            break
        test_list.append(flow)
        current_test_examples += len(flow[1]) - 1
        split_index += 1
    # Truncate in one O(n) operation; the original popped index 0 in a loop,
    # which is O(n) per pop (quadratic overall) and raised IndexError if the
    # list ran out before the fragment quota was met.
    del flow_list[:split_index]
    return flow_list, test_list
def main(argv):
command_options = " ".join(str(x) for x in argv[1:])
help_string = 'Usage[0]: python3 lucid_dataset_parser.py --dataset_type <dataset_name> --dataset_folder <folder path> --dataset_id <dataset identifier> --packets_per_flow <n> --time_window <t>\n' \
'Usage[1]: python3 lucid_dataset_parser.py --preprocess_folder <folder path>'
manager = Manager()
parser = argparse.ArgumentParser(
description='Dataset parser',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-d', '--dataset_folder', nargs='+', type=str,
help='Folder | |
'UserProfile'
db.create_table('auth_user_profile', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(related_name='profile', unique=True, to=orm['auth.User'])),
('default_ctnr', self.gf('django.db.models.fields.related.ForeignKey')(default=2, to=orm['cyder.Ctnr'])),
('phone_number', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
))
db.send_create_signal('cyder', ['UserProfile'])
# Adding model 'Task'
db.create_table(u'task', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('task', self.gf('django.db.models.fields.CharField')(max_length=255)),
('ttype', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('cyder', ['Task'])
# Adding model 'DynamicInterface'
db.create_table('dynamic_interface', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
('expire', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('ctnr', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cyder.Ctnr'])),
('workgroup', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cyder.Workgroup'], null=True, blank=True)),
('system', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cyder.System'])),
('mac', self.gf('cyder.base.fields.MacAddrField')(max_length=17, null=True, dhcp_enabled='dhcp_enabled')),
('range', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cyder.Range'])),
('dhcp_enabled', self.gf('django.db.models.fields.BooleanField')(default=True)),
('last_seen', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal('cyder', ['DynamicInterface'])
# Adding unique constraint on 'DynamicInterface', fields ['range', 'mac']
db.create_unique('dynamic_interface', ['range_id', 'mac'])
# Adding model 'DynamicInterfaceAV'
db.create_table('dynamic_interface_av', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
('value', self.gf('cyder.base.eav.fields.EAVValueField')(attribute_field='', max_length=255)),
('entity', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cyder.DynamicInterface'])),
('attribute', self.gf('cyder.base.eav.fields.EAVAttributeField')(to=orm['cyder.Attribute'])),
))
db.send_create_signal('cyder', ['DynamicInterfaceAV'])
# Adding unique constraint on 'DynamicInterfaceAV', fields ['entity', 'attribute']
db.create_unique('dynamic_interface_av', ['entity_id', 'attribute_id'])
# Adding model 'DNSBuildRun'
db.create_table('cyder_dnsbuildrun', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('log', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('cyder', ['DNSBuildRun'])
# Adding model 'BuildManifest'
db.create_table('cyder_buildmanifest', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('zname', self.gf('django.db.models.fields.CharField')(max_length=256)),
('files', self.gf('django.db.models.fields.CharField')(max_length=256)),
('zhash', self.gf('django.db.models.fields.CharField')(max_length=256)),
('build_run', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cyder.DNSBuildRun'])),
))
db.send_create_signal('cyder', ['BuildManifest'])
# Adding model 'MX'
db.create_table('mx', (
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
('domain', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cyder.Domain'])),
('label', self.gf('django.db.models.fields.CharField')(max_length=63, blank=True)),
('fqdn', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, blank=True)),
('ttl', self.gf('django.db.models.fields.PositiveIntegerField')(default=3600, null=True, blank=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=1000, blank=True)),
('ctnr', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cyder.Ctnr'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('server', self.gf('django.db.models.fields.CharField')(max_length=100)),
('priority', self.gf('django.db.models.fields.PositiveIntegerField')()),
))
db.send_create_signal('cyder', ['MX'])
# Adding unique constraint on 'MX', fields ['domain', 'label', 'server']
db.create_unique('mx', ['domain_id', 'label', 'server'])
# Adding M2M table for field views on 'MX'
m2m_table_name = db.shorten_name('mx_views')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('mx', models.ForeignKey(orm['cyder.mx'], null=False)),
('view', models.ForeignKey(orm['cyder.view'], null=False))
))
db.create_unique(m2m_table_name, ['mx_id', 'view_id'])
# Adding model 'Nameserver'
db.create_table('nameserver', (
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
('ttl', self.gf('django.db.models.fields.PositiveIntegerField')(default=3600, null=True, blank=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=1000, blank=True)),
('ctnr', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cyder.Ctnr'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('domain', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cyder.Domain'])),
('server', self.gf('django.db.models.fields.CharField')(max_length=255)),
('addr_glue', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='nameserver_set', null=True, to=orm['cyder.AddressRecord'])),
('intr_glue', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='nameserver_set', null=True, to=orm['cyder.StaticInterface'])),
))
db.send_create_signal('cyder', ['Nameserver'])
# Adding unique constraint on 'Nameserver', fields ['domain', 'server']
db.create_unique('nameserver', ['domain_id', 'server'])
# Adding M2M table for field views on 'Nameserver'
m2m_table_name = db.shorten_name('nameserver_views')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('nameserver', models.ForeignKey(orm['cyder.nameserver'], null=False)),
('view', models.ForeignKey(orm['cyder.view'], null=False))
))
db.create_unique(m2m_table_name, ['nameserver_id', 'view_id'])
# Adding model 'SOA'
db.create_table('soa', (
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('ttl', self.gf('django.db.models.fields.PositiveIntegerField')(default=3600, null=True, blank=True)),
('primary', self.gf('django.db.models.fields.CharField')(max_length=100)),
('contact', self.gf('django.db.models.fields.CharField')(max_length=100)),
('serial', self.gf('django.db.models.fields.PositiveIntegerField')(default=1420739755)),
('expire', self.gf('django.db.models.fields.PositiveIntegerField')(default=1209600)),
('retry', self.gf('django.db.models.fields.PositiveIntegerField')(default=86400)),
('refresh', self.gf('django.db.models.fields.PositiveIntegerField')(default=180)),
('minimum', self.gf('django.db.models.fields.PositiveIntegerField')(default=180)),
('description', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('root_domain', self.gf('django.db.models.fields.related.ForeignKey')(related_name='root_of_soa', unique=True, to=orm['cyder.Domain'])),
('dirty', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_signed', self.gf('django.db.models.fields.BooleanField')(default=False)),
('dns_enabled', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('cyder', ['SOA'])
# Adding model 'SOAAV'
db.create_table('soa_av', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
('value', self.gf('cyder.base.eav.fields.EAVValueField')(attribute_field='', max_length=255)),
('entity', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cyder.SOA'])),
('attribute', self.gf('cyder.base.eav.fields.EAVAttributeField')(to=orm['cyder.Attribute'])),
))
db.send_create_signal('cyder', ['SOAAV'])
# Adding unique constraint on 'SOAAV', fields ['entity', 'attribute']
db.create_unique('soa_av', ['entity_id', 'attribute_id'])
# Adding model 'SRV'
db.create_table('srv', (
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
('ttl', self.gf('django.db.models.fields.PositiveIntegerField')(default=3600, null=True, blank=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=1000, blank=True)),
('ctnr', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cyder.Ctnr'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('label', self.gf('django.db.models.fields.CharField')(max_length=63, blank=True)),
('domain', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cyder.Domain'])),
('fqdn', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('target', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('port', self.gf('django.db.models.fields.PositiveIntegerField')()),
('priority', self.gf('django.db.models.fields.PositiveIntegerField')()),
('weight', self.gf('django.db.models.fields.PositiveIntegerField')()),
))
db.send_create_signal('cyder', ['SRV'])
# Adding unique constraint on 'SRV', fields ['label', 'domain', 'target', 'port']
db.create_unique('srv', ['label', 'domain_id', 'target', 'port'])
# Adding M2M table for field views on 'SRV'
m2m_table_name = db.shorten_name('srv_views')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('srv', models.ForeignKey(orm['cyder.srv'], null=False)),
('view', models.ForeignKey(orm['cyder.view'], null=False))
))
db.create_unique(m2m_table_name, ['srv_id', 'view_id'])
# Adding model 'SSHFP'
db.create_table('sshfp', (
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
('domain', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cyder.Domain'])),
('label', self.gf('django.db.models.fields.CharField')(max_length=63, blank=True)),
('fqdn', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, blank=True)),
('ttl', self.gf('django.db.models.fields.PositiveIntegerField')(default=3600, null=True, blank=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=1000, blank=True)),
('ctnr', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cyder.Ctnr'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('key', self.gf('django.db.models.fields.CharField')(max_length=256)),
('algorithm_number', self.gf('django.db.models.fields.PositiveIntegerField')()),
('fingerprint_type', self.gf('django.db.models.fields.PositiveIntegerField')(default=1)),
))
db.send_create_signal('cyder', ['SSHFP'])
# Adding unique constraint on 'SSHFP', fields ['domain', 'label']
db.create_unique('sshfp', ['domain_id', 'label'])
# Adding M2M table for field views on 'SSHFP'
m2m_table_name = db.shorten_name('sshfp_views')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('sshfp', models.ForeignKey(orm['cyder.sshfp'], null=False)),
('view', models.ForeignKey(orm['cyder.view'], null=False))
))
db.create_unique(m2m_table_name, ['sshfp_id', 'view_id'])
# Adding model 'TXT'
db.create_table('txt', (
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
('domain', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cyder.Domain'])),
('label', self.gf('django.db.models.fields.CharField')(max_length=63, blank=True)),
('fqdn', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, blank=True)),
('ttl', self.gf('django.db.models.fields.PositiveIntegerField')(default=3600, null=True, blank=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=1000, blank=True)),
('ctnr', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cyder.Ctnr'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('txt_data', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('cyder', ['TXT'])
# Adding M2M table for field views on 'TXT'
m2m_table_name = db.shorten_name('txt_views')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('txt', models.ForeignKey(orm['cyder.txt'], null=False)),
('view', models.ForeignKey(orm['cyder.view'], null=False))
))
db.create_unique(m2m_table_name, ['txt_id', 'view_id'])
def backwards(self, orm):
# Removing unique constraint on 'SSHFP', fields ['domain', 'label']
db.delete_unique('sshfp', ['domain_id', 'label'])
# Removing unique constraint on 'SRV', fields ['label', 'domain', 'target', 'port']
db.delete_unique('srv', ['label', 'domain_id', 'target', 'port'])
# Removing unique constraint on 'SOAAV', fields ['entity', 'attribute']
db.delete_unique('soa_av', ['entity_id', 'attribute_id'])
# Removing unique constraint on 'Nameserver', fields ['domain', 'server']
db.delete_unique('nameserver', ['domain_id', 'server'])
# Removing unique constraint on 'MX', fields ['domain', 'label', 'server']
db.delete_unique('mx', ['domain_id', 'label', 'server'])
# Removing unique constraint on 'DynamicInterfaceAV', fields ['entity', 'attribute']
db.delete_unique('dynamic_interface_av', ['entity_id', 'attribute_id'])
# Removing unique constraint on 'DynamicInterface', fields ['range', 'mac']
db.delete_unique('dynamic_interface', ['range_id', 'mac'])
# Removing unique constraint on 'CtnrUser', fields ['ctnr', 'user']
db.delete_unique('ctnr_users', ['ctnr_id', 'user_id'])
# Removing unique constraint on 'RangeAV', fields ['entity', 'attribute']
db.delete_unique('range_av', ['entity_id', 'attribute_id'])
# Removing unique constraint on 'Range', fields ['start_upper', 'start_lower', 'end_upper', 'end_lower']
db.delete_unique('range', ['start_upper', 'start_lower', 'end_upper', 'end_lower'])
# Removing unique constraint on 'NetworkAV', fields ['entity', 'attribute']
db.delete_unique('network_av', ['entity_id', 'attribute_id'])
# Removing unique constraint on 'Network', fields ['ip_upper', 'ip_lower', 'prefixlen']
db.delete_unique('network', ['ip_upper', 'ip_lower', 'prefixlen'])
# Removing unique constraint on 'SiteAV', fields ['entity', 'attribute']
db.delete_unique('site_av', ['entity_id', 'attribute_id'])
# Removing unique constraint on 'Site', fields ['name', 'parent']
db.delete_unique('site', ['name', 'parent_id'])
# Removing unique constraint on 'VrfAV', fields ['entity', 'attribute']
db.delete_unique('vrf_av', ['entity_id', 'attribute_id'])
# Removing unique constraint on 'VlanAV', fields ['entity', 'attribute']
db.delete_unique('vlan_av', ['entity_id', 'attribute_id'])
# Removing unique constraint on 'Vlan', fields ['name', 'number']
db.delete_unique('vlan', ['name', 'number'])
# Removing unique constraint on 'StaticInterfaceAV', fields ['entity', 'attribute']
db.delete_unique('static_interface_av', ['entity_id', 'attribute_id'])
# Removing unique constraint on 'StaticInterface', fields ['label', 'domain']
db.delete_unique('static_interface', ['label', 'domain_id'])
# Removing unique constraint on 'StaticInterface', fields ['ip_upper', 'ip_lower']
db.delete_unique('static_interface', ['ip_upper', 'ip_lower'])
# Removing unique constraint on 'PTR', fields ['ip_str', 'ip_type', 'fqdn']
db.delete_unique('ptr', ['ip_str', 'ip_type', 'fqdn'])
# Removing unique constraint on 'AddressRecord', fields ['label', 'domain', 'fqdn', 'ip_upper', 'ip_lower', 'ip_type']
db.delete_unique('address_record', ['label', 'domain_id', 'fqdn', 'ip_upper', 'ip_lower', 'ip_type'])
# Removing unique constraint on 'CNAME', fields ['label', 'domain', 'target']
db.delete_unique('cname', ['label', 'domain_id', 'target'])
# Removing unique constraint on 'View', fields ['name']
db.delete_unique('view', ['name'])
# Removing unique constraint on 'WorkgroupAV', fields ['entity', 'attribute']
db.delete_unique('workgroup_av', ['entity_id', 'attribute_id'])
# Removing unique constraint on 'SystemAV', fields ['entity', 'attribute']
db.delete_unique('system_av', ['entity_id', 'attribute_id'])
# Deleting model 'Token'
db.delete_table('cyder_token')
# Deleting model 'Attribute'
db.delete_table('attribute')
# Deleting model 'Domain'
db.delete_table('domain')
# Deleting model 'System'
db.delete_table('system')
# Deleting model 'SystemAV'
db.delete_table('system_av')
# Deleting model 'Workgroup'
db.delete_table('workgroup')
# Deleting model 'WorkgroupAV'
db.delete_table('workgroup_av')
# Deleting model 'View'
db.delete_table('view')
# Deleting model 'CNAME'
db.delete_table('cname')
# Removing M2M table for field views on 'CNAME'
db.delete_table(db.shorten_name('cname_views'))
# Deleting model 'AddressRecord'
db.delete_table('address_record')
# Removing M2M table for field views on 'AddressRecord'
db.delete_table(db.shorten_name('address_record_views'))
# Deleting model 'PTR'
db.delete_table('ptr')
# Removing M2M table for field views on 'PTR'
db.delete_table(db.shorten_name('ptr_views'))
# Deleting model 'StaticInterface'
db.delete_table('static_interface')
# Removing M2M table for field views on 'StaticInterface'
db.delete_table(db.shorten_name('static_interface_views'))
# Deleting model 'StaticInterfaceAV'
db.delete_table('static_interface_av')
# Deleting model 'Vlan'
db.delete_table('vlan')
# Deleting model 'VlanAV'
db.delete_table('vlan_av')
# Deleting model 'Vrf'
db.delete_table('vrf')
# Deleting model 'VrfAV'
db.delete_table('vrf_av')
# Deleting model 'Site'
db.delete_table('site')
# Deleting model 'SiteAV'
db.delete_table('site_av')
# Deleting model 'Network'
db.delete_table('network')
# Deleting model 'NetworkAV'
db.delete_table('network_av')
# Deleting model 'Range'
db.delete_table('range')
# Removing M2M table for field views on 'Range'
db.delete_table(db.shorten_name('range_views'))
# Deleting model 'RangeAV'
db.delete_table('range_av')
# Deleting model 'Ctnr'
db.delete_table('ctnr')
# Removing M2M table for field domains on 'Ctnr'
db.delete_table(db.shorten_name('ctnr_domains'))
# Removing M2M table for field ranges on 'Ctnr'
db.delete_table(db.shorten_name('ctnr_ranges'))
# Removing M2M table for field workgroups on 'Ctnr'
db.delete_table(db.shorten_name('ctnr_workgroups'))
# Deleting model 'CtnrUser'
db.delete_table('ctnr_users')
# Deleting model 'UserProfile'
db.delete_table('auth_user_profile')
# Deleting model 'Task'
db.delete_table(u'task')
# Deleting model 'DynamicInterface'
db.delete_table('dynamic_interface')
# Deleting model 'DynamicInterfaceAV'
db.delete_table('dynamic_interface_av')
# Deleting model 'DNSBuildRun'
db.delete_table('cyder_dnsbuildrun')
# Deleting model 'BuildManifest'
db.delete_table('cyder_buildmanifest')
# Deleting model 'MX'
db.delete_table('mx')
# Removing M2M table for field views on 'MX'
db.delete_table(db.shorten_name('mx_views'))
# Deleting model 'Nameserver'
db.delete_table('nameserver')
# Removing M2M table for field views on 'Nameserver'
db.delete_table(db.shorten_name('nameserver_views'))
# Deleting model 'SOA'
db.delete_table('soa')
# Deleting model 'SOAAV'
db.delete_table('soa_av')
# Deleting model 'SRV'
db.delete_table('srv')
# Removing M2M table for field views | |
<reponame>symbooglix/boogie-runner
#!/usr/bin/env python
# vim: set sw=2 ts=2 softtabstop=2 expandtab:
import argparse
import logging
import os
import pprint
import sys
import yaml
from br_util import FinalResultType, classifyResult, validateMappingFile
import matplotlib.pyplot as plt
try:
# Try to use libyaml which is faster
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
# fall back on python implementation
from yaml import Loader, Dumper
def main(args):
# FIXME: Clean me up!
#resultTypes = [ r.name for r in list(FinalResultType)] # Get list of ResultTypes as strings
#defaultTypes = [ r.name for r in list(FinalResultType) if r in [FinalResultType.FULLY_EXPLORED, FinalResultType.BUG_FOUND]]
#failureTypes = { FinalResultType.UNKNOWN, FinalResultType.TIMED_OUT, FinalResultType.OUT_OF_MEMORY }
usefulAnswerResultTypes = { FinalResultType.FULLY_EXPLORED, FinalResultType.BUG_FOUND}
parser = argparse.ArgumentParser()
parser.add_argument("-l","--log-level",type=str, default="info", dest="log_level", choices=['debug','info','warning','error'])
parser.add_argument("-v", "--verbose", action='store_true', help='Show detailed information about mismatch')
parser.add_argument('label_mapping', type=argparse.FileType('r'), help='correctness mapping YAML file')
parser.add_argument('result_ymls', nargs=2, help='Input YAML files')
parser.add_argument('max_time', type=int, help='Maximum time in seconds, results timings will be clamped to this value')
parser.add_argument('--ipython', action='store_true')
parser.add_argument('--point-size', type=float, default=30.0, dest='point_size')
parser.add_argument('--click-shows-raw-data', dest='click_shows_raw_data', action='store_true', default=False)
parser.add_argument('--only-show', dest='only_show', default='all', choices=['all', 'correct', 'incorrect', 'unknown'])
parser.add_argument('-c', '--only-allow-consistent', dest='only_allow_consistent',action='store_true', default=False)
parser.add_argument('--axis-name-map',dest='axis_name_map', default=None, type=str)
parser.add_argument('--draw-dual-timeout-count',dest='draw_dual_timeout_count', action='store_true', default=False)
#group = parser.add_mutually_exclusive_group()
#group.add_argument('-r', '--result-types-to-plot', nargs='+', dest='result_types_to_plot',
# choices=resultTypes, default=defaultTypes,
# help='Result types to plot (at least one of the pair must be of this type). Default: %(default)s')
#group.add_argument('--all', default=False, action='store_true', help='Plot all result types')
pargs = parser.parse_args(args)
print(pargs)
logLevel = getattr(logging, pargs.log_level.upper(),None)
logging.basicConfig(level=logLevel)
if len(pargs.result_ymls) != 2:
logger.error('Need two YAML files')
# Load mapping
correctnessMapping = yaml.load(pargs.label_mapping, Loader=Loader)
validateMappingFile(correctnessMapping)
axisMapping = None
# Load the legend mapping if it exists
if pargs.axis_name_map != None:
if not os.path.exists(pargs.axis_name_map):
logging.error('"{}" does not exist'.format(pargs.axis_name_map))
return 1
with open(pargs.axis_name_map, 'r') as openFile:
axisMapping = yaml.load(openFile, Loader=Loader)
if not isinstance(axisMapping, dict):
logging.error('axis mapping should be a dictionary mapping file paths to axis name')
return 1
## Create set of allowed result types
#if pargs.all:
# allowedResultTypes = set(FinalResultType)
#else:
# allowedResultTypes = set()
# for rType in pargs.result_types_to_plot:
# allowedResultTypes.add(FinalResultType[rType])
#logging.info('Plotting points of type {}'.format(allowedResultTypes))
# Check that each yml file exists
# resultListName (i.e. filename) -> list of results for that file
data = { }
resultListNames = [ ]
for f in pargs.result_ymls:
if not os.path.exists(f):
logging.error('YAML file {} does not exist'.format(f))
return 1
# Compute result set name
resultListName = f
resultListNames.append(resultListName)
if resultListName in data:
logging.error('Can\'t use {} as label name because it is already used'.format(resultListName))
return 1
data[resultListName] = None # Will be filled with loaded YAML data
# Now load YAML
length = 0
for f in pargs.result_ymls:
logging.info('Loading YAML file {}'.format(f))
with open(f, 'r') as openFile:
results = yaml.load(openFile, Loader=Loader)
logging.info('Loading complete')
assert isinstance(results, list)
resultListName = f
data[resultListName] = results
length = len(results)
# Check the lengths are the same
for name, rList in data.items():
if len(rList) != length:
logging.error('There is a length mismatch for {}, expected {} entries but was'.format(name, length, len(rList)))
return 1
programToResultSetsMap = { }
for resultListName in resultListNames:
for r in data[resultListName]:
programName = r['program']
# Only add programs that we want
if not programName in correctnessMapping:
logging.error('{} is missing from correctness mapping'.format(programName))
return 1
expectedCorrect = correctnessMapping[programName]['expected_correct']
assert expectedCorrect == None or isinstance(expectedCorrect, bool)
skipBenchmark = False
if pargs.only_show == 'incorrect':
if expectedCorrect != False:
skipBenchmark = True
elif pargs.only_show == 'correct':
if expectedCorrect != True:
skipBenchmark = True
elif pargs.only_show == 'unknown':
if expectedCorrect != None:
skipBenchmark = True
elif pargs.only_show == 'all':
skipBenchmark = False
else:
assert False
if skipBenchmark:
logging.info('Filtering out {} ({}) because we are only showing benchmarks labelled as {}'.format(programName, expectedCorrect, pargs.only_show))
continue
try:
existingDict = programToResultSetsMap[programName]
existingDict[resultListName] = r
except KeyError:
programToResultSetsMap[programName] = { resultListName:r }
# Check there are the same number of results for each program
allowConsistentMismatchCount = 0
disregardedResultTypeCount = 0
clampCount = 0
failedAnalysisGivenMaxTimeCount = 0
xData = [ ]
yData = [ ]
countXLtYExceptDualTimeout=0
countYLtXExceptDualTimeout=0
countDualTimeout=0
countXEqYExceptDualTimeout=0
annotationLabels = [ ]
for programName, resultListNameToRawResultMap in programToResultSetsMap.items():
if len(resultListNameToRawResultMap) != len(resultListNames):
logging.error('For program {} there we only {} result lists but expected {}'.format(
programName, len(resultListNameToRawResultMap), len(resultListNames)))
logging.error(pprint.pformat(resultListNameToRawResultMap))
return 1
firstResult = resultListNameToRawResultMap[resultListNames[0]]
secondResult = resultListNameToRawResultMap[resultListNames[1]]
firstType = classifyResult(firstResult)
secondType = classifyResult(secondResult)
if pargs.only_allow_consistent:
# For this program check that classifications are consistent
# we take the first programList name as the expected
if firstType != secondType:
allowConsistentMismatchCount += 1
logging.warning('Found mismatch for program {}:'.format(programName))
for resultListName in resultListNames:
logging.warning('{}: {}'.format(resultListName, classifyResult(resultListNameToRawResultMap[resultListName])))
if pargs.verbose:
logging.warning('\n{}'.format(pprint.pformat(resultListNameToRawResultMap)))
logging.warning('Disregarding result\n')
continue
#if not firstType in allowedResultTypes and not secondType in allowedResultTypes:
# disregardedResultTypeCount += 1
# logging.warning('Disregarding {} in {} due to neither result types ({} and {}) not being one of the allow result types'.format(
# programName,
# resultListNames[0],
# firstType,
# secondType))
# continue
# Clamp timings
didClamp = False
for resultListName in resultListNames:
r = resultListNameToRawResultMap[resultListName]
if r['total_time'] > pargs.max_time:
logging.debug('Clamping {} for {}'.format(programName, resultListName))
r['total_time'] = pargs.max_time
didClamp = True
if not classifyResult(r) in usefulAnswerResultTypes:
logging.info('Clamping time for program {} for {} with result type {}'.format(
r['program'], resultListName, classifyResult(r)))
r['total_time'] = pargs.max_time
didClamp = True
if didClamp:
clampCount += 1
# Add data point for plotting
xPoint = firstResult['total_time']
yPoint = secondResult['total_time']
# Update counts
if xPoint == yPoint and xPoint == pargs.max_time:
countDualTimeout += 1
else:
if xPoint < yPoint:
countXLtYExceptDualTimeout +=1
#print(programName)
#print(pprint.pformat(resultListNameToRawResultMap))
#return 1
elif yPoint < xPoint:
countYLtXExceptDualTimeout +=1
else:
assert xPoint == yPoint
countXEqYExceptDualTimeout +=1
xData.append(xPoint)
yData.append(yPoint)
annotationLabels.append(programName)
# Give information
# This is highly result specific and probably should be removed.
occuranceCount = 0
yValueThresh = 10.0
xValueThresh = 100.0
for xValue, yValue in zip(xData, yData):
if yValue < yValueThresh and xValue > xValueThresh and xValue != pargs.max_time:
occuranceCount += 1
print(" # of times yValue < {} and xValue > {} and where for tool on x axis it was not a timeout : {}".format(yValueThresh, xValueThresh, occuranceCount))
# Finally do plotting
assert len(xData) == len(yData) == len(annotationLabels)
assert countXLtYExceptDualTimeout + countYLtXExceptDualTimeout + countXEqYExceptDualTimeout + countDualTimeout == len(xData)
extend = 100
tickFreq = 100
xAxisLabel=""
yAxisLabel=""
if pargs.axis_name_map:
xAxisLabel = axisMapping[resultListNames[0]]
yAxisLabel = axisMapping[resultListNames[1]]
else:
xAxisLabel=resultListNames[0]
yAxisLabel=resultListNames[1]
if pargs.only_allow_consistent:
logging.info('# of mismatches when only allowing consistent results: {}'.format(allowConsistentMismatchCount))
logging.info('# of result pairs clamped: {}'.format(clampCount))
logging.info('# of points plotted: {} out of {}'.format(len(xData), len(programToResultSetsMap)))
logging.info('# of {x} < {y} (x < y): {}'.format(countXLtYExceptDualTimeout, x=xAxisLabel, y=yAxisLabel))
logging.info('# of {y} < {x} (y < x): {}'.format(countYLtXExceptDualTimeout, x=xAxisLabel, y=yAxisLabel))
logging.info('# of {x} == {y} (x == y) (ignoring timeouts): {}'.format(countXEqYExceptDualTimeout, x=xAxisLabel, y=yAxisLabel))
logging.info('# of dual timeouts: {}'.format(countDualTimeout))
fig, ax = plt.subplots()
splot = ax.scatter(xData, yData, picker=5, s=pargs.point_size)
fontSize=20
ax.tick_params(which='both', labelsize=fontSize)
xAxisLabel += " execution time (s)"
yAxisLabel += " execution time (s)"
ax.set_xlabel(xAxisLabel, fontsize=fontSize)
ax.set_xlim(0,pargs.max_time + extend)
# +1 is just so the pargs.max_time is included because range()'s end is not inclusive
ax.set_xticks(range(0, pargs.max_time + 1, tickFreq))
ax.set_ylabel(yAxisLabel, fontsize=fontSize)
ax.set_ylim(0,pargs.max_time + extend)
ax.set_yticks(range(0, pargs.max_time + 1, tickFreq))
ax.grid(False)
# HACK: Annotate gt,lt and dual timeout numbers
# FIXME: don't hardcode
ax.annotate('{}'.format(countXLtYExceptDualTimeout), xy=(200,550), fontsize=40)
ax.annotate('{}'.format(countYLtXExceptDualTimeout), xy=(550,200), fontsize=40)
if countXEqYExceptDualTimeout > 0:
ax.annotate('{}'.format(countXEqYExceptDualTimeout), xy=(450,450))
if pargs.draw_dual_timeout_count:
ax.annotate('{}'.format(countDualTimeout), xy=(pargs.max_time -5, pargs.max_time -5), xytext=(pargs.max_time -100, pargs.max_time), arrowprops=dict(width=0.5,facecolor='black', shrink=0.05))
# Add annotations that become visible when clicked
DataPointReporter(splot, xData, yData, annotationLabels, programToResultSetsMap, pargs.click_shows_raw_data)
fig.tight_layout()
# Identity line
ax.plot([ 0 , pargs.max_time + extend], [0, pargs.max_time + extend], linewidth=1.0, color='black')
if pargs.ipython:
from IPython import embed
embed()
# Call fig.show() to see the figure
else:
plt.show()
return 0
class DataPointReporter:
def __init__(self, scatter, xData, yData, annotationLabels, programToResultSetsMap, clickShowsRawData):
self.scatter = scatter
self.cid = scatter.figure.canvas.mpl_connect('pick_event', self)
self.annotationLabels = annotationLabels
self.programToResultSetsMap = programToResultSetsMap
self.clickShowsRawData = clickShowsRawData
# Add annotations, by hide them by default
self.annotationObjects = [ ]
self.lastClickedAnnotationObj = None
for index, text in enumerate(annotationLabels):
text = text.replace('/','/\n')
annotation = scatter.axes.annotate(text, (xData[index], yData[index]))
annotation.set_visible(False)
annotation.set_horizontalalignment('center')
self.annotationObjects.append(annotation)
def __call__(self, event):
programName = self.annotationLabels[event.ind[0]]
logging.info('*****')
logging.info('{}'.format(programName))
for resultListName, rawResultData in self.programToResultSetsMap[programName].items():
if self.clickShowsRawData:
logging.info('{}: {}\n{}'.format(resultListName, classifyResult(rawResultData), pprint.pformat(rawResultData)))
else:
logging.info('{}: {} ({} ± {} secs)'.format(
resultListName,
classifyResult(rawResultData),
rawResultData['total_time'],
rawResultData['total_time_stddev'] if 'total_time_stddev' in rawResultData else 'UNKNOWN'
))
logging.info('*****')
theAnnotation = self.annotationObjects[event.ind[0]]
if self.lastClickedAnnotationObj != None:
| |
# -*- coding: utf-8 -*-
#############################################################################
# SRWLIB Example: Virtual Beamline: a set of utilities and functions allowing one to simulate
# operation of an SR Beamline.
# The standard use of this script is from command line, with some optional arguments,
# e.g. for calculation (with default parameter values) of:
# UR Spectrum Through a Slit (Flux within a default aperture):
# python SRWLIB_VirtBL_*.py --sm
# Single-Electron UR Spectrum (Flux per Unit Surface):
# python SRWLIB_VirtBL_*.py --ss
# UR Power Density (at the first optical element):
# python SRWLIB_VirtBL_*.py --pw
# Input Single-Electron UR Intensity Distribution (at the first optical element):
# python SRWLIB_VirtBL_*.py --si
# Single-Electron Wavefront Propagation:
# python SRWLIB_VirtBL_*.py --ws
# Multi-Electron Wavefront Propagation:
# Sequential Mode:
# python SRWLIB_VirtBL_*.py --wm
# Parallel Mode (using MPI / mpi4py), e.g.:
# mpiexec -n 6 python SRWLIB_VirtBL_*.py --wm
# For changing parameters of all these calculations from the default values, see the definition
# of all options in the list at the end of the script.
# v 0.04
#############################################################################
from __future__ import print_function # Python 2.7 compatibility
from srwl_bl import *
try:
import cPickle as pickle
except:
import pickle
# *********************************Setting Up Optical Elements and Propagation Parameters
def set_optics(_v):
"""This function describes optical layout of the SMI beamline of NSLS-II.
Such function has to be written for every beamline to be simulated; it is specific to a particular beamline.
:param _v: structure containing all parameters allowed to be varied for that particular beamline.
:return SRWLOptC(): container object.
"""
# Nominal Positions of Optical Elements [m] (with respect to straight section center)
zStart = _v.op_r
zAPE = zStart
zMOAT = zStart + 2.44
zHFM = zStart + 2.44 + 2.94244
zVFM = zStart + 2.44 + 2.94244 + 3.42
zVM = zStart + 2.44 + 2.94244 + 3.42 + 0.7
zSSA = zStart + 2.44 + 2.94244 + 3.42 + 0.7 + 8.0
zES1 = zStart + 2.44 + 2.94244 + 3.42 + 0.7 + 8.0 + 3.9
zCRL = zStart + 2.44 + 2.94244 + 3.42 + 0.7 + 8.0 + 10.33492
zES2 = zStart + 2.44 + 2.94244 + 3.42 + 0.7 + 8.0 + 10.33492 + 1.66508
# Instantiation of the Optical Elements:
msg = 'The combination of beamline={} / bump={} / BMmode={}'
print(msg.format(_v.beamline, _v.bump, _v.BMmode))
msg += ' is not supported.'
if _v.beamline == 'ES1':
if not _v.bump:
arElNamesAll_01 = ['D_APE_HFM', 'HFML', 'D_HFM_VFM', 'VFML', 'D_VFM_SSA', 'SSA', 'D_SSA_ES1']
else:
if _v.BMmode == 'Norm':
arElNamesAll_01 = ['D_APE_MOA', 'MOAT', 'D_MOA_HFM', 'HFML', 'HFMT', 'D_HFM_VFM', 'VFML', 'VFMT',
'D_VFM_VM', 'VMT', 'D_VM_SSA', 'SSA', 'D_SSA_ES1']
else:
raise Exception(msg.format(_v.beamline, _v.bump, _v.BMmode))
elif _v.beamline == 'ES2':
if not _v.bump:
raise Exception(msg.format(_v.beamline, _v.bump, _v.BMmode))
else:
if _v.BMmode == 'LowDiv': # focus ES2 without Kb with low divergence
arElNamesAll_01 = ['D_APE_MOA', 'MOAT', 'D_MOA_HFM', 'HFML', 'HFMT', 'D_HFM_VFM', 'VFML', 'VFMT',
'D_VFM_VM', 'VMT', 'D_VM_SSA', 'SSA', 'D_SSA_CRL', 'ApCRL', 'CRL', 'D_CRL_ES2']
elif _v.BMmode == 'Norm':
arElNamesAll_01 = ['D_APE_MOA', 'MOAT', 'D_MOA_HFM', 'HFML', 'HFMT', 'D_HFM_VFM', 'VFML', 'VFMT',
'D_VFM_VM', 'VMT', 'D_VM_SSA', 'SSA', 'D_SSA_CRL', 'ApCRL', 'CRL', 'D_CRL_ES2']
else:
raise Exception(msg.format(_v.beamline, _v.bump, _v.BMmode))
else:
raise Exception(msg.format(_v.beamline, _v.bump, _v.BMmode))
arElNamesAll_02 = []
arElNamesAll_03 = []
arElNamesAll_04 = []
arElNamesAll = arElNamesAll_01
if _v.op_BL == 2:
arElNamesAll = arElNamesAll_02
elif _v.op_BL == 3:
arElNamesAll = arElNamesAll_03
elif _v.op_BL == 4:
arElNamesAll = arElNamesAll_04
# Treat beamline sub-cases / alternative configurations
if len(_v.op_fin) > 0:
if _v.op_fin not in arElNamesAll:
raise Exception('Optical element with the name specified in the "op_fin" option is not present in this beamline')
arElNames = []
for i in range(len(arElNamesAll)):
arElNames.append(arElNamesAll[i])
if len(_v.op_fin) > 0:
if arElNamesAll[i] == _v.op_fin:
break
# Lists of SRW optical element objects and their corresponding propagation parameters
el = []
pp = []
for i in range(len(arElNames)):
# Process all drifts here:
if arElNames[i] == 'D_APE_MOA':
el.append(SRWLOptD(zMOAT - zAPE))
if _v.beamline == 'ES1':
pp.append(_v.op_APE_MOA_es1_pp)
elif _v.beamline == 'ES2':
pp.append(_v.op_APE_MOA_es2_pp)
elif arElNames[i] == 'D_MOA_HFM':
el.append(SRWLOptD(zHFM - zMOAT))
pp.append(_v.op_MOA_HFM_pp)
elif arElNames[i] == 'D_HFM_VFM':
el.append(SRWLOptD(zVFM - zHFM))
pp.append(_v.op_HFM_VFM_pp)
elif arElNames[i] == 'D_VFM_VM':
el.append(SRWLOptD(zVM - zVFM))
pp.append(_v.op_VFM_VM_pp)
elif arElNames[i] == 'D_VM_SSA':
el.append(SRWLOptD(zSSA - zVM))
pp.append(_v.op_VM_SSA_pp)
elif arElNames[i] == 'D_SSA_CRL':
el.append(SRWLOptD(zCRL - zSSA))
pp.append(_v.op_SSA_CRL_pp)
elif arElNames[i] == 'D_CRL_ES2':
el.append(SRWLOptD(zES2 - zCRL))
pp.append(_v.op_CRL_ES2_pp)
elif arElNames[i] == 'D_SSA_ES1':
el.append(SRWLOptD(zES1 - zSSA))
pp.append(_v.op_SSA_ES1_pp)
elif arElNames[i] == 'D_APE_HFM':
el.append(SRWLOptD(zHFM - zAPE))
if _v.beamline == 'ES1':
pp.append(_v.op_APE_MOA_es1_pp)
elif _v.beamline == 'ES2':
pp.append(_v.op_APE_MOA_es2_pp)
elif arElNames[i] == 'D_VFM_SSA':
el.append(SRWLOptD(zSSA - zVFM))
pp.append(_v.op_VFM_SSA_pp)
elif arElNames[i] == 'MOAT':
ifnMOAT = os.path.join(_v.fdir, _v.op_MOAT_ifn) if len(_v.op_MOAT_ifn) > 0 else ''
if len(ifnMOAT) > 0 and os.path.isfile(ifnMOAT):
hProfDataMOAT = srwl_uti_read_data_cols(ifnMOAT, '\t')
opMOAT = srwl_opt_setup_surf_height_1d(hProfDataMOAT, 'y', _ang=0.09727, _nx=100, _ny=500,
_size_x=2.0e-02,
_size_y=16e-3 * sin(0.09727))
ofnMOAT = os.path.join(_v.fdir, _v.op_MOAT_ofn) if len(_v.op_MOAT_ofn) > 0 else ''
if len(ofnMOAT) > 0:
pathDifMOAT = opMOAT.get_data(3, 3)
srwl_uti_save_intens_ascii(pathDifMOAT, opMOAT.mesh, ofnMOAT, 0,
['', 'Horizontal Position', 'Vertical Position', 'Opt. Path Dif.'],
_arUnits=['', 'm', 'm', 'm'])
el.append(opMOAT)
pp.append(_v.op_MOAT_pp)
elif arElNames[i] == 'HFML':
if _v.BMmode == 'Norm':
el.append(SRWLOptL(_Fx=1. / (1. / zHFM + 1. / ((zVFM - zHFM) + (zSSA - zVFM) + (zES1 - zSSA))))) # to focus at ES1
elif _v.BMmode == 'LowDiv':
el.append(SRWLOptL(_Fx=1. / (1. / zHFM + 1. / ((zVFM - zHFM) + (zSSA - zVFM) + (zES1 - zSSA) + 8.1 - 0.3)))) # to focus at ES2 with a low divergence
pp.append(_v.op_HFML_pp)
elif arElNames[i] == 'HFMT':
ifnHFM = os.path.join(_v.fdir, _v.op_HFM_ifn) if len(_v.op_HFM_ifn) > 0 else ''
if len(ifnHFM) > 0:
hProfDataHFM = srwl_uti_read_data_cols(ifnHFM, '\t')
opHFM = srwl_opt_setup_surf_height_1d(hProfDataHFM, 'x', _ang=_v.op_HFM_ang, _amp_coef=_v.op_HFM_amp,
_nx=803, _ny=200, _size_x=0.5 * sin(3.1415927e-03),
_size_y=6.0e-03)
ofnHFM = os.path.join(_v.fdir, _v.op_HFM_ofn) if len(_v.op_HFM_ofn) > 0 else ''
if len(ofnHFM) > 0:
pathDifHFM = opHFM.get_data(3, 3)
srwl_uti_save_intens_ascii(pathDifHFM, opHFM.mesh, ofnHFM, 0,
['', 'Horizontal Position', 'Vertical Position', 'Opt. Path Dif.'],
_arUnits=['', 'm', 'm', 'm'])
el.append(opHFM)
pp.append(_v.op_HFMT_pp)
elif arElNames[i] == 'VFML':
if _v.BMmode == 'Norm':
# Focus at ES1; if using Bump, VFM must be 3.9+0.3 m (to compensate bump which moves focus 0.2 m upstream):
el.append(SRWLOptL(_Fy=1. / (1. / (zVFM - 0.6) + 1. / ((zSSA - zVFM) + (zES1 - zSSA) + 0.3))))
elif _v.BMmode == 'LowDiv':
# Focus at ES2 with a low divergence:
el.append(SRWLOptL(_Fy=1. / (1. / (zVFM - 0.6) + 1. / ((zSSA - zVFM) + (zES1 - zSSA) - 5.7 + 8.1))))
pp.append(_v.op_VFML_pp)
elif arElNames[i] == 'VFMT':
ifnVFM = os.path.join(_v.fdir, _v.op_VFM_ifn) if len(_v.op_VFM_ifn) > 0 else ''
if len(ifnVFM) > 0:
hProfDataVFM = srwl_uti_read_data_cols(ifnVFM, '\t')
opVFM = srwl_opt_setup_surf_height_1d(hProfDataVFM, 'y', _ang=3.1415927e-03, _nx=200, _ny=288,
_size_x=6.0e-03, _size_y=0.4 * sin(3.1415927e-03))
ofnVFM = os.path.join(_v.fdir, _v.op_VFM_ofn) if len(_v.op_VFM_ofn) > 0 else ''
if len(ofnVFM) > 0:
pathDifVFM = opVFM.get_data(3, 3)
srwl_uti_save_intens_ascii(pathDifVFM, opVFM.mesh, ofnVFM, 0,
['', 'Horizontal Position', 'Vertical Position', 'Opt. Path Dif.'],
_arUnits=['', 'm', 'm', 'm'])
el.append(opVFM)
pp.append(_v.op_VFMT_pp)
elif arElNames[i] == 'VMT':
ifnVM = os.path.join(_v.fdir, _v.op_VM_ifn) if len(_v.op_VM_ifn) > 0 else ''
if len(ifnVM) > 0:
hProfDataVM = srwl_uti_read_data_cols(ifnVM, '\t')
# sinusoidal equal to HFM. the original spec is 0.1, 6.75e-09 both 'h' 'v', angle 6.1086524e-03 rad to correct for vertical.
opVM = srwl_opt_setup_surf_height_1d(hProfDataVM, 'y', _ang=3.1415927e-03, _nx=200, _ny=500,
_size_x=6.0e-03, _size_y=0.5 * sin(3.1415927e-03))
ofnVM = os.path.join(_v.fdir, _v.op_VM_ofn) if len(_v.op_VM_ofn) > 0 else ''
if len(ofnVM) > 0:
pathDifVM = opVM.get_data(3, 3)
srwl_uti_save_intens_ascii(pathDifVM, opVM.mesh, ofnVM, 0,
['', 'Horizontal Position', 'Vertical Position', 'Opt. Path Dif.'],
_arUnits=['', 'm', 'm', 'm'])
el.append(opVM)
pp.append(_v.op_VMT_pp)
elif arElNames[i] == 'SSA':
# SSA = SRWLOptA('r', 'a', 0.4e-03, 0.4e-03) # 0.4, 0.4 for NOT low divergence mode;
if _v.beamline == 'ES1' and _v.BMmode == 'Norm':
el.append(SRWLOptA('r', 'a', _v.op_SSA_es1_norm_dx, _v.op_SSA_es1_norm_dy))
elif _v.beamline == 'ES2' and _v.BMmode == 'Norm':
el.append(SRWLOptA('r', 'a', _v.op_SSA_es2_norm_dx, _v.op_SSA_es2_norm_dy))
elif _v.beamline == 'ES2' and _v.BMmode == 'LowDiv':
el.append(SRWLOptA('r', 'a', _v.op_SSA_es2_lowdiv_dx, _v.op_SSA_es2_lowdiv_dy))
pp.append(_v.op_SSA_pp)
elif arElNames[i] == 'ApCRL':
# ApCRL = SRWLOptA('c', 'a', 1.0e-3)
el.append(SRWLOptA('c', 'a', _v.op_ApCRL_r))
pp.append(_v.op_ApCRL_pp)
elif arElNames[i] == 'CRL':
'''
from Delta import Delta, DEFAULTS_FILE
delta_obj = Delta(
energy=_v.w_e,
precise=True,
data_file=os.path.join(os.path.dirname(os.path.dirname(DEFAULTS_FILE)), 'dat/Be_delta.dat'),
quiet=True
)
delta = delta_obj.delta # 8.21692879E-07 # Be @ 20.4KeV
'''
delta = 8.21692879E-07
attenLen = 28544.7e-06 # [m] #20.4KeV
diamCRL = 1.e-03 # CRL diameter
rMinCRL = 50e-06 # CRL radius at the tip of parabola [m]
nCRL = 23 # number of lenses
wallThickCRL = 32.4e-06 # CRL wall thickness [m]
el.append(srwl_opt_setup_CRL(3, delta, attenLen, 1, diamCRL, diamCRL, rMinCRL, nCRL, wallThickCRL, | |
1, 0)]),
((2, 3), [(1, 2, 1), (0, 1, 0)]),
((2,), [(1, 2, 0)]),
((1, 2), [(1, 2, 0), (3, 4, 0)]),
((1, 2), [(0, 0, 0), (0, 0, 0)]),
((2,), [(1, 2, 3),]),
((3, 2), [(1, 2, 1), (3, 4, 2)]),
((2,), [(-1, 2, 0),]),
((4, 2), [(-1, -2, 0), (1, 2, 0)]),
((4, 2), [(-1, 2, 0), (1, 2, 2)]),
((5,), [(-1, -2, 2),]),
((4, 2), [(-1, -2, 1), (1, 2, 2)])
]))
def testPad(self, shape, dtype, pads):
rng = jtu.rand_small(self.rng())
args_maker = lambda: [rng(shape, dtype)]
fun = lambda operand: lax.pad(operand, np.array(0, dtype), pads)
self._CompileAndCheck(fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_inshape={}_pads={}"
       .format(jtu.format_shape_dtype_string(shape, dtype), pads),
       "shape": shape, "dtype": dtype, "pads": pads}
      for shape in [(2, 3)]
      for dtype in default_dtypes
      for pads in [
        [(0, 0, 0), (0, 0, 0)],  # no padding
        [(1, 1, 0), (2, 2, 0)],  # only positive edge padding
        [(1, 2, 1), (0, 1, 0)],  # edge padding and interior padding
        [(0, 0, 0), (-1, -1, 0)],  # negative padding
        [(0, 0, 0), (-2, -2, 4)],  # add big dilation then remove from edges
        [(0, 0, 0), (-2, -3, 1)],  # remove everything in one dimension
      ]))
  def testPadAgainstNumpy(self, shape, dtype, pads):
    """Compare lax.pad against the pure-NumPy reference implementation."""
    rng = jtu.rand_small(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    op = lambda x: lax.pad(x, np.array(0, dtype), pads)
    # lax_reference implements the same (lo, hi, interior) padding semantics
    # in plain NumPy, serving as the ground truth.
    numpy_op = lambda x: lax_reference.pad(x, np.array(0, dtype), pads)
    self._CheckAgainstNumpy(numpy_op, op, args_maker)
def testPadErrors(self):
with self.assertRaisesRegex(ValueError, "padding_config"):
lax.pad(np.zeros(2), 0., [(0, 1, 0), (0, 1, 0)])
with self.assertRaisesRegex(ValueError, "interior padding in padding_config must be nonnegative"):
lax.pad(np.zeros(2), 0., [(0, 1, -1)])
with self.assertRaisesRegex(ValueError, "Dimension size after padding is not at least 0"):
lax.pad(np.zeros(2), 0., [(-3, 0, 0)])
with self.assertRaisesRegex(ValueError, "Dimension size after padding is not at least 0"):
lax.pad(np.zeros(2), 0., [(-4, 0, 1)])
  def testReverse(self):
    """Exercise lax.rev under jit with several `dimensions` settings."""
    # NOTE: `dimensions` is captured by reference (late binding) in the jitted
    # lambda and is rebound before each call below. Each call uses an input of
    # a different shape, which causes jit to re-trace and therefore pick up
    # the new `dimensions` value; with identical shapes the cached trace would
    # silently reuse the previous dimensions.
    rev = jax.jit(lambda operand: lax.rev(operand, dimensions))
    dimensions = []
    self.assertAllClose(np.array([0, 1, 2, 3]), rev(np.array([0, 1, 2, 3])),
                        check_dtypes=False)
    dimensions = [0]
    self.assertAllClose(np.array([3, 2, 1]), rev(np.array([1, 2, 3])),
                        check_dtypes=False)
    dimensions = [0, 1]
    self.assertAllClose(np.array([[6, 5, 4], [3, 2, 1]]),
                        rev(np.array([[1, 2, 3], [4, 5, 6]])),
                        check_dtypes=False)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_predshape={}_argshapes={}".format(
          jtu.format_shape_dtype_string(pred_shape, np.bool_),
          jtu.format_shape_dtype_string(arg_shape, arg_dtype)),
       "pred_shape": pred_shape, "arg_shape": arg_shape, "arg_dtype": arg_dtype}
      for arg_shape in [(), (3,), (2, 3)]
      # A scalar predicate is always tested; a full-shape predicate is added
      # only when the argument shape itself is non-scalar.
      for pred_shape in ([(), arg_shape] if arg_shape else [()])
      for arg_dtype in default_dtypes))
  def testSelect(self, pred_shape, arg_shape, arg_dtype):
    """Compile-and-run check for lax.select on (pred, on_true, on_false)."""
    rng = jtu.rand_default(self.rng())
    def args_maker():
      return [rng(pred_shape, np.bool_), rng(arg_shape, arg_dtype),
              rng(arg_shape, arg_dtype)]
    return self._CompileAndCheck(lax.select, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_predshape={}_argshapes={}".format(
          jtu.format_shape_dtype_string(pred_shape, np.bool_),
          jtu.format_shape_dtype_string(arg_shape, arg_dtype)),
       "pred_shape": pred_shape, "arg_shape": arg_shape, "arg_dtype": arg_dtype}
      for arg_shape in [(), (3,), (2, 3)]
      # Same case grid as testSelect: scalar predicate always, full-shape
      # predicate only for non-scalar argument shapes.
      for pred_shape in ([(), arg_shape] if arg_shape else [()])
      for arg_dtype in default_dtypes))
  def testSelectAgainstNumpy(self, pred_shape, arg_shape, arg_dtype):
    """Compare lax.select against the pure-NumPy reference implementation."""
    rng = jtu.rand_default(self.rng())
    def args_maker():
      return [rng(pred_shape, np.bool_), rng(arg_shape, arg_dtype),
              rng(arg_shape, arg_dtype)]
    return self._CheckAgainstNumpy(lax_reference.select, lax.select, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       "_shape={}_indices={}_limit_indices={}_strides={}".format(
           jtu.format_shape_dtype_string(shape, dtype),
           indices, limit_indices, strides),
       # The case fields `indices`/`limit_indices` are delivered to the test
       # method under its parameter names `starts`/`limits`.
       "shape": shape, "dtype": dtype, "starts": indices,
       "limits": limit_indices, "strides": strides}
      for shape, indices, limit_indices, strides in [
        [(3,), (1,), (2,), None],
        [(7,), (4,), (7,), None],
        [(5,), (1,), (5,), (2,)],
        [(8,), (1,), (6,), (2,)],
        [(5, 3), (1, 1), (3, 2), None],
        [(5, 3), (1, 1), (3, 1), None],
        [(7, 5, 3), (4, 0, 1), (7, 1, 3), None],
        [(5, 3), (1, 1), (2, 1), (1, 1)],
        [(5, 3), (1, 1), (5, 3), (2, 1)],
      ]
      for dtype in default_dtypes))
  def testSlice(self, shape, dtype, starts, limits, strides):
    """Compile-and-run check for lax.slice (strides may be None)."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    op = lambda x: lax.slice(x, starts, limits, strides)
    self._CompileAndCheck(op, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       "_shape={}_indices={}_limit_indices={}_strides={}".format(
           jtu.format_shape_dtype_string(shape, dtype),
           indices, limit_indices, strides),
       # Case fields `indices`/`limit_indices` map to parameters
       # `starts`/`limits` of the test method.
       "shape": shape, "dtype": dtype, "starts": indices,
       "limits": limit_indices, "strides": strides}
      for shape, indices, limit_indices, strides in [
        [(3,), (1,), (2,), None],
        [(7,), (4,), (7,), None],
        [(5,), (1,), (5,), (2,)],
        [(8,), (1,), (6,), (2,)],
        [(5, 3), (1, 1), (3, 2), None],
        [(5, 3), (1, 1), (3, 1), None],
        [(7, 5, 3), (4, 0, 1), (7, 1, 3), None],
        [(5, 3), (1, 1), (2, 1), (1, 1)],
        [(5, 3), (1, 1), (5, 3), (2, 1)],
      ]
      for dtype in default_dtypes))
  def testSliceAgainstNumpy(self, shape, dtype, starts, limits, strides):
    """Compare lax.slice against the pure-NumPy reference implementation."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    op = lambda x: lax.slice(x, starts, limits, strides)
    numpy_op = lambda x: lax_reference.slice(x, starts, limits, strides)
    self._CheckAgainstNumpy(numpy_op, op, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_indices={}_size_indices={}".format(
          jtu.format_shape_dtype_string(shape, dtype),
          indices, size_indices),
       "shape": shape, "dtype": dtype, "indices": indices,
       "size_indices": size_indices}
      # Some cases pass start indices as np arrays rather than tuples to
      # exercise both accepted index forms.
      for shape, indices, size_indices in [
        [(3,), np.array((1,)), (1,)],
        [(5, 3), (1, 1), (3, 1)],
        [(5, 3), np.array((1, 1)), (3, 1)],
        [(7, 5, 3), np.array((4, 1, 0)), (2, 0, 1)],
      ]
      for dtype in default_dtypes))
  def testDynamicSlice(self, shape, dtype, indices, size_indices):
    """Compile-and-run check for lax.dynamic_slice."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype), np.array(indices)]
    op = lambda x, starts: lax.dynamic_slice(x, starts, size_indices)
    self._CompileAndCheck(op, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_indices={}_size_indices={}".format(
          jtu.format_shape_dtype_string(shape, dtype),
          indices, size_indices),
       "shape": shape, "dtype": dtype, "indices": indices,
       "size_indices": size_indices}
      for shape, indices, size_indices in [
        [(3,), (1,), (1,)],
        [(5, 3), (1, 1), (3, 1)],
        [(7, 5, 3), (4, 1, 0), (2, 0, 1)],
      ]
      for dtype in default_dtypes))
  def testDynamicSliceAgainstNumpy(self, shape, dtype, indices, size_indices):
    """Compare lax.dynamic_slice against the pure-NumPy reference."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype), np.array(indices)]
    op = lambda x, s: lax.dynamic_slice(x, s, size_indices)
    numpy_op = lambda x, s: lax_reference.dynamic_slice(x, s, size_indices)
    self._CheckAgainstNumpy(numpy_op, op, args_maker)
def testDynamicSliceInDim(self):
# Regression test for mixed type problem in dynamic_slice_in_dim.
rng = jtu.rand_default(self.rng())
x = rng((6, 7), np.int32)
np.testing.assert_equal(lax.dynamic_slice_in_dim(x, 2, 3), x[2:5])
def testDynamicSliceArraySliceSizes(self):
rng = jtu.rand_default(self.rng())
x = rng((6, 7), np.int32)
np.testing.assert_equal(lax.dynamic_slice(x, [2, 3], jnp.array([2, 2])),
x[2:4, 3:5])
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_indices={}_update_shape={}".format(
          jtu.format_shape_dtype_string(shape, dtype),
          indices, update_shape),
       "shape": shape, "dtype": dtype, "indices": indices,
       "update_shape": update_shape}
      for shape, indices, update_shape in [
        [(3,), (1,), (1,)],
        [(5, 3), (1, 1), (3, 1)],
        [(7, 5, 3), (4, 1, 0), (2, 0, 1)],
      ]
      for dtype in default_dtypes))
  def testDynamicUpdateSlice(self, shape, dtype, indices, update_shape):
    """Compile-and-run check for lax.dynamic_update_slice."""
    rng = jtu.rand_default(self.rng())
    def args_maker():
      # (operand, update, start_indices)
      return [rng(shape, dtype), rng(update_shape, dtype), np.array(indices)]
    self._CompileAndCheck(lax.dynamic_update_slice, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_indices={}_update_shape={}".format(
          jtu.format_shape_dtype_string(shape, dtype),
          indices, update_shape),
       "shape": shape, "dtype": dtype, "indices": indices,
       "update_shape": update_shape}
      for shape, indices, update_shape in [
        [(3,), (1,), (1,)],
        [(5, 3), (1, 1), (3, 1)],
        [(7, 5, 3), (4, 1, 0), (2, 0, 1)],
      ]
      for dtype in default_dtypes))
  def testDynamicUpdateSliceAgainstNumpy(self, shape, dtype, indices,
                                         update_shape):
    """Compare lax.dynamic_update_slice against the pure-NumPy reference."""
    rng = jtu.rand_default(self.rng())
    def args_maker():
      # (operand, update, start_indices)
      return [rng(shape, dtype), rng(update_shape, dtype), np.array(indices)]
    self._CheckAgainstNumpy(lax_reference.dynamic_update_slice,
                            lax.dynamic_update_slice, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_perm={}".format(
          jtu.format_shape_dtype_string(shape, dtype), perm),
       "shape": shape, "dtype": dtype, "perm": perm}
      for shape, perm in [
        [(3, 4), (1, 0)],
        [(3, 4), (0, 1)],
        [(3, 4, 5), (2, 1, 0)],
        [(3, 4, 5), (1, 0, 2)],
      ]
      for dtype in default_dtypes))
  def testTranspose(self, shape, dtype, perm):
    """Compile-and-run check for lax.transpose with tuple permutations."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    op = lambda x: lax.transpose(x, perm)
    self._CompileAndCheck(op, args_maker)
def testTransposeWithArrayPermutation(self):
x = lax.transpose(np.ones((2, 3)), jnp.array([1, 0]))
self.assertEqual((3, 2), x.shape)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_perm={}".format(
          jtu.format_shape_dtype_string(shape, dtype), perm),
       "shape": shape, "dtype": dtype, "perm": perm}
      for shape, perm in [
        [(3, 4), (1, 0)],
        [(3, 4), (0, 1)],
        [(3, 4, 5), (2, 1, 0)],
        [(3, 4, 5), (1, 0, 2)],
      ]
      for dtype in default_dtypes))
  def testTransposeAgainstNumpy(self, shape, dtype, perm):
    """Compare lax.transpose against the pure-NumPy reference implementation."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    op = lambda x: lax.transpose(x, perm)
    numpy_op = lambda x: lax_reference.transpose(x, perm)
    self._CheckAgainstNumpy(numpy_op, op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_inshape={}_reducedims={}_initval={}"
.format(op.__name__, jtu.format_shape_dtype_string(shape, dtype), dims,
init_val),
"op": op, "init_val": init_val, "shape": shape, "dtype": dtype, "dims": dims}
for init_val, op, types in [
(0, lax.add, default_dtypes),
(1, lax.mul, default_dtypes),
(0, lax.max, all_dtypes), # non-monoidal
(-np.inf, lax.max, float_dtypes),
(dtypes.iinfo(np.int32).min, lax.max, [np.int32]),
(dtypes.iinfo(np.int64).min, lax.max, [np.int64]),
(np.inf, lax.min, float_dtypes),
(dtypes.iinfo(np.int32).max, lax.min, [np.int32]),
(dtypes.iinfo(np.int64).max, lax.min, [np.int64]),
(dtypes.iinfo(np.uint32).max, lax.min, [np.uint32]),
(dtypes.iinfo(np.uint64).max, lax.min, [np.uint64]),
]
for dtype in types
for shape, dims in [
[(3, 4, 5), (0,)], [(3, 4, 5), (1, 2)],
[(3, 4, 5), (0, 2)], [(3, 4, 5), (0, 1, 2)]
]))
def testReduce(self, op, init_val, shape, dtype, dims):
rng_factory = (jtu.rand_default if dtypes.issubdtype(dtype, np.integer)
else jtu.rand_small)
rng = rng_factory(self.rng())
init_val = np.asarray(init_val, dtype=dtype)
fun = lambda operand, init_val: lax.reduce(operand, init_val, op, dims)
args_maker = lambda: [rng(shape, dtype), init_val]
self._CompileAndCheck(fun, args_maker)
# we separately test the version that uses a concrete init_val because it
# can hit different code paths
fun | |
plume has reached a maximum rise height yet
if np.sign(q0_local.Jz) != np.sign(q1_local.Jz):
top_counter += 1
# Check if the plume is at neutral buoyancy in an intrusion layer
# (e.g., after the top of the plume)
if top_counter > 0:
if np.sign(q0_local.rho_a - q0_local.rho) != \
np.sign(q1_local.rho_a - q1_local.rho):
# Update neutral buoyancy level counter
neutral_counter += 1
# Evaluate the stop criteria
if neutral_counter >= 1:
# Passed through the second neutral buoyany level
stop = True
if q[-1][10] / q1_local.D > sd_max:
# Progressed desired distance along the plume centerline
stop = True
if k >= 50000:
# Stop after specified number of iterations; used to protect
# against problems with the solution become stuck
stop = True
if q[-1][9] <= 0.:
# Reached a location at or above the free surface
stop = True
if q[-1][10] == q[-2][10]:
# Progress of motion of the plume has stopped
stop = True
# Update the index counter
k += 1
# Convert solution to numpy arrays
t = np.array(t)
q = np.array(q)
# Show user the final calculated point and return the solution
print(' Distance: %g (m), time: %g (s), k: %d' % \
(q[-1,10], t[-1], k))
return (t, q)
def correct_temperature(r, particles):
    """
    Overwrite particle heat entries in the extrinsic ODE state space

    When a dispersed-phase particle equilibrates to the ambient temperature,
    heat transfer is switched off inside `dispersed_phases.Particle`, but the
    ODE solver keeps integrating its own intrinsic copy of the heat variable.
    The solver's extrinsic state vector `r.y` therefore holds a stale heat
    value, which this function replaces with the heat computed from the
    particle's current masses and temperature before the solution is stored.

    Parameters
    ----------
    r : `scipy.integrate.ode` object
        ODE solution whose extrinsic state vector `r.y` is edited in place;
        the solver's internal state is unaffected.
    particles : list of `Particle` objects
        `bent_plume_model.Particle` objects with the local dispersed-phase
        conditions and behavior.

    Returns
    -------
    r : `scipy.integrate.ode` object
        The same object with corrected heat entries in `r.y`.

    """
    # The single-phase plume occupies the first 11 entries of the state
    # space; each particle's block follows in order.
    idx = 11
    for particle in particles:
        # Step over this particle's component masses
        idx += particle.particle.nc
        # Replace the heat entry with the value consistent with the current
        # particle masses and temperature
        r.y[idx] = np.sum(particle.m) * particle.nbe * particle.cp * particle.T
        # Step past heat (1), time (1), and position (3)
        idx += 5
    # Hand back the corrected solution object
    return r
def correct_particle_tracking(r, particles):
    """
    Mask particle positions in the state space after particles exit the plume

    The particle positions in the state space are stored in local plume
    coordinates (l,n,m), which are only meaningful while the particle is
    inside the corresponding Lagrangian element. Once a particle has left
    the plume, this function overwrites its position entries in the solver's
    extrinsic state vector with NaN so the post-processor can distinguish
    valid from invalid tracking data. It also detects the moment a particle
    leaves the plume (`p_fac == 0`), turns off its integration flag, and
    records the exit time, position, masses, and temperature on the particle.

    Parameters
    ----------
    r : `scipy.integrate.ode` object
        ODE solution whose extrinsic state vector `r.y` is edited in place;
        the solver's internal state is unaffected.
    particles : list of `Particle` objects
        `bent_plume_model.Particle` objects with the local dispersed-phase
        conditions and behavior.

    Returns
    -------
    r : `scipy.integrate.ode` object
        The same object with masked position entries in `r.y`.

    """
    # The single-phase plume occupies the first 11 entries of the state space
    idx = 11
    for particle in particles:
        n_comp = particle.particle.nc
        if particle.integrate:
            # Particle still tracked: step over masses, heat, time, and the
            # three position entries
            idx += n_comp + 5
            # A zero p_fac means the particle just left the plume
            if particle.p_fac == 0.:
                # Stop tracking this particle inside the plume
                particle.integrate = False
                # Record the exit-point properties
                particle.te = particle.t
                particle.xe = particle.x
                particle.ye = particle.y
                particle.ze = particle.z
                particle.me = particle.m
                particle.Te = particle.T
        else:
            # Particle already outside the plume: step over masses, heat,
            # and time, then blank the three position entries
            idx += n_comp + 2
            r.y[idx:idx + 3] = np.nan
            idx += 3
    # Hand back the corrected solution object
    return r
def entrainment(q0_local, q1_local, p):
"""
Compute the total shear and forced entrainment at one time step
Computes the local entrainment (kg/s) as a combination of shear
entrainment and forced entrainment for a local Lagrangian element. This
function follows the approach in Lee and Cheung (1990) to compute both
types of entrainment, but uses the formulation in Jirka (2004) for the
shear entrainment term. Like Lee and Cheung (1990), it uses the maximum
entrainment hypothesis: entrainment = max (shear, forced), with the
exception that a pure coflowing momentum jet has entrainment = shear +
forced. This function also makes one correction that in pure coflow
the forced entrainment should be computed by integrating around the entire
jet, and not just the half of the jet exposed to the current.
Parameters
----------
q0_local : `bent_plume_model.LagElement`
Object containing the numerical solution at the previous time step
q1_local : `bent_plume_model.LagElement`
Object containing the numerical solution at the current time step
p : `ModelParams` object
Object containing the fixed model parameters for the bent
plume model.
Returns
-------
md : float
Total entrainment (kg/s)
Notes
-----
The entrainment computed here is already integrated over the current
Lagrangian element surface area. Hence, the result is (kg/s) into the
element.
"""
# Find the magnitude and direction of the velocity vector in q1_local
Ua = np.sqrt(q1_local.ua**2 + q1_local.va**2 + q1_local.wa**2)
Phi_a = np.arctan2(q1_local.wa, np.sqrt(q1_local.ua**2 + q1_local.va**2))
Theta_a = np.arctan2(q1_local.va, q1_local.ua)
# Get the component of the ambient current along the plume centerline
Us = Ua * np.cos(q1_local.phi - Phi_a) * np.cos(q1_local.theta - Theta_a)
# Get the sines and cosines of the new angles
sin_t = np.sin(q1_local.theta - Theta_a)
sin_p = np.sin(q1_local.phi - Phi_a)
cos_t = np.cos(q1_local.theta - Theta_a)
cos_p = np.cos(q1_local.phi - Phi_a)
cos_t0 = np.cos(q0_local.theta - Theta_a)
cos_p0 = np.cos(q0_local.phi - Phi_a)
# Get the shear entrainment coefficient for the top-hat | |
29 )
plt.gca().locator_params(axis='x', tight = True, nbins=4)
# plt.gca().locator_params(axis='y', tight = True, nbins=5)
plt.xlabel('Translation rate [1/min]')
plt.ylabel('Period [min]')
plt.ylim(0,700)
my_figure.add_subplot(6,3,14)
plt.plot(translation_rate_results[:,0],
translation_rate_results[:,2], color = 'black')
plt.plot(translation_rate_results[:,0],
translation_rate_results[:,7], color = 'green')
plt.plot(translation_rate_results[:,0],
translation_rate_results[:,10], color = 'blue')
plt.axvline( 29 )
plt.gca().locator_params(axis='x', tight = True, nbins=4)
# plt.gca().locator_params(axis='y', tight = True, nbins=5)
# plt.fill_between(repression_threshold_results[:,0],
# repression_threshold_results[:,2] + repression_threshold_results[:,3],
# np.max(repression_threshold_results[:,2]- repression_threshold_results[:,3],0),
# lw = 0, color = 'grey')
plt.xlabel('Translation rate [1/min]')
plt.ylabel('Coherence')
plt.ylim(0,1)
my_figure.add_subplot(6,3,15)
plt.errorbar(translation_rate_results[:,0],
translation_rate_results[:,3]/10000,
yerr = translation_rate_results[:,4]/10000, color = 'black')
plt.errorbar(translation_rate_results[:,0],
translation_rate_results[:,8]/10000,
yerr = translation_rate_results[:,9]/10000, color = 'green')
plt.gca().locator_params(axis='x', tight = True, nbins=4)
plt.plot(translation_rate_results[:,0],
translation_rate_results[:,5]/10000, color = 'grey', zorder = 2)
plt.axvline( 29 )
plt.ylim(0,15)
plt.xlabel('Translation rate [1/min]')
plt.ylabel('Expression/1e4')
########
#
# TRANSCRIPTION RATE
#
########
transcription_rate_results = np.zeros((number_of_parameter_points,12))
index = 0
for alpha_m in np.linspace(1.0,100.0,number_of_parameter_points):
these_rna_values, these_protein_values = hes5.generate_multiple_trajectories(
number_of_trajectories = number_of_trajectories,
duration = 1500,
repression_threshold = 31400,
mRNA_degradation_rate = np.log(2)/30.0,
protein_degradation_rate = np.log(2)/90,
translation_rate = 29,
basal_transcription_rate = alpha_m,
transcription_delay = 29,
initial_mRNA = 3,
initial_protein = 31400,
equilibration_time = 1000)
these_langevin_rna_values, these_langevin_protein_values = hes5.generate_multiple_langevin_trajectories(
number_of_trajectories = number_of_trajectories*2,
duration = 1500*5,
repression_threshold = 31400,
mRNA_degradation_rate = np.log(2)/30.0,
protein_degradation_rate = np.log(2)/90,
translation_rate = 29,
basal_transcription_rate = alpha_m,
transcription_delay = 29,
initial_mRNA = 3,
initial_protein = 31400,
equilibration_time = 1000)
_, this_coherence, this_period = hes5.calculate_power_spectrum_of_trajectories(
these_protein_values )
_, this_langevin_coherence, this_langevin_period = hes5.calculate_power_spectrum_of_trajectories(
these_langevin_protein_values )
_, this_ode_mean = hes5.calculate_steady_state_of_ode(
repression_threshold = 31400,
mRNA_degradation_rate = np.log(2)/30,
protein_degradation_rate = np.log(2)/90,
translation_rate = 29,
basal_transcription_rate = alpha_m
)
this_theoretical_power_spectrum = hes5.calculate_theoretical_power_spectrum_at_parameter_point(
repression_threshold = 31400,
mRNA_degradation_rate = np.log(2)/30.0,
protein_degradation_rate = np.log(2)/90,
translation_rate = 29,
basal_transcription_rate = alpha_m,
transcription_delay = 29)
this_theoretical_coherence, this_theoretical_period = hes5.calculate_coherence_and_period_of_power_spectrum(
this_theoretical_power_spectrum)
#
transcription_rate_results[index,0] = alpha_m
transcription_rate_results[index,1] = this_period
transcription_rate_results[index,2] = this_coherence
transcription_rate_results[index,3] = np.mean(these_protein_values[:,1:])
transcription_rate_results[index,4] = np.std(these_protein_values[:,1:])
transcription_rate_results[index,5] = this_ode_mean
transcription_rate_results[index,6] = this_langevin_period
transcription_rate_results[index,7] = this_langevin_coherence
transcription_rate_results[index,8] = np.mean(these_langevin_protein_values[:,1:])
transcription_rate_results[index,9] = np.std(these_langevin_protein_values[:,1:])
transcription_rate_results[index,10] = this_theoretical_coherence
transcription_rate_results[index,11] = this_theoretical_period
index +=1
np.save(os.path.join(os.path.dirname(__file__),
'output','transcription_rate_results.npy'), transcription_rate_results)
# transcription_rate_results = np.load(os.path.join(os.path.dirname(__file__),
# 'output','transcription_rate_results.npy'))
my_figure.add_subplot(6,3,16)
plt.plot(transcription_rate_results[:,0],
transcription_rate_results[:,1], color = 'black')
plt.plot(transcription_rate_results[:,0],
transcription_rate_results[:,6], color = 'green')
plt.plot(transcription_rate_results[:,0],
transcription_rate_results[:,11], color = 'blue')
plt.axvline( 11 )
plt.gca().locator_params(axis='x', tight = True, nbins=4)
# plt.gca().locator_params(axis='y', tight = True, nbins=5)
plt.xlabel('Trancription rate [1/min]')
plt.ylabel('Period [min]')
plt.ylim(0,700)
my_figure.add_subplot(6,3,17)
plt.plot(transcription_rate_results[:,0],
transcription_rate_results[:,2], color = 'black')
plt.plot(transcription_rate_results[:,0],
transcription_rate_results[:,7], color = 'green')
plt.plot(transcription_rate_results[:,0],
transcription_rate_results[:,10], color = 'blue')
plt.axvline( 11 )
plt.gca().locator_params(axis='x', tight = True, nbins=4)
plt.xlabel('Transcription rate [1/min]')
plt.ylabel('Coherence')
plt.ylim(0,1)
my_figure.add_subplot(6,3,18)
# plt.plot(repression_threshold_results[:,0],
plt.errorbar(transcription_rate_results[:,0],
transcription_rate_results[:,3]/10000,
yerr = transcription_rate_results[:,4]/10000, color = 'black')
plt.errorbar(transcription_rate_results[:,0],
transcription_rate_results[:,8]/10000,
yerr = transcription_rate_results[:,9]/10000, color = 'green')
plt.plot(transcription_rate_results[:,0],
transcription_rate_results[:,5]/10000, color = 'grey')
plt.gca().locator_params(axis='x', tight = True, nbins=4)
plt.axvline( 11 )
plt.ylim(0,15)
plt.xlabel('Transcription rate [1/min]')
plt.ylabel('Expression/1e4')
plt.tight_layout()
plt.savefig(os.path.join(os.path.dirname(__file__),
'output','full_parameter_sweep_stochastic.pdf'))
def xest_make_full_parameter_sweep(self):
    """One-at-a-time deterministic parameter sweep around the standard HES5 fit.

    For each of five model parameters (repression threshold, mRNA degradation
    rate, protein degradation rate, transcription delay, translation rate)
    this sweeps 100 values while holding the remaining parameters at the
    standard point (P0 = 23000, mu_m = ln2/30, mu_p = ln2/90, alpha_p = 230,
    tau = 29), integrates one deterministic trajectory per value, measures the
    oscillation period and relative amplitude, and plots the results in a 5x2
    grid saved to 'output/full_parameter_sweep.pdf'.

    NOTE(review): the 'xest_' prefix keeps the test runner from collecting
    this method; rename to 'test_' to re-enable. Relies on module-level
    imports (hes5, plt, np, os) — presumably defined at the top of this file.
    """
    ########
    #
    # REPRESSION THRESHOLD
    #
    ########
    number_of_parameter_points = 100
    # Result columns: parameter value, period [min], relative amplitude,
    # amplitude variation (stored but not plotted).
    repression_threshold_results = np.zeros((number_of_parameter_points,4))
    index = 0
    for p0 in np.linspace(1,60000,number_of_parameter_points):
        this_trajectory = hes5.generate_deterministic_trajectory( duration = 720,
                                                                  repression_threshold = p0,
                                                                  mRNA_degradation_rate = np.log(2)/30.0,
                                                                  protein_degradation_rate = np.log(2)/90.0,
                                                                  translation_rate = 230,
                                                                  transcription_delay = 29.0,
                                                                  initial_mRNA = 3.0,
                                                                  initial_protein = 23000)
        # Period/amplitude are measured on the signal in column 1
        # (presumably mRNA or protein — confirm against hes5's column layout).
        this_period, this_amplitude, this_variation = hes5.measure_period_and_amplitude_of_signal(this_trajectory[:,0],
                                                                                                  this_trajectory[:,1])
        repression_threshold_results[index,0] = p0
        repression_threshold_results[index,1] = this_period
        repression_threshold_results[index,2] = this_amplitude
        repression_threshold_results[index,3] = this_variation
        index +=1
    my_figure = plt.figure( figsize = (4.5, 7.5) )
    my_figure.add_subplot(521)
    # x axis rescaled by 1e4 to keep tick labels compact.
    plt.plot(repression_threshold_results[:,0]/10000,
             repression_threshold_results[:,1], color = 'black')
    # plt.axvline( 23000 )
    # Vertical line marks the standard parameter value (23000 / 1e4).
    plt.axvline( 2.3 )
    # plt.gca().locator_params(axis='x', tight = True, nbins=3)
    # plt.gca().xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%0.0e'))
    # plt.gca().ticklabel_format(axis = 'x', style = 'sci')
    plt.ylim(0,700)
    plt.xlabel('Repression threshold/1e4')
    plt.ylabel('Period [min]')
    my_figure.add_subplot(522)
    # plt.plot(repression_threshold_results[:,0],
    plt.plot(repression_threshold_results[:,0]/10000,
             repression_threshold_results[:,2], color = 'black')
    # plt.axvline( 23000 )
    plt.axvline( 2.3 )
    # plt.fill_between(repression_threshold_results[:,0],
    #                  repression_threshold_results[:,2] + repression_threshold_results[:,3],
    #                  np.max(repression_threshold_results[:,2]- repression_threshold_results[:,3],0),
    #                  lw = 0, color = 'grey')
    plt.xlabel('Repression threshold/1e4')
    plt.ylabel('Relative amplitude')
    plt.ylim(0,2)
    ########
    #
    # MRNA DEGRADATION
    #
    ########
    number_of_parameter_points = 100
    mrna_degradation_results = np.zeros((number_of_parameter_points,4))
    index = 0
    # Sweep from zero up to a 15-minute mRNA half-life (standard is ln2/30).
    for mu_m in np.linspace(0.00,np.log(2)/15,number_of_parameter_points):
        this_trajectory = hes5.generate_deterministic_trajectory( duration = 720.0,
                                                                  repression_threshold = 23000,
                                                                  mRNA_degradation_rate = mu_m,
                                                                  protein_degradation_rate = np.log(2)/90.0,
                                                                  translation_rate = 230,
                                                                  transcription_delay = 29.0,
                                                                  initial_mRNA = 3.0,
                                                                  initial_protein = 23000)
        this_period, this_amplitude, this_variation = hes5.measure_period_and_amplitude_of_signal(this_trajectory[:,0],
                                                                                                  this_trajectory[:,1])
        mrna_degradation_results[index,0] = mu_m
        mrna_degradation_results[index,1] = this_period
        mrna_degradation_results[index,2] = this_amplitude
        mrna_degradation_results[index,3] = this_variation
        index +=1
    my_figure.add_subplot(523)
    plt.plot(mrna_degradation_results[:,0],
             mrna_degradation_results[:,1], color = 'black')
    plt.axvline( np.log(2)/30 )
    plt.gca().locator_params(axis='x', tight = True, nbins=4)
    plt.xlabel('mRNA degradation [1/min]')
    plt.ylabel('Period [min]')
    plt.ylim(0,700)
    my_figure.add_subplot(524)
    plt.plot(mrna_degradation_results[:,0],
             mrna_degradation_results[:,2], color = 'black')
    plt.gca().locator_params(axis='x', tight = True, nbins=4)
    plt.axvline( np.log(2)/30 )
    # plt.fill_between(repression_threshold_results[:,0],
    #                  repression_threshold_results[:,2] + repression_threshold_results[:,3],
    #                  np.max(repression_threshold_results[:,2]- repression_threshold_results[:,3],0),
    #                  lw = 0, color = 'grey')
    plt.xlabel('mRNA degradation [1/min]')
    plt.ylabel('Relative amplitude')
    plt.ylim(0,2)
    ########
    #
    # PROTEIN DEGRADATION
    #
    ########
    number_of_parameter_points = 100
    protein_degradation_results = np.zeros((number_of_parameter_points,4))
    index = 0
    for mu_p in np.linspace(0.00,np.log(2)/15,number_of_parameter_points):
        this_trajectory = hes5.generate_deterministic_trajectory( duration = 720.0,
                                                                  repression_threshold = 23000,
                                                                  mRNA_degradation_rate = np.log(2)/30.0,
                                                                  protein_degradation_rate = mu_p,
                                                                  translation_rate = 230,
                                                                  transcription_delay = 29.0,
                                                                  initial_mRNA = 3.0,
                                                                  initial_protein = 23000)
        this_period, this_amplitude, this_variation = hes5.measure_period_and_amplitude_of_signal(this_trajectory[:,0],
                                                                                                  this_trajectory[:,1])
        protein_degradation_results[index,0] = mu_p
        protein_degradation_results[index,1] = this_period
        protein_degradation_results[index,2] = this_amplitude
        protein_degradation_results[index,3] = this_variation
        index +=1
    my_figure.add_subplot(525)
    plt.plot(protein_degradation_results[:,0],
             protein_degradation_results[:,1], color = 'black')
    plt.axvline( np.log(2)/90 )
    plt.gca().locator_params(axis='x', tight = True, nbins=4)
    plt.xlabel('Protein degradation [1/min]')
    plt.ylabel('Period [min]')
    plt.ylim(0,700)
    my_figure.add_subplot(526)
    plt.plot(protein_degradation_results[:,0],
             protein_degradation_results[:,2], color = 'black')
    plt.axvline( np.log(2)/90 )
    plt.gca().locator_params(axis='x', tight = True, nbins=4)
    # plt.fill_between(repression_threshold_results[:,0],
    #                  repression_threshold_results[:,2] + repression_threshold_results[:,3],
    #                  np.max(repression_threshold_results[:,2]- repression_threshold_results[:,3],0),
    #                  lw = 0, color = 'grey')
    plt.xlabel('Protein degradation [1/min]')
    plt.ylabel('Relative amplitude')
    plt.ylim(0,2)
    ########
    #
    # TIME DELAY
    #
    ########
    number_of_parameter_points = 100
    time_delay_results = np.zeros((number_of_parameter_points,4))
    index = 0
    for tau in np.linspace(5.0,40.0,number_of_parameter_points):
        this_trajectory = hes5.generate_deterministic_trajectory( duration = 720.0,
                                                                  repression_threshold = 23000,
                                                                  mRNA_degradation_rate = np.log(2)/30.0,
                                                                  protein_degradation_rate = np.log(2)/90.0,
                                                                  translation_rate = 230,
                                                                  transcription_delay = tau,
                                                                  initial_mRNA = 3.0,
                                                                  initial_protein = 23000)
        this_period, this_amplitude, this_variation = hes5.measure_period_and_amplitude_of_signal(this_trajectory[:,0],
                                                                                                  this_trajectory[:,1])
        time_delay_results[index,0] = tau
        time_delay_results[index,1] = this_period
        time_delay_results[index,2] = this_amplitude
        time_delay_results[index,3] = this_variation
        index +=1
    my_figure.add_subplot(527)
    plt.plot(time_delay_results[:,0],
             time_delay_results[:,1], color = 'black')
    plt.axvline( 29.0 )
    # plt.gca().locator_params(axis='x', tight = True, nbins=4)
    plt.xlabel('Time delay [min]')
    plt.ylabel('Period [min]')
    plt.ylim(0,700)
    my_figure.add_subplot(528)
    plt.plot(time_delay_results[:,0],
             time_delay_results[:,2], color = 'black')
    plt.axvline( 29.0 )
    # plt.gca().locator_params(axis='x', tight = True, nbins=4)
    # plt.fill_between(repression_threshold_results[:,0],
    #                  repression_threshold_results[:,2] + repression_threshold_results[:,3],
    #                  np.max(repression_threshold_results[:,2]- repression_threshold_results[:,3],0),
    #                  lw = 0, color = 'grey')
    plt.xlabel('Time delay [min]')
    plt.ylabel('Relative amplitude')
    plt.ylim(0,2)
    ########
    #
    # TRANSLATION RATE
    #
    ########
    number_of_parameter_points = 100
    translation_rate_results = np.zeros((number_of_parameter_points,4))
    index = 0
    for alpha_p in np.linspace(1.0,400.0,number_of_parameter_points):
        this_trajectory = hes5.generate_deterministic_trajectory( duration = 720.0,
                                                                  repression_threshold = 23000,
                                                                  mRNA_degradation_rate = np.log(2)/30.0,
                                                                  protein_degradation_rate = np.log(2)/90.0,
                                                                  translation_rate = alpha_p,
                                                                  transcription_delay = 29.0,
                                                                  initial_mRNA = 3.0,
                                                                  initial_protein = 23000)
        this_period, this_amplitude, this_variation = hes5.measure_period_and_amplitude_of_signal(this_trajectory[:,0],
                                                                                                  this_trajectory[:,1])
        translation_rate_results[index,0] = alpha_p
        translation_rate_results[index,1] = this_period
        translation_rate_results[index,2] = this_amplitude
        translation_rate_results[index,3] = this_variation
        index +=1
    my_figure.add_subplot(529)
    plt.plot(translation_rate_results[:,0],
             translation_rate_results[:,1], color = 'black')
    plt.axvline( 230 )
    plt.gca().locator_params(axis='x', tight = True, nbins=4)
    # plt.gca().locator_params(axis='y', tight = True, nbins=5)
    plt.xlabel('Translation rate [1/min]')
    plt.ylabel('Period [min]')
    plt.ylim(0,700)
    my_figure.add_subplot(5,2,10)
    plt.plot(translation_rate_results[:,0],
             translation_rate_results[:,2], color = 'black')
    plt.axvline( 230 )
    plt.gca().locator_params(axis='x', tight = True, nbins=4)
    # plt.gca().locator_params(axis='y', tight = True, nbins=5)
    # plt.fill_between(repression_threshold_results[:,0],
    #                  repression_threshold_results[:,2] + repression_threshold_results[:,3],
    #                  np.max(repression_threshold_results[:,2]- repression_threshold_results[:,3],0),
    #                  lw = 0, color = 'grey')
    plt.xlabel('Translation rate [1/min]')
    plt.ylabel('Relative amplitude')
    plt.ylim(0,2)
    plt.tight_layout()
    plt.savefig(os.path.join(os.path.dirname(__file__),
                             'output','full_parameter_sweep.pdf'))
def xest_different_tau_values(self):
    """Compare deterministic protein trajectories for four transcription delays.

    Integrates the deterministic HES5 model with transcription delays of
    29, 12, 20 and 40 minutes (all other parameters at the standard point)
    and overlays the protein time series in one panel saved to
    'output/tau_investigation.pdf'.

    Fixed defect: the original body assigned an unused ``standard_p0``
    variable whose stray trailing comma silently turned it into a 1-tuple;
    the dead assignment has been removed.

    NOTE(review): the 'xest_' prefix keeps the test runner from collecting
    this method; rename to 'test_' to re-enable.
    """
    my_figure = plt.figure( figsize = (4.5, 2.5) )
    # Parameters shared by every curve; only the transcription delay varies.
    common_parameters = dict( duration = 720,
                              repression_threshold = 23000,
                              mRNA_degradation_rate = np.log(2)/30.0,
                              protein_degradation_rate = np.log(2)/90.0,
                              translation_rate = 230,
                              initial_mRNA = 3.0,
                              initial_protein = 23000 )
    # (delay [min], line colour, dash pattern or None for solid, legend label)
    curve_specifications = [ ( 29.0, 'blue', None, r'$\tau=29$' ),
                             ( 12.0, 'green', [3,1], r'$\tau=12$' ),
                             ( 20.0, 'orange', [2,0.5], r'$\tau=20$' ),
                             ( 40.0, 'purple', [0.5,0.5], r'$\tau=40$' ) ]
    for delay, colour, dash_pattern, label in curve_specifications:
        trajectory = hes5.generate_deterministic_trajectory(
            transcription_delay = delay, **common_parameters )
        plot_options = { 'color' : colour, 'label' : label }
        if dash_pattern is not None:
            plot_options['linestyle'] = '--'
            plot_options['dashes'] = dash_pattern
        # Column 2 of the trajectory holds the protein signal.
        plt.plot( trajectory[:,0], trajectory[:,2], **plot_options )
    # plt.ylim(0,1.)
    plt.xlabel('Rescaled time')
    plt.ylabel('Rescaled protein')
    plt.legend()
    plt.tight_layout()
    plt.savefig(os.path.join(os.path.dirname(__file__),
                             'output','tau_investigation.pdf'))
def xest_different_protein_degradation_values(self):
my_figure = plt.figure( figsize = (6.5, 2.5) )
first_trajectory = hes5.generate_deterministic_trajectory( duration = 720,
repression_threshold = 23000,
mRNA_degradation_rate = np.log(2)/30.0,
protein_degradation_rate = np.log(2)/90.0,
translation_rate = 230,
transcription_delay = 29.0,
initial_mRNA = 3.0,
initial_protein = 23000)
second_trajectory = hes5.generate_deterministic_trajectory( duration = 720,
repression_threshold | |
<reponame>sfu-arch/TensorBricks
import math
from tqdm import tqdm
def calculate_mac_utilization(self, num_h_convs, num_w_convs, num_cin, num_f, num_macs_w, hw_params):
    """Estimate MAC-array and partial-add cycle counts for one tile of work.

    Folds the channel, width and filter dimensions onto the available MAC
    lanes via ``self.get_mac_utilization`` and combines the per-dimension
    utilizations multiplicatively.

    Returns a 4-tuple:
        (effective MAC cycles, total MAC cycles,
         effective partial-add cycles, total partial-add cycles)
    where "effective" = utilization-weighted cycle count.
    """
    channel_util, channel_folds = self.get_mac_utilization(num_cin, hw_params.mac_cxx)
    width_util, width_folds = self.get_mac_utilization(num_w_convs, num_macs_w)
    filter_util, filter_folds = self.get_mac_utilization(num_f, hw_params.mac_fx)
    # Every fold of every dimension replays the full set of row convolutions.
    total_mac_cycles = num_h_convs * width_folds * channel_folds * filter_folds
    effective_mac_cycles = width_util * channel_util * filter_util * total_mac_cycles
    # Partial adds reduce across channels, so their utilization only depends
    # on the width and filter dimensions.
    total_padd_cycles = filter_folds * width_folds * num_h_convs
    effective_padd_cycles = width_util * filter_util * total_padd_cycles
    return effective_mac_cycles, total_mac_cycles, effective_padd_cycles, total_padd_cycles
def conv_cycles(layer_attr, hw_params, init_start_cout_idx, init_end_cout_idx):
    """Count MAC/partial-add cycles for one input tile over a filter range.

    Works on a single tile anchored at the input-feature-map origin
    (hin = win = cin = 0), derives how many kernel positions fit in the
    tile along height and width, and accumulates MAC statistics for the
    output channels ``init_start_cout_idx`` .. ``init_end_cout_idx`` in
    groups of ``hw_params.fx``.

    Returns a 4-tuple:
        (total MAC cycles over all filters,
         utilization-weighted MAC cycles over all filters,
         number of vertical kernel positions,
         number of horizontal kernel positions)

    NOTE(review): ``calculate_mac_utilization`` is invoked without ``self``
    here although the sibling definition takes one — presumably a
    module-level variant exists in the original repository; confirm.
    """
    # Tile anchored at the origin of the input feature map.
    hin = 0
    win = 0
    cin = 0
    # ------ height: number of vertical kernel positions in this tile
    end_hin_idx = min(hin + hw_params.X, layer_attr.Hin) - 1
    num_hin = end_hin_idx - hin + 1
    if num_hin < layer_attr.Kx:
        num_h_convs = 1
    else:
        # BUGFIX: subtract the kernel height BEFORE dividing by the stride.
        # The previous expression divided only Kx by Sx (operator-precedence
        # bug); the width computation below already used the correct form.
        num_h_convs = int((num_hin - layer_attr.Kx) / layer_attr.Sx) + 1
    # ------ width: number of horizontal kernel positions in this tile
    end_win_idx = min(win + hw_params.wxx, layer_attr.Win) - 1
    num_win = end_win_idx - win + 1
    if num_win < layer_attr.Ky:
        num_w_convs = 1
    else:
        num_w_convs = int((num_win - layer_attr.Ky) / layer_attr.Sy) + 1
    # for DW num_macs_w == 1 and for PW num_macs_w == 3
    num_macs_w = hw_params.mac_wxx - layer_attr.Ky + 1
    # ------ channels covered by this tile
    start_cin_idx = cin
    end_cin_idx = min(start_cin_idx + hw_params.cxx, layer_attr.Cin) - 1
    num_cin = end_cin_idx - start_cin_idx + 1
    mac_cycles_all_filters = 0
    util_cycles_all_filters = 0
    padd_util_cycles_all_filters = 0
    padd_cycles_all_filters = 0
    # Walk the requested output-channel range in groups of hw_params.fx filters.
    for f in range(init_start_cout_idx, init_end_cout_idx, hw_params.fx):
        # ------ f parameter calculations
        end_f_idx = min(f + hw_params.fx, init_end_cout_idx + 1) - 1
        num_f = end_f_idx - f + 1
        # MAC utilization for this filter group.
        mac_util_cycles, mac_cycles, padd_util_cycles, padd_cycles = calculate_mac_utilization(
            num_h_convs, num_w_convs, num_cin, num_f, num_macs_w, hw_params)
        # Accumulate per-filter stats (consumed by P2 in PDP).
        util_cycles_all_filters += mac_util_cycles
        mac_cycles_all_filters += mac_cycles
        # Partial adds only occur when accumulating onto earlier channel
        # tiles; with cin == 0 this branch never fires for this tile.
        if cin != 0:
            padd_util_cycles_all_filters += padd_util_cycles
            padd_cycles_all_filters += padd_cycles
    return mac_cycles_all_filters, util_cycles_all_filters, num_h_convs, num_w_convs
def sram_traffic(
        dimension_rows=4,
        dimension_cols=4,
        ifmap_h=7, ifmap_w=7,
        filt_h=3, filt_w=3,
        num_channels=3,
        strides=1, num_filt=8,
        ofmap_base=2000000, filt_base=1000000, ifmap_base=0,
        # sram_read_trace_file="sram_read.csv",
        # sram_write_trace_file="sram_write.csv"
):
    """Model cycle counts and array utilization for one convolution layer.

    Maps a convolution onto a dimension_rows x dimension_cols array: each
    column holds one (or more) filters, each of which needs
    filt_h * filt_w * num_channels rows. When a filter does not fit in the
    rows it is split over "horizontal folds"; when the filters do not fit
    in the columns the work is split over "vertical folds". The actual
    SRAM trace emission has been stripped (commented out) — only the cycle
    and utilization bookkeeping remains.

    Returns (cycles, compute_cycles, util, E_h, E_w) where util is the
    cycle-weighted (not yet normalized) utilization accumulator.

    NOTE(review): relies on module-level helpers gen_trace_filter_partial,
    gen_trace_ifmap_partial and gen_trace_ofmap defined elsewhere in the
    original repository. ``final_util`` is computed but not returned.
    """
    # Dimensions of output feature map channel
    E_h = math.floor((ifmap_h - filt_h + strides) / strides)
    E_w = math.floor((ifmap_w - filt_w + strides) / strides)
    # Number of pixels in one convolution window
    px_per_conv_window = filt_h * filt_w * num_channels
    r2c = px_per_conv_window
    # Total number of ofmap px across all channels
    num_ofmap_px = E_h * E_w * num_filt
    e2 = E_h * E_w
    e2m = num_ofmap_px
    # Variables to calculate folds in runtime
    num_h_fold = 1
    num_v_fold = 1
    max_parallel_window = 1
    # Variables for utilization calculation
    util = 0
    compute_cycles = 0
    # A filter either spills over several row folds, or several whole
    # filters fit in the rows at once ("parallel windows").
    if dimension_rows < px_per_conv_window:
        num_h_fold = math.ceil(px_per_conv_window / dimension_rows)
    else:
        max_parallel_window = math.floor(dimension_rows / px_per_conv_window)
    reqd_cols = num_filt  # Total number of cols to be mapped
    max_cols_per_v_fold = max_parallel_window * dimension_cols
    num_v_folds = math.ceil(reqd_cols / max_cols_per_v_fold)
    remaining_cols = reqd_cols
    cycles = 0
    prev_cycl = 0
    # print("Vertical folds = " + str(num_v_folds))
    # print("Horizontal folds = " + str(num_h_fold))
    # These are the starting addresses of filter weights in the memory
    all_col_addr_list = []
    # for c in range(num_filt):
    #     addr = (c) * r2c + filt_base
    #     all_col_addr_list.append(addr)
    # These are the starting addresses of ifmap windows in the memory
    hc = ifmap_w * num_channels
    all_ifmap_base_addr = []
    # for px in range(int(e2)):  # number of ofmap px in a ofmap channel
    #     addr = (px / E_w) * strides * hc + (px % E_w) * strides
    #     all_ifmap_base_addr.append(addr)
    filter_load_cycles = 0
    for v in range(int(num_v_folds)):
        # print("V fold id: " + str(v))
        # Take a slice of the starting addresses that are relevant for this v_fold
        cols_this_fold = min(remaining_cols, max_parallel_window * dimension_cols)
        idx_start = v * dimension_cols
        idx_end = idx_start + cols_this_fold
        col_addr_list = all_col_addr_list[idx_start:idx_end]
        if num_h_fold > 1:
            # One filter spans several row folds: load a slice of weights,
            # stream ifmap data, and account the partial results per fold.
            rem_h = r2c  # Tracks the elements processed within a conv filter
            next_ifmap_addr = ifmap_base  # Starts from the top left corner of the IFMAP matrix
            for h in range(num_h_fold):
                rows_this_fold = min(rem_h, dimension_rows)
                # print("h fold id: " + str(h))
                # Values returned
                # cycles -> Cycle count for the next operation ie. cycles elapsed + 1
                # col_addr_list -> The starting filter address for the next iteration
                cycles, col_addr_list = gen_trace_filter_partial(
                    col_addrs=col_addr_list,
                    cycle=cycles,
                    num_rows=dimension_rows,
                    remaining=rows_this_fold
                )
                # print("Weights loaded by " + str(cycles) + " cycles")
                data_out_cycles = cycles  # Store this cycle for parallel readout
                filter_load_cycles += data_out_cycles
                cycles_ifmap = gen_trace_ifmap_partial(
                    cycle=cycles,
                    num_rows=dimension_rows, num_cols=dimension_cols,
                    num_filters=num_filt,
                    remaining=rem_h,
                    remaining_filters=remaining_cols,
                    ifmap_h=ifmap_h, ifmap_w=ifmap_w,
                    filt_h=filt_h, filt_w=filt_w,
                    num_channels=num_channels
                )
                cycles_ofmap = gen_trace_ofmap(
                    cycle=data_out_cycles,
                    num_rows=dimension_rows,
                    num_cols=dimension_cols,
                    ofmap_base=ofmap_base,
                    window_size=rows_this_fold,
                    parallel_window=1,
                    num_ofmap_px=int(e2),
                    filters_done=(v * dimension_cols),
                    num_filter=num_filt
                )
                # print("IFMAPS processed by " + str(cycles) + " cycles")
                util_this_fold = (rows_this_fold * cols_this_fold) / (dimension_rows * dimension_cols)
                rem_h -= rows_this_fold
                # ifmap and ofmap streams overlap; the slower one gates progress.
                cycles = max(cycles_ifmap, cycles_ofmap)
                del_cycl = cycles - prev_cycl
                util += util_this_fold * del_cycl
                compute_cycles += del_cycl
                prev_cycl = cycles
        else:
            # Whole filters fit in the rows: several filters may be mapped
            # per column ("parallel windows"), all loaded in one pass.
            # filters_this_fold = min(remaining_cols, max_cols_per_v_fold)
            filt_done = v * max_parallel_window * dimension_cols
            rem = num_filt - filt_done
            parallel_window = math.ceil(rem / dimension_cols)
            parallel_window = int(min(max_parallel_window, parallel_window))
            cycles_filter = gen_filter_trace(
                cycle=cycles,
                num_rows=dimension_rows, num_cols=dimension_cols,
                filt_h=filt_h, filt_w=filt_w, num_channels=num_channels,
                col_addr=col_addr_list,
                parallel_window=parallel_window,
                filters_this_fold=cols_this_fold
            )
            cycles_ifmap, rows_this_fold \
                = gen_ifmap_trace(
                    cycle=cycles_filter,
                    num_rows=dimension_rows, num_cols=dimension_cols,
                    ifmap_h=ifmap_h, ifmap_w=ifmap_w,
                    filt_h=filt_h, filt_w=filt_w,
                    num_channels=num_channels, stride=strides,
                    parallel_window=parallel_window
                )
            cycles_ofmap = gen_trace_ofmap(
                cycle=cycles_filter,
                num_rows=dimension_rows, num_cols=dimension_cols,
                ofmap_base=ofmap_base,
                parallel_window=parallel_window,
                window_size=r2c,
                num_ofmap_px=int(e2),
                filters_done=int(v * max_parallel_window * dimension_cols),
                num_filter=num_filt
            )
            cycles = max(cycles_ifmap, cycles_ofmap)
            del_cycl = cycles - prev_cycl
            # Since multiple filters are being mapped on a single col due to large number of rows
            # util calculation is a little involved,
            # cols_this_fold --> number of filters mapped this fold
            rem = cols_this_fold
            tmp_util = 0
            for _ in range(parallel_window):
                col_used = min(rem, dimension_cols)
                row_used = r2c  # Number of row used will always be in multiple of r2c,
                # parallel window calc took care of this
                tmp_util += row_used * col_used
                rem -= col_used
            # util_this_fold = (rows_this_fold * cols_this_fold) /(dimension_rows * dimension_cols)
            util_this_fold = tmp_util / (dimension_rows * dimension_cols)
            util += util_this_fold * del_cycl
            compute_cycles += del_cycl
            prev_cycl = cycles
        remaining_cols -= cols_this_fold
    # final = str(cycles)
    # NOTE(review): final_util is computed but never returned or printed.
    final_util = (util / compute_cycles) * 100
    # print('final_util: {}'.format(final_util))
    # print("Compute finished at: " + str(final) + " cycles")
    # cycles = cycles - filter_load_cycles
    # compute_cycles = compute_cycles - filter_load_cycles
    return cycles, compute_cycles, util, E_h, E_w
def gen_filter_trace(
        cycle=0,
        num_rows=4, num_cols=4,
        filt_h=3, filt_w=3, num_channels=3,
        col_addr=None,
        parallel_window=1,
        filters_this_fold=4
):
    """Advance the cycle counter past the filter-weight load phase.

    Each parallel window streams one full convolution window of weights,
    i.e. ``filt_h * filt_w * num_channels`` rows, taking that many cycles
    per window. The actual address-trace emission has been stripped out of
    this model, so only the cycle arithmetic remains.

    Fixed defects: the mutable default argument ``col_addr=[]`` is replaced
    by ``None``, and dead code (an unused blank-prefix string and an unused
    per-window column count) has been removed. ``num_rows``, ``num_cols``,
    ``col_addr`` and ``filters_this_fold`` are kept for interface
    compatibility with the (stripped) trace-writing version.

    Returns the cycle count after all weights are loaded.
    """
    # Avoid the shared-mutable-default pitfall; the list is currently unused.
    if col_addr is None:
        col_addr = []
    # One convolution window spans r2c rows of the array.
    r2c = filt_h * filt_w * num_channels
    # Each wrap-around window takes r2c cycles to stream its weights in.
    cycle += parallel_window * r2c
    return cycle
def gen_ifmap_trace(
cycle=0,
num_rows=4, num_cols=4,
ifmap_h=7, ifmap_w=7,
filt_h=3, filt_w=3,
num_channels=3, stride=1,
parallel_window=1
):
postfix = ""
for c in range(num_cols):
postfix += ", "
E_h = math.floor((ifmap_h - filt_h + stride) / stride)
E_w = math.floor((ifmap_w - filt_w + stride) / stride)
e2 = E_h * E_w
r2c = filt_h * filt_w * num_channels
rc = filt_w * num_channels
hc = ifmap_w * num_channels
idle = num_rows - (r2c * parallel_window)
idle = max(idle, 0)
used_rows = num_rows - idle
# Adding entries for columns and empty | |
"""
Train the model. Use generators for data preparation and model_handler for access.
Generators have to be set in constructor of subclasses.
:param hparams: Hyper-parameter container.
:return: A tuple of (all test loss, all training loss, the model_handler object).
"""
self.sanity_check_train(hparams)
self.logger.info(hparams.get_debug_string())
network_summary = summary(self.model_handler.model, depth=100, verbose=0)
self.logger.info(network_summary)
if self.tb_writer is not None:
self.tb_writer.add_text("HParams", "<pre>" + hparams.get_debug_string() + "</pre>")
# self.tb_writer.add_graph(self.model_handler.model) # This would require an input.
self.tb_writer.add_text("Network", "<pre>" + str(network_summary) + "</pre>")
# Skip training if epochs is not greater 0.
if hparams.epochs <= 0:
self.logger.info("Number of training epochs is {}. Skipping training.".format(hparams.epochs))
return list(), list(), self.model_handler
self.logger.info("Training set size: {}".format(len(self.id_list_train)))
if self.id_list_val is not None and len(self.id_list_val) > 0:
self.log_validation_set()
self.log_test_set()
# Setup components.
if self.total_epoch is None: # TODO: remove again
self.total_epoch = self.total_steps // len(self.dataset_train)
elif self.total_steps is None:
self.total_steps = self.total_epoch * len(self.dataset_train)
self.model_handler.set_dataset(hparams, self.dataset_train, self.dataset_val, self.batch_collate_fn)
self.model_handler.set_optimiser(hparams)
self.model_handler.set_scheduler(hparams,
self.total_epoch if hparams.use_saved_learning_rate else 0,
self.total_steps if hparams.use_saved_learning_rate else 0)
self.model_handler.set_losses(self.loss_modules)
start_epoch = self.total_epoch
start_step = self.total_steps
steps_per_training_epoch = len(self.model_handler.dataloader_train) // hparams.batch_size_train
self.log_memory(hparams.use_gpu)
t_start = timer()
self.logger.info('Start training: {}'.format(
datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
logging.info("Train parameter:\n\t" + "\n\t".join(
sorted({"{} {}: {} {}".format(k, list(v.shape), v.requires_grad,
v.device)
for k, v in self.model_handler.model.named_parameters()})))
# Compute error before first iteration.
self.trained_epochs = []
if hparams.start_with_test:
self.logger.info('Test epoch [{}/{}]:'.format(start_epoch, start_epoch + hparams.epochs))
loss_dict = self.model_handler.test(hparams=hparams, total_epoch=start_epoch, total_steps=start_step,
current_epoch=start_epoch)
scheduler_loss = self.model_handler.get_summed_losses_subset(
losses=loss_dict,
loss_names=hparams.scheduler_loss_names)
if np.isnan(self.best_loss) or scheduler_loss < self.best_loss:
self.best_loss = scheduler_loss
self.record_validation_loss(loss_dict, self.total_epoch)
for current_epoch in range(1, hparams.epochs + 1):
self.logger.info('Train epoch [{}/{}], step [{}/{}]:'.format(
self.total_epoch + 1,
start_epoch + hparams.epochs,
self.total_steps + 1,
start_step + hparams.epochs * steps_per_training_epoch))
loss_dict = self.model_handler.train(hparams=hparams, total_epoch=self.total_epoch,
total_steps=self.total_steps, current_epoch=current_epoch)
self.total_epoch += 1
self.total_steps += steps_per_training_epoch
if self._has_nan_loss(loss_dict):
break
self.record_train_loss(loss_dict, self.total_epoch)
current_model_saved = False
if self.total_epoch % hparams.epochs_per_test == 0:
self.logger.info('Test epoch [{}/{}]:'.format(self.total_epoch, start_epoch + hparams.epochs))
loss_dict = self.model_handler.test(hparams=hparams, total_epoch=self.total_epoch,
total_steps=self.total_steps, current_epoch=current_epoch)
if self._has_nan_loss(loss_dict):
break
self.record_validation_loss(loss_dict, self.total_epoch)
scheduler_loss = self.model_handler.get_summed_losses_subset(
losses=loss_dict,
loss_names=hparams.scheduler_loss_names)
if hparams.use_saved_learning_rate:
scheduler_epoch = self.total_epoch
else:
scheduler_epoch = current_epoch
assert scheduler_epoch is not None
self.model_handler.run_scheduler(hparams=hparams,
loss=scheduler_loss,
current_epoch=scheduler_epoch)
if hparams.out_dir is not None:
if np.isnan(self.best_loss) or scheduler_loss < self.best_loss:
self.best_loss = scheduler_loss
self.save_checkpoint(hparams=hparams,
save_as_best_model=True)
current_model_saved = True
if hparams.epochs_per_checkpoint > 0 \
and self.total_epoch % hparams.epochs_per_checkpoint == 0:
self.save_checkpoint(hparams=hparams)
current_model_saved = True
if hparams.out_dir is not None and hparams.load_newest_checkpoint \
and not current_model_saved:
self.save_checkpoint(hparams=hparams,
save_as_last_model=True)
t_training = timer() - t_start
self.logger.info('Training time: ' + str(timedelta(seconds=t_training)))
self.log_losses(start_epoch=start_epoch)
if hparams.out_dir is not None:
# Check if best model should be used as final model.
# Only possible when it was saved in out_dir.
if hparams.use_best_as_final_model:
self.load_best_model(hparams)
if hparams.save_final_model:
self.save_checkpoint(hparams)
return (*self.get_losses(), self.model_handler)
def sanity_check_train(self, hparams):
assert self.model_handler is not None, "The init function has not been called before training."
hparams.verify() # Verify that attributes were added correctly, print warning for wrongly initialized ones.
# Some sanity checks.
if hparams.epochs_per_scheduler_step:
if hparams.epochs_per_test > hparams.epochs_per_scheduler_step:
self.logger.warning("Model is validated only every {} epochs, ".format(hparams.epochs_per_test) +
"but scheduler is supposed to run every {} epochs.".format(
hparams.epochs_per_scheduler_step))
if hparams.epochs_per_test % hparams.epochs_per_scheduler_step != 0:
self.logger.warning("hparams.epochs_per_test ({}) % hparams.epochs_per_scheduler_step ({}) != 0. "
.format(hparams.epochs_per_test, hparams.epochs_per_scheduler_step) +
"Note that the scheduler is only run when current_epoch % " +
"hparams.epochs_per_scheduler_step == 0. Therefore hparams.epochs_per_scheduler_step " +
"should be a factor of hparams.epochs_per_test.")
def log_validation_set(self):
if self.id_list_val is not None:
sorted_keys = sorted(self.id_list_val)
self.logger.info("Validation set ({}): {}".format(
len(self.id_list_val), self.id_list_to_str(sorted_keys)))
def log_test_set(self):
if self.id_list_test is not None:
sorted_keys = sorted(self.id_list_test)
self.logger.info("Test set ({}): {}".format(
len(sorted_keys), self.id_list_to_str(sorted_keys)))
@staticmethod
def id_list_to_str(id_list):
return " ".join([os.path.join(os.path.split(os.path.dirname(id_name))[-1],
os.path.splitext(os.path.basename(id_name))[0]) for id_name in id_list])
@staticmethod
def _has_nan_loss(loss_dict):
for key, loss in loss_dict.items():
if np.isnan(loss).any():
return True
return False
    def record_train_loss(self, loss_dict: Dict, epoch: int):
        """Append the epoch's training losses to the in-memory history.

        :param loss_dict: Mapping of loss name to value for this epoch.
        :param epoch: Total epoch number the losses belong to.
        """
        self.train_losses.append((loss_dict, epoch))
    def record_validation_loss(self, loss_dict: Dict, epoch: int):
        """Append the epoch's validation losses to the in-memory history.

        :param loss_dict: Mapping of loss name to value for this epoch.
        :param epoch: Total epoch number the losses belong to.
        """
        self.validation_losses.append((loss_dict, epoch))
def _get_loss_names(self):
if len(self.train_losses) > 0:
return list(self.train_losses[0][0].keys())
elif len(self.validation_losses) > 0:
return list(self.validation_losses[0][0].keys())
else:
return None
def log_losses(self, start_epoch: int = -1):
loss_names = self._get_loss_names()
if loss_names is None:
return
for loss_name in loss_names:
train_losses = np.array([loss[loss_name] for loss, epoch in self.train_losses
if epoch >= start_epoch])
validation_losses = np.array([loss[loss_name] for loss, epoch in self.validation_losses
if epoch >= start_epoch])
logging.info('Loss {} validation progress: '.format(loss_name)
+ ', '.join('{:.4f}'.format(loss) for loss in validation_losses))
logging.info('Loss {} train progress: '.format(loss_name)
+ ', '.join('{:.4f}'.format(loss) for loss in train_losses))
def get_losses(self, start_epoch: int = -1):
loss_names = self._get_loss_names()
if loss_names is None:
return
train_loss_dict = {}
validation_loss_dict = {}
for loss_name in loss_names:
train_losses = np.array([loss[loss_name] for loss, epoch in self.train_losses
if epoch >= start_epoch])
validation_losses = np.array([loss[loss_name] for loss, epoch in self.validation_losses
if epoch >= start_epoch])
train_loss_dict[loss_name] = train_losses
validation_loss_dict[loss_name] = validation_losses
return validation_loss_dict, train_loss_dict
def test(self, hparams):
self.model_handler.set_dataset(hparams, self.dataset_train, self.dataset_val, self.batch_collate_fn)
self.model_handler.set_losses(self.loss_modules)
self.log_validation_set()
loss_dict = self.model_handler.test(hparams=hparams, total_epoch=self.total_epoch, total_steps=self.total_steps,
current_epoch=self.total_epoch)
self.logger.info('\n\t' + '\n\t'.join('Loss {} validation: {:.4f}'.format(
loss_name, loss_value) for loss_name, loss_value in loss_dict.items()))
return loss_dict
def forward(self, hparams: ExtendedHParams, ids_input: Union[str, List[str], Tuple[str, ...], os.PathLike],
post_processing_mapping: Dict[str, str]):
"""
Forward all given ids through the network in batches of hparams.batch_size_val.
:param hparams: Hyper-parameter container.
:param ids_input: Can be full path to file with ids, list of ids, or one id.or None.
:return: (Dictionary of network outputs, dictionary of post-processed (by self.OutputGen) network outputs)
"""
assert self.model_handler is not None, "trainer.init() called before?"
id_list = self._input_to_str_list(ids_input)
self.logger.info("Start forwarding [{0}]".format(", ".join(str(i) for i in id_list)))
t_start = timer()
model_output, model_output_post = self._forward_batched(
batch_size=hparams.batch_size_val,
hparams=hparams,
id_list=id_list,
post_processing_mapping=post_processing_mapping,
benchmark=False,
gen_figure=False,
synth=False)
t_training = timer() - t_start
self.logger.info('Forwarding time for {} sample(s): {}'.format(len(id_list), timedelta(seconds=t_training)))
return model_output, model_output_post
def synth(self,
hparams: ExtendedHParams,
ids_input: Union[str, List[str], Tuple[str, ...], os.PathLike],
post_processing_mapping: Dict[str, str],
plotter_configs: List[DataPlotter.Config] = None):
"""
Synthesise all given ids with the self.synthesize function.
:param hparams: Hyper-parameter container.
:param ids_input: Can be full path to file with ids, list of ids, or one id.
:return: (Dictionary of network outputs, dictionary of post-processed (by self.OutputGen) network outputs)
"""
assert self.model_handler is not None, "trainer.init() called before?"
id_list = self._input_to_str_list(ids_input)
self.logger.info("Start synthesising [{0}]".format(
", ".join(str(i) for i in id_list)))
t_start = timer()
model_output, model_output_post = self._forward_batched(
batch_size=hparams.batch_size_synth,
hparams=hparams,
id_list=id_list,
post_processing_mapping=post_processing_mapping,
plotter_configs=plotter_configs,
benchmark=False,
gen_figure=hparams.synth_gen_figure,
synth=True)
t_training = timer() - t_start
self.logger.info('Synthesis time for {} sample(s): {}'.format(
len(id_list), timedelta(seconds=t_training)))
return model_output, model_output_post
def gen_figure(self,
hparams: ExtendedHParams,
ids_input: Union[str, List[str], Tuple[str, ...], os.PathLike],
post_processing_mapping: Dict[str, str],
plotter_configs: List[DataPlotter.Config]):
"""
Generate figures for all given ids with the self.gen_figure_from_output function (has to be implemented).
:param hparams: Hyper-parameter container.
:param ids_input: Can be full path to file with ids, list of ids, or one id.
:return: (Dictionary of network outputs, dictionary of post-processed (by self.OutputGen) network outputs)
"""
assert self.model_handler is not None, "trainer.init() called before?"
id_list = self._input_to_str_list(ids_input)
self.logger.info("Start generating figures for [{0}]".format(
", ".join(str(i) for i in id_list)))
t_start = timer()
model_output, model_output_post = self._forward_batched(
batch_size=hparams.batch_size_gen_figure,
hparams=hparams,
id_list=id_list,
post_processing_mapping=post_processing_mapping,
plotter_configs=plotter_configs,
benchmark=False,
gen_figure=True,
synth=False)
t_training = timer() - t_start
self.logger.info('Figure gen. time for {} sample(s): {}'.format(
len(id_list), timedelta(seconds=t_training)))
return model_output, model_output_post
def benchmark(self, hparams: ExtendedHParams, post_processing_mapping: Dict[str, str],
ids_input: Union[str, List[str], Tuple[str, ...], os.PathLike] = None):
"""
Benchmark the currently loaded model using the self.compute_score function (has to be implemented).
:param hparams: Hyper-parameter container.
:param ids_input: Can be full path to file with ids, list of ids, one id, or None.
If ids_inputs=None benchmark on test set if not None, otherwise on validation set.
:return: Score(s).
"""
assert callable(getattr(self, 'compute_score', None)), "Function has to be implemented for this trainer."
assert self.model_handler is not None, "trainer.init() called before?"
# Select test or validation set when ids are not given explicitly.
if ids_input is None:
if self.id_list_test is not None and len(self.id_list_test) > 0:
id_list = sorted(self.id_list_test)
self.logger.info("Start benchmark on test set ({}): [{}]"
.format(len(id_list), ", ".join(str(i) for i in id_list)))
elif self.id_list_val is not None and len(self.id_list_val) > 0:
id_list = sorted(self.id_list_val)
self.logger.info("Start benchmark on validation set ({}): [{}]"
.format(len(id_list), ", ".join(str(i) for i in id_list)))
else:
raise ValueError("No id list can be selected for benchmark, because non was given as parameter "
"and test and validation set are empty.")
else:
id_list = self._input_to_str_list(ids_input)
self.logger.info("Start benchmark on given input ({}): | |
<filename>client/bot.py
# -*- coding:utf-8 -*-
import os
import sys
import json
import hashlib
import requests
import platform
import configparser
# Prevent CLI output pollution
requests.packages.urllib3.disable_warnings()
import subprocess
import client
class Bot(object):
    def __init__(self, logger, ra_url, path, verbose=True):
        """Initialise the bot: load the local certificate collection, check its
        compliance against the RA, normalise connection settings and verify
        the CA certificate.

        :param logger: Logger object exposing a ``write(message, level=...)`` method.
        :param ra_url: Base URL of the Registration Authority.
        :param path: Local working directory holding keys and certificates.
        :param verbose: Verbosity flag (stored; not read in this method).
        :raises Exception: when any initialisation step fails.
        """
        self._logger = logger
        self._verbose = verbose
        self._unsecure = False  # Becomes True only if the user accepts plain HTTP.
        self._path = path
        self._ra_url = ra_url
        # Well-known certificate filenames inside the working directory.
        self.ca_cert = os.path.join(self._path, 'ca.crt')
        self.crl_crt = os.path.join(self._path, 'crl.pem')
        try:
            self.collection = client.Collection(self._path)
        except Exception as err:
            raise Exception('Unable to initialize collection: {e}'.format(e=err))
        try:
            # Store every certificates found
            self.collection.list_nodes()
            # Check compliance
            self.collection.check_compliance(self._ra_url)
        except Exception as err:
            raise Exception('Unable to list certificates: {e}'.format(e=err))
        try:
            # Configure connection settings
            self.__setup_connection()
        except Exception as err:
            raise Exception(err)
        try:
            # Always check CA certificate
            self.get_ca_checksum()
        except Exception as err:
            raise Exception('Unable to calculate CA certificate checksum: {e}'.format(e=err))
        if os.path.isfile(self.ca_cert):
            # Extract the CA common name by shelling out to openssl/sed.
            # NOTE(review): assumes openssl and sed are on PATH — confirm.
            stream = os.popen("openssl x509 -noout -subject -in {ca} -nameopt multiline | sed -n 's/ *commonName *= //p'".format(ca=self.ca_cert))
            self.ca_name = stream.read().rstrip()
def __setup_connection(self):
# Remove trailing slash if needed
if self._ra_url[-1] == '/':
self._ra_url = self._ra_url[:-1]
if self._ra_url.startswith('http://'):
self._output('Using unsecured protocol "http://" is NOT recommended...', level="warning")
while True:
rep = input('Do you want to continue ? [y/N]')
if rep.lower() == 'y':
self._unsecure = True
break
raise Exception('Unsecure protocol refused by user.')
elif not self._ra_url.startswith('https://'):
self._ra_url = 'https://' + self._ra_url
self.headers = {'User-Agent':'uPKI client agent', 'Content-Type': 'application/json'}
    def __request(self, url, data=None, cert=None, verb='GET', verify=False, text=False):
        """Perform an HTTP(S) request against the RA and parse the answer.

        :param url: Full URL to request.
        :param data: Optional payload; JSON-serialised before sending.
        :param cert: Optional client certificate tuple/path passed to requests.
        :param verb: 'GET' or 'POST' (case-insensitive); anything else is rejected.
        :param verify: CA bundle path, or False to skip TLS verification.
        :param text: When True, return the raw body (used for CA/CRL PEM files).
        :return: Raw text when ``text`` is True, otherwise the decoded JSON dict.
        :raises NotImplementedError: for unsupported verbs.
        :raises Exception: on transport errors, non-200 answers, unparsable
            JSON, or a JSON answer whose 'status' is not 'success'.
        """
        if verb.upper() not in ['GET','POST']:
            raise NotImplementedError('Unsupported action')
        # Resolve requests.get / requests.post dynamically from the verb.
        action = getattr(requests, verb.lower())
        json_data = json.dumps(data) if data else None
        try:
            r = action(url, data=json_data, headers=self.headers, verify=verify, cert=cert)
        except Exception as err:
            raise Exception('Unable to make TLS request: {e}'.format(e=err))
        if r.status_code != 200:
            raise Exception("HTTP(S) Request Error: {e}".format(e=r.content))
        # For CA and CRL certificates
        if text:
            return r.text
        try:
            data = r.json()
        except ValueError as err:
            # Body is not valid JSON: surface the raw text if available.
            try:
                error = r.text
                raise Exception(error)
            except AttributeError:
                raise Exception('Unable to parse JSON answer; {e}'.format(e=err))
        if data.get('status') != 'success':
            raise Exception(data.get('message', 'Unknown error'))
        return data
def __execute(self, cmd, cwd=None):
try:
self._output("Execute command: {c}".format(c=cmd), level='DEBUG')
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self._path, executable='/bin/bash')
p.wait()
except Exception as err:
raise Exception('Unable to execute command: {e}'.format(e=err))
def _output(self, message, level=None):
try:
self._logger.write(message, level=level)
except Exception as err:
sys.out.write('Unable to log: {e}'.format(e=err))
def _get_mozilla_profile(self):
# Switch based on platform
if platform.system() == 'Linux':
f_path = os.path.expanduser('~/.mozilla/firefox')
alt_path = os.path.expanduser('~/snap/firefox/common/.mozilla/firefox')
if os.path.isdir(f_path):
mozilla_profile = f_path
elif os.path.isdir(alt_path):
mozilla_profile = alt_path
else:
raise NotImplementedError('Firefox has not been detected on this system')
elif platform.system() == 'Darwin':
if os.path.isdir(os.path.expanduser('~/Library/Application Support/Firefox/Profiles')):
mozilla_profile = os.path.expanduser('~/Library/Application Support/Firefox/Profiles')
else:
raise NotImplementedError('Firefox has not been detected on this system')
elif platform.system() == 'Windows':
if os.path.isdir(os.path.join(os.getenv('APPDATA'), r'Mozilla\Firefox')):
mozilla_profile = os.path.join(os.getenv('APPDATA'), r'Mozilla\Firefox')
else:
raise NotImplementedError('Firefox has not been detected on this system')
mozilla_profile_ini = os.path.join(mozilla_profile, r'profiles.ini')
profile = configparser.ConfigParser()
profile.read(mozilla_profile_ini)
data_path = os.path.normpath(os.path.join(mozilla_profile, profile.get('Profile0', 'Path')))
return data_path
    def _add_to_firefox(self, p12_file, passwd):
        """Import the CA certificate and the user's PKCS#12 bundle into the
        default Firefox profile using the NSS command-line tools.

        :param p12_file: Path to the user's .p12 bundle.
        :param passwd: Password protecting the .p12 bundle.
        :return: True (individual failures are logged, not raised).
        """
        self._output('Get Mozilla profile', level='debug')
        data_path = self._get_mozilla_profile()
        self._output('Found Firefox profile DB: {f}'.format(f=data_path), level='debug')
        try:
            self._output('Add {n} in Firefox'.format(n=self.ca_name))
            # certutil: register the CA as a trusted root in the profile's NSS DB.
            cmd = "certutil -A -n '{n}' -t 'TC,,' -i {ca} -d sql:{d}".format(n=self.ca_name, ca=self.ca_cert, d=data_path)
            self._output("> {c}".format(c=cmd), level='debug')
            self.__execute(cmd)
        except Exception as err:
            self._output('Unable to add Root CA in Firefox', level='error')
        try:
            self._output('Add user certificate in Firefox')
            # pk12util: import the user's certificate + key bundle.
            cmd = "pk12util -i {c} -d sql:{d} -W '{p}'".format(c=p12_file, d=data_path, p=passwd)
            self._output("> {c}".format(c=cmd), level='debug')
            self.__execute(cmd)
        except Exception as err:
            self._output('Unable to add user certificate in Firefox', level='error')
        return True
    def _add_to_chrome(self, p12_file, pem_file, passwd):
        """Import the CA and user certificates into Chrome's store (Linux NSS
        DB) or the macOS KeyChains.

        :param p12_file: Path to the user's .p12 bundle (Linux import).
        :param pem_file: Path to the user's PEM file (macOS import).
        :param passwd: Password protecting the .p12 bundle.
        :return: True (individual failures are logged, not raised).
        :raises FileNotFoundError: when no Chrome NSS DB / KeyChain is found.
        :raises NotImplementedError: on unsupported platforms.
        """
        if platform.system() == 'Linux':
            if os.path.isdir(os.path.expanduser('~/.pki/nssdb')):
                data_path = os.path.expanduser('~/.pki/nssdb')
                try:
                    self._output('Add {n} in Chrome'.format(n=self.ca_name))
                    # certutil: register the CA as a trusted root in the shared NSS DB.
                    cmd = "certutil -A -n '{n}' -t 'TC,,' -i {ca} -d sql:{d}".format(n=self.ca_name, ca=self.ca_cert, d=data_path)
                    self._output("> {c}".format(c=cmd), level='debug')
                    self.__execute(cmd)
                except Exception as err:
                    self._output('Unable to add Root CA in Chrome', level='error')
                try:
                    self._output('Add user certificate in Chrome')
                    cmd = "pk12util -i {c} -d sql:{d} -W '{p}'".format(c=p12_file, d=data_path, p=passwd)
                    self._output("> {c}".format(c=cmd), level='debug')
                    self.__execute(cmd)
                except Exception as err:
                    self._output('Unable to add user certificate in Chrome', level='error')
            else:
                raise FileNotFoundError('Chrome has not been detected on this system')
        elif platform.system() == 'Darwin':
            # Add to System KeyChain
            if os.path.isfile('/Library/Keychains/System.keychain'):
                data_path = '/Library/Keychains/System.keychain'
                try:
                    self._output('[+] Run following command to import ProHacktive Root CA in System KeyChain')
                    cmd = "sudo security add-trusted-cert -d -r trustRoot -k {d} {ca}".format(d=data_path, ca=self.ca_cert)
                    self._output("> {c}".format(c=cmd), level='debug')
                    self.__execute(cmd)
                except Exception as err:
                    self._output('Unable to add Root CA in System KeyChain', level='error')
            # Add to User KeyChain
            if os.path.isfile(os.path.expanduser('~/Library/Keychains/login.keychain')):
                data_path = os.path.expanduser('~/Library/Keychains/login.keychain')
                try:
                    self._output('[+] Run following command to import ProHacktive Root CA in User KeyChain')
                    cmd = "sudo security add-trusted-cert -d -r trustRoot -k {d} {ca}".format(d=data_path, ca=self.ca_cert)
                    self._output("> {c}".format(c=cmd), level='debug')
                    self.__execute(cmd)
                except Exception as err:
                    self._output('Unable to add Root CA in Login KeyChain', level='error')
                try:
                    self._output('Add user certificate in KeyChain')
                    # # Old version need a password
                    # cmd = "security import {c} -k {d} -P '{p}'".format(c=p12_file, d=data_path, p=passwd)
                    # New version is passwordless
                    cmd = "certtool i {c}".format(c=pem_file)
                    self._output("> {c}".format(c=cmd), level='debug')
                    self.__execute(cmd)
                except Exception as err:
                    self._output('Unable to add user certificate in Login KeyChain', level='error')
            else:
                raise FileNotFoundError('No KeyChain detected on this system')
        else:
            raise NotImplementedError('Sorry this OS is not supported yet.')
        return True
    def get_ca_checksum(self):
        """Fetch the CA certificate from the RA, compare its SHA-256 hash with
        the local copy, and (with user consent) stage an update.

        On first run the warning is logged and the fetched PEM is written and
        write-protected. On a hash mismatch the user is prompted; acceptance
        lifts the file protection so the new PEM can be written below.

        :return: True when the check completes.
        :raises Exception: on fetch failure, user refusal, or chmod failure.
        """
        try:
            self._output('Check CA certificate', level="DEBUG")
            ca_pem = self.__request(self._ra_url + '/certs/ca.crt', text=True)
        except Exception as err:
            raise Exception(err)
        # Init hash function
        received = hashlib.sha256(ca_pem.encode('utf-8')).hexdigest()
        self._output('CA certificate hash received: {s}'.format(s=received), level='debug')
        if os.path.isfile(self.ca_cert):
            with open(self.ca_cert, 'rt') as f:
                raw = f.read()
            found = hashlib.sha256(raw.encode('utf-8')).hexdigest()
            if found != received:
                self._output('OLD CA certificate hash was: {s}'.format(s=found), level='debug')
                self._output('NEW CA certificate received!', level="warning")
                # Ask once; any answer other than 'y' aborts with an exception.
                while True:
                    rep = input('Would you like to update it ? [y/N]')
                    if rep.lower() == 'y':
                        break
                    raise Exception('CA certificate change refused by user.')
                # Remove CA protection
                try:
                    os.chmod(self.ca_cert, 0o600)
                except Exception as err:
                    raise Exception('Unable to remove CA certificate protection')
            else:
                # If nothing has changed abort
                self._output('CA certificate unchanged', level='debug')
                return True
        else:
            self._output('CA certificate first installation', level="warning")
        # Rewrite CA certificate
        with open(self.ca_cert,'wt') as f:
            f.write(ca_pem)
        # Protect CA certificate
        try:
            os.chmod(self.ca_cert, 0o444)
        except Exception as err:
            raise Exception('Unable to protect CA certificate')
        return True
def add_node(self, name, profile, sans=[], p12=False, passwd=None, chrome=False, firefox=False):
if name is None:
name = input('Enter your node name (CN): ')
if profile is None:
profile = input('Enter your profile: ')
# Force p12 output if browser certificate needs to be generated
p12 = True if (chrome or firefox) else p12
# Store filenames
key_file = os.path.join(self._path, "{p}.{n}.key".format(p=profile, n=name))
req_file = os.path.join(self._path, "{p}.{n}.csr".format(p=profile, n=name))
crt_file = os.path.join(self._path, "{p}.{n}.crt".format(p=profile, n=name))
try:
self._output('Register node in local collection', level="DEBUG")
self.collection.register(self._ra_url, name, profile, sans, p12=p12, passwd=<PASSWORD>, chrome=chrome, firefox=firefox)
except Exception as err:
if "node already exists" in str(err).lower():
raise RuntimeError(err)
raise Exception('Unable to add node: {e}'.format(e=err))
# Avoid re-generate key if exists
if os.path.isfile(key_file) and os.path.isfile(req_file):
self._output('Skip key and CSR generation as they already exists', level='WARNING')
else:
try:
self._output('Request openssl command', level="DEBUG")
data = self.__request(self._ra_url + '/magic/' + profile, data={'cn': name, 'sans': sans}, verify=self.ca_cert, verb="POST")
except Exception as err:
raise Exception(err)
try:
cmd = data['command']
except KeyError:
raise Exception('Unable to get magic command')
try:
self.__execute(cmd)
except Exception as err:
raise Exception('Unable to execute magic command: {e}'.format(e=err))
try:
# Protect key and csr from re-write
os.chmod(key_file, 0o440)
os.chmod(req_file, 0o444)
except Exception as err:
raise Exception('Unable to protect key and certificate request')
with open(req_file, 'rt') as f:
csr = f.read()
try:
self._output('Request certificate', level="DEBUG")
data = self.__request(self._ra_url + '/certify', data={'CSR':csr}, verb="POST", verify=self.ca_cert)
except Exception as err:
if "certificate already generated" in str(err).lower():
raise RuntimeError(err)
raise Exception(err)
try:
data['certificate']
except KeyError:
raise Exception('Missing certificate')
try:
self._output('Update certificate status to signed', level="DEBUG")
self.collection.sign(name, profile)
except Exception as err:
raise Exception('Unable to update certificate status: {e}'.format(e=err))
with open(crt_file, 'wb') as f:
self._output('Writing certificate to {p}'.format(p=crt_file))
f.write(data['certificate'].encode('utf-8'))
try:
# Protect certificate from re-write
os.chmod(crt_file, 0o444)
except Exception as err:
raise Exception('Unable to protect certificate')
self._output('Generate PEM file with key and certificates')
with open(crt_file , 'rt') as f:
crt_content = f.read()
with open(key_file, 'rt') as f:
key_content = f.read()
pem_file = os.path.join(self._path, "{p}.{n}.pem".format(p=profile, n=name))
with open(pem_file, 'wt') as f:
f.write(crt_content)
f.write(key_content)
# Protect pem from re-write
| |
from __future__ import print_function
print(__doc__)
import os
import sys
import numpy as np
#import matplotlib
#matplotlib.use('AGG') # Do this BEFORE importing matplotlib.pyplot
import matplotlib.pyplot as plt
#from matplotlib.colors import Normalize
import matplotlib.colors as colors
import matplotlib.cm as cm
from sklearn import cross_validation
from sklearn import tree
from sklearn.ensemble import AdaBoostClassifier
from rep.estimators import XGBoostClassifier
from rep.estimators import SklearnClassifier
from sklearn.svm import SVC
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils, visualize_util
from scipy import stats
import math
import p_value_scoring_object
import subprocess
import multiprocessing
import shutil
import time
#import signal
# Function definitions
def make_keras_model(n_hidden_layers, dimof_middle, dimof_input):
    """Build and compile a fully-connected binary classifier in Keras.

    Architecture: input Dense(relu) -> n_hidden_layers x Dense(relu), each
    followed by Dropout(0.5) -> Dense(1, sigmoid).

    NOTE(review): uses the legacy Keras 1.x API (output_dim/init kwargs,
    class_mode, visualize_util) — confirm the installed Keras version matches.

    :param n_hidden_layers: Number of additional hidden layers.
    :param dimof_middle: Width of each hidden layer.
    :param dimof_input: Number of input features.
    :return: The compiled Sequential model (also writes 'model.png').
    """
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Activation, Flatten
    from keras.utils import np_utils, generic_utils
    from keras.wrappers.scikit_learn import KerasClassifier
    dimof_output =1
    print("dimof_input : ",dimof_input, "dimof_output : ", dimof_output)
    # batch_size and countof_epoch are defined but not used in compile();
    # presumably consumed by the surrounding fit configuration — verify.
    batch_size = 100
    dropout = 0.5
    countof_epoch = 5
    model = Sequential()
    model.add(Dense(input_dim=dimof_input, output_dim=dimof_middle, init="glorot_uniform",activation='relu'))
    model.add(Dropout(dropout))
    for n in range(n_hidden_layers):
        model.add(Dense(input_dim=dimof_middle, output_dim=dimof_middle, init="glorot_uniform",activation='relu'))
        model.add(Dropout(dropout))
    model.add(Dense(input_dim=dimof_middle, output_dim=dimof_output, init="glorot_uniform",activation='sigmoid'))
    #Compiling (might take longer)
    model.compile(class_mode='binary',loss='binary_crossentropy', optimizer='adam',metrics=["accuracy"])
    visualize_util.plot(model, to_file='model.png')
    return model
class Counter(object):
    """Single-shot iterator that yields (list1, list2) exactly once.

    Used to perform cross-validation with a single predefined split: the
    iterator protocol is satisfied, but only one (train, test) pair is ever
    produced.
    """
    def __init__(self, list1, list2):
        self.current = 1  # Consumed once self.current exceeds 1.
        self.list1 = list1
        self.list2 = list2
    def __iter__(self):
        """Return itself as an iterator object."""
        return self
    def __next__(self):
        """Yield the stored pair on the first call, then raise StopIteration."""
        if self.current > 1:
            raise StopIteration
        self.current += 1
        return self.list1, self.list2
    next = __next__  # Python 2 compatibility
def histo_plot_pvalue(U_0,abins,axlabel,aylabel,atitle,aname):
    """Histogram a list of p-values and annotate 1/2/3-sigma exceedance counts.

    Writes '<aname>_p_values_1_2_3_std_dev.txt' (tab-separated counts) and
    '<aname>_p_values_plot.png'.

    :param U_0: Iterable of p-values in [0, 1].
    :param abins: Bin specification forwarded to np.histogram.
    :param axlabel: X-axis label.
    :param aylabel: Y-axis label.
    :param atitle: Plot title.
    :param aname: Basename prefix for the output files.
    """
    bins_probability=np.histogram(U_0,bins=abins)[1]
    #Finding the p values corresponding to 1,2 and 3 sigma significance.
    no_one_std_dev=sum(i < (1-0.6827) for i in U_0)
    no_two_std_dev=sum(i < (1-0.9545) for i in U_0)
    no_three_std_dev=sum(i < (1-0.9973) for i in U_0)
    print(aname, " : ", no_one_std_dev,no_two_std_dev,no_three_std_dev)
    with open(aname+"_p_values_1_2_3_std_dev.txt",'w') as p_value_1_2_3_std_dev_file:
        p_value_1_2_3_std_dev_file.write(str(no_one_std_dev)+'\t'+str(no_two_std_dev)+'\t'+str(no_three_std_dev)+'\n')
    #plt.rc('text', usetex=True)
    textstr = '$1\sigma=%i$\n$2\sigma=%i$\n$3\sigma=%i$'%(no_one_std_dev, no_two_std_dev, no_three_std_dev)
    plt.figure()
    # Making a histogram of the probability predictions of the algorithm.
    fig_pred_0= plt.figure()
    ax1_pred_0= fig_pred_0.add_subplot(1, 1, 1)
    n0, bins0, patches0 = ax1_pred_0.hist(U_0, bins=bins_probability, facecolor='red', alpha=0.5)
    ax1_pred_0.set_xlabel(axlabel)
    ax1_pred_0.set_ylabel(aylabel)
    ax1_pred_0.set_title(atitle)
    plt.xlim([0,1])
    # these are matplotlib.patch.Patch properties
    props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
    # place a text box in upper left in axes coords
    ax1_pred_0.text(0.85, 0.95, textstr, transform=ax1_pred_0.transAxes, fontsize=14,
        verticalalignment='top', bbox=props)
    fig_pred_0.savefig(aname+"_p_values_plot.png")
    #fig_pred_0.show()
    plt.close(fig_pred_0)
def plot_2D_opt_graph(expt,nclf):
    """Plot the 2-D hyper-parameter optimisation landscape and return the best point.

    Loads '<name>_..._optimisation_values.txt' (columns: param1, param2,
    p-value), scatter-plots the p-values on a log colour scale, highlights the
    minimum, writes '<filename>_best.txt' and '<filename>.png'.

    :param expt: Experiment object (name_CPV, scoring, optimisation settings).
    :param nclf: Classifier container (name, param_list).
    :return: [best_param1, best_param2, best_p_value].
    """
    class_name = nclf.name
    name = expt.name_CPV.format(expt.optimisation_dimension)
    if expt.scoring=="chi2": self_name_CPV= expt.name_CPV+ "_chi2scoring_" + str(expt.optimisation_no_bins)
    else: self_name_CPV= expt.name_CPV
    filename= nclf.name +"_"+ self_name_CPV.format(expt.optimisation_dimension)+"_optimisation_values"
    data=np.loadtxt(filename+".txt",dtype='d')
    number_of_iterations = data.shape[0]
    # Cap at the configured iteration budget.
    if number_of_iterations > expt.number_of_iterations: number_of_iterations = expt.number_of_iterations
    #print("data : ", data)
    #print("number_of_iterations : ",number_of_iterations)
    x=data[:number_of_iterations,0]
    y=data[:number_of_iterations,1]
    z=data[:number_of_iterations,2]
    avmin= np.min(z)
    # Smallest non-zero p-value, used to size the log colour-bar ticks.
    avmin_nonzero = np.min([e for i, e in enumerate(z) if e != 0])
    cm = plt.cm.get_cmap('RdYlBu')
    fig= plt.figure()
    ax1= fig.add_subplot(1, 1, 1)
    sc = ax1.scatter(x,y,c=z,s=35,cmap=cm, norm=colors.LogNorm(),vmin=avmin,vmax=1)
    #print("z : ",z)
    # Lowest p-value marks the best hyper-parameter pair.
    index = np.argmin(z)
    #print("index of max : ",index)
    val_max = [x[index],y[index],z[index]]
    #print("\n",nclf.name," values of max : ",val_max,"\n")
    ax1.scatter(x[index],y[index],c=z[index], norm=colors.LogNorm(),s=50, cmap=cm,vmin=avmin,vmax=1)
    with open(filename+"_best.txt",'w') as best_file:
        best_file.write(str(x[index])+'\t'+str(y[index])+'\t'+str(z[index])+'\n')
    avmin_power = int(math.floor(math.log10(avmin_nonzero)))
    ticks, ticklabels = [1],['1']
    for i in range(1,-avmin_power):
        ticks.append(np.power(10.,float(-i)))
        ticklabels.append('1E-'+str(i))
    cb=fig.colorbar(sc,ticks=ticks)
    cb.ax.set_yticklabels(ticklabels)
    cb.set_label('p value')
    ax1.set_xlabel(nclf.param_list[0])
    ax1.set_ylabel(nclf.param_list[1])
    ax1.set_title('hyperparam opt '+class_name+"\n"+expt.title_CPV)
    print(nclf.name," saving to "+filename+".png \n")
    fig.savefig(filename+".png")
    plt.close(fig)
    return val_max
#######################################################################################################################################################################################################
#######################################################################################################################################################################################################
#######################################################################################################################################################################################################
class nclf(object):
    """Container bundling a named classifier with its two tunable hyper-parameters.

    :param name: Short classifier identifier (e.g. 'dt', 'bdt', 'xgb', 'svm', 'nn').
    :param clf: The classifier instance (or a placeholder string for 'nn').
        NOTE(review): the default estimator instance is shared across all
        callers that rely on the default — confirm this is acceptable.
    :param param_list: Names of exactly two hyper-parameters to optimise.
    :param range_list: [min, max] ranges matching param_list.
    :param param_opt: Optimised parameter values (filled in after optimisation).
    :param clf_nn_dict: Per-dimension Keras classifiers (only used for 'nn').
    """
    def __init__(self, name = 'dt', clf = tree.DecisionTreeClassifier(), param_list = ['max_depth','min_samples_split'], range_list = [[1, 60],[2,100]], param_opt=None, clf_nn_dict=None ):
        self.name = name
        self.clf = clf
        self.param_list = param_list
        self.range_list = range_list
        # Fix: avoid shared mutable default arguments for the two attributes
        # that get mutated later; None defaults are backward-compatible.
        self.param_opt = [] if param_opt is None else param_opt
        self.clf_nn_dict = {} if clf_nn_dict is None else clf_nn_dict
        assert(len(param_list)==2), "only 2 parameters can be varied"
    def __str__(self):
        # Fix: __str__ previously printed and returned None, which raises
        # "TypeError: __str__ returned non-string" on str(obj)/print(obj).
        return ("\nname : {}\nclf : {}\nparam_list : {}\nrange_list : {}"
                "\nparam_opt : {}".format(self.name, self.clf, self.param_list,
                                          self.range_list, self.param_opt))
def name_to_nclf(name):
    """Build a preset nclf container for a known classifier name.

    :param name: One of 'dt', 'bdt', 'xgb', 'svm', 'nn'.
    :return: A pre-configured nclf instance.
    :raises ValueError: for unknown names (previously an UnboundLocalError
        was raised on the return statement).
    """
    if name=="dt":
        anclf = nclf('dt',tree.DecisionTreeClassifier(),['max_depth','min_samples_split'], [[1, 60],[2,100]])
    elif name=="bdt":
        anclf = nclf('bdt',AdaBoostClassifier(base_estimator=tree.DecisionTreeClassifier(max_depth=2)), ['learning_rate','n_estimators'], [[0.01,2.0],[100,1000]])
    elif name=="xgb":
        anclf = nclf('xgb',XGBoostClassifier(), ['n_estimators','eta'], [[10,1000],[0.01,1.0]])
    elif name=="svm":
        anclf = nclf('svm',SVC(probability=True, cache_size=7000), ['C','gamma'], [[1.0,1000.0],[1E-6,0.1]])
    elif name=="nn":
        anclf = nclf('nn',"no classifier needed for nn", ['n_hidden_layers','dimof_middle'], [[0,1],[100,500]])
    else:
        raise ValueError("Unknown classifier name: {}".format(name))
    return anclf
#######################################################################################################################################################################################################
#######################################################################################################################################################################################################
#######################################################################################################################################################################################################
def optimise_job(expt,nclf,out_q):
    """Run a Spearmint hyper-parameter optimisation for one classifier.

    Works inside a per-classifier subdirectory: writes the Spearmint config,
    launches Spearmint as a subprocess, polls for the 'optimisation_done_flag'
    file, kills the subprocess, plots the landscape and puts the updated nclf
    (with param_opt filled in, applied to nclf.clf) on the result queue.

    NOTE(review): uses the builtin `long`, which only exists in Python 2 —
    this function would raise NameError on Python 3; confirm target runtime.

    :param expt: Experiment object (spearmint_directory, plotting config, ...).
    :param nclf: Classifier container to optimise; mutated and re-queued.
    :param out_q: multiprocessing queue receiving the updated nclf.
    """
    print("\n"+nclf.name+" nclf.clf :",nclf.clf,"\n")
    print(multiprocessing.current_process().name, " : ",nclf.name , " Starting")
    if not os.path.exists(nclf.name):
        os.makedirs(nclf.name)
    os.chdir(nclf.name)
    # Stale flag from a previous run would end the poll loop immediately.
    if os.path.exists("optimisation_done_flag"):os.remove("optimisation_done_flag")
    expt.write_config(nclf)
    expt.write_classifier_eval_wrapper(nclf)
    # Starting process and
    spearmint_output_file = open("spearmint_output_"+nclf.name, 'w')
    p = subprocess.Popen(["python", "{}/main.py".format(expt.spearmint_directory),"."],stdout=spearmint_output_file, stderr=subprocess.STDOUT)
    print("\n",nclf.name," optimisation is running")
    # Poll until the evaluation wrapper signals completion via the flag file.
    while not os.path.isfile("optimisation_done_flag"):time.sleep(1)
    #os.killpg(os.getpgid(p.pid), signal.SIGTERM)
    p.kill()
    spearmint_output_file.close()
    #(output, err) = p.communicate()
    #print("Command output : ",output)
    print("\n",nclf.name," optimisation done")
    param_opt = plot_2D_opt_graph(expt, nclf)
    print("\n"+nclf.name, "nclf.param_list : ",nclf.param_list)
    print(nclf.name, "param_opt : ", param_opt)
    nclf.param_opt = []
    for i in range(2):
        limits = nclf.range_list[i]
        lower_lim, upper_lim = limits
        # Integer-ranged parameters are cast back to int (Python 2 'long').
        if isinstance( lower_lim, ( int, long ) ) and isinstance( upper_lim, ( int, long ) ):
            nclf.param_opt.append(int(param_opt[i]))
        else:
            nclf.param_opt.append(param_opt[i])
    print(nclf.name, "nclf.param_opt : ", nclf.param_opt,"\n")
    # Apply the optimised values directly to the sklearn-style estimator.
    for param_index, param in enumerate(nclf.param_list):
        if not nclf.name == "nn": setattr(nclf.clf,param ,nclf.param_opt[param_index])
    os.chdir("..")
    #print(nclf.name+" nclf.clf :",nclf.clf)
    out_q.put(nclf)
    print(multiprocessing.current_process().name, " : ",nclf.name ," Finishing")
def evaluate_job(expt, nclf, out_q):
    """Worker process: evaluate one classifier over all evaluation dimensions.

    Works inside a per-classifier subdirectory and delegates the actual
    scoring to classifier_eval_2files.  `out_q` is accepted only for
    signature parity with the other *_job workers; nothing is put on it.
    """
    print("\n", multiprocessing.current_process().name, " : ", nclf.name, " Starting")
    if not os.path.exists(nclf.name):
        os.makedirs(nclf.name)
    os.chdir(nclf.name)
    # Configure the classifier(s) with the optimised hyperparameters.
    for param_index, param in enumerate(nclf.param_list):
        if nclf.name == "nn":
            # The Keras net needs one classifier instance per input dimensionality.
            for dim_index, dim in enumerate(expt.evaluation_dimensions):
                nclf.clf_nn_dict[str(dim)] = KerasClassifier(build_fn=make_keras_model, n_hidden_layers=nclf.param_opt[0], dimof_middle=nclf.param_opt[1], dimof_input=expt.keras_evaluation_dimensions[dim_index])
        else:
            setattr(nclf.clf, param, nclf.param_opt[param_index])
    print("\n", nclf.name + " nclf.clf :", nclf.clf, "\n")
    if not expt.only_mod == 2:
        # CPV part: compare samples drawn from the two different distributions.
        for dim in expt.evaluation_dimensions:
            comp_file_list = []
            for i in range(expt.number_of_evaluations):
                comp_file_list.append((expt.file_name_patterns[0].format(dim, i), expt.file_name_patterns[1].format(dim, i)))
            if nclf.name == "nn":
                aclf = nclf.clf_nn_dict[str(dim)]
            else:
                aclf = nclf.clf
            if expt.scoring == 'chi2':
                for no_bins in expt.single_no_bins_list:
                    classifier_eval_2files(name=nclf.name + "_" + expt.name_CPV.format(dim), comp_file_list=comp_file_list, clf=aclf, verbose=False, scoring=expt.scoring, no_bins=no_bins, systematics_fraction=expt.systematics_fraction, title=expt.title_CPV + " " + str(dim) + "D", transform=expt.transform)
            else:
                # BUG FIX: this is the CPV run, so pass the CPV title.  It
                # previously passed expt.title_noCPV (copy-paste slip from
                # the noCPV block below).
                classifier_eval_2files(name=nclf.name + "_" + expt.name_CPV.format(dim), comp_file_list=comp_file_list, clf=aclf, verbose=False, scoring=expt.scoring, title=expt.title_CPV + " " + str(dim) + "D", transform=expt.transform)
    if not expt.only_mod or 2 == expt.only_mod:
        print(nclf.name, "Running NoCPV ")
        # noCPV part: both samples come from the SAME distribution
        # (indices i vs 100+i of pattern 0) as a null-hypothesis check.
        for dim in expt.evaluation_dimensions:
            comp_file_list = []
            for i in range(expt.number_of_evaluations):
                comp_file_list.append((expt.file_name_patterns[0].format(dim, i), expt.file_name_patterns[0].format(dim, 100 + i)))
            if nclf.name == "nn":
                aclf = nclf.clf_nn_dict[str(dim)]
            else:
                aclf = nclf.clf
            if expt.scoring == 'chi2':
                for no_bins in expt.single_no_bins_list:
                    classifier_eval_2files(name=nclf.name + "_" + expt.name_noCPV.format(dim), comp_file_list=comp_file_list, clf=aclf, verbose=False, scoring=expt.scoring, no_bins=no_bins, systematics_fraction=expt.systematics_fraction, title=expt.title_noCPV + " " + str(dim) + "D", transform=expt.transform)
            else:
                classifier_eval_2files(name=nclf.name + "_" + expt.name_noCPV.format(dim), comp_file_list=comp_file_list, clf=aclf, verbose=False, scoring=expt.scoring, title=expt.title_noCPV + " " + str(dim) + "D", transform=expt.transform)
    os.chdir("..")
    print(multiprocessing.current_process().name, " : ", nclf.name, " Finishing")
def plot_job(expt, nclf, out_q):
    """Worker process: produce plots for one classifier at selected dimensions.

    Uses only the "most typical" sample pairs instead of the full evaluation
    set.  `out_q` is accepted only for signature parity with the other
    *_job workers; nothing is put on it.
    """
    print("\n", multiprocessing.current_process().name, " : ", nclf.name, " Starting")
    if not os.path.exists(nclf.name):
        os.makedirs(nclf.name)
    os.chdir(nclf.name)
    # Configure the classifier(s) with the optimised hyperparameters.
    for param_index, param in enumerate(nclf.param_list):
        if nclf.name == "nn":
            # The Keras net needs one classifier instance per input dimensionality.
            for dim_index, dim in enumerate(expt.plot_dimensions):
                nclf.clf_nn_dict[str(dim)] = KerasClassifier(build_fn=make_keras_model, n_hidden_layers=nclf.param_opt[0], dimof_middle=nclf.param_opt[1], dimof_input=expt.keras_plot_dimensions[dim_index])
        else:
            setattr(nclf.clf, param, nclf.param_opt[param_index])
    print("\n", nclf.name + " nclf.clf :", nclf.clf, "\n")
    # CPV part: the two "most typical" sample pairs of the two distributions.
    for dim_index, dim in enumerate(expt.plot_dimensions):
        comp_file_list = []
        comp_file_list.append((expt.file_name_patterns[0].format(dim, expt.most_typical[dim_index]), expt.file_name_patterns[1].format(dim, expt.most_typical[dim_index])))
        comp_file_list.append((expt.file_name_patterns[0].format(dim, 1 + expt.most_typical[dim_index]), expt.file_name_patterns[1].format(dim, 1 + expt.most_typical[dim_index])))
        if nclf.name == "nn":
            aclf = nclf.clf_nn_dict[str(dim)]
        else:
            aclf = nclf.clf
        if expt.scoring == 'chi2':
            for no_bins in expt.single_no_bins_list:
                classifier_eval_2files(name=nclf.name + "_" + expt.name_CPV.format(dim), comp_file_list=comp_file_list, clf=aclf, verbose=False, scoring=expt.scoring, no_bins=no_bins, systematics_fraction=expt.systematics_fraction, title=expt.title_CPV + " " + str(dim) + "D")
        else:
            # BUG FIX: this is the CPV run, so pass the CPV title (it
            # previously passed expt.title_noCPV, a copy-paste slip).
            classifier_eval_2files(name=nclf.name + "_" + expt.name_CPV.format(dim), comp_file_list=comp_file_list, clf=aclf, verbose=False, scoring=expt.scoring, title=expt.title_CPV + " " + str(dim) + "D")
    if not expt.only_mod:
        print(nclf.name, "Running NoCPV ")
        # noCPV part: both samples from the same distribution (null check).
        for dim_index, dim in enumerate(expt.plot_dimensions):
            comp_file_list = []
            comp_file_list.append((expt.file_name_patterns[0].format(dim, expt.most_typical[dim_index]), expt.file_name_patterns[0].format(dim, 100 + expt.most_typical[dim_index])))
            comp_file_list.append((expt.file_name_patterns[0].format(dim, 1 + expt.most_typical[dim_index]), expt.file_name_patterns[0].format(dim, 101 + expt.most_typical[dim_index])))
            if nclf.name == "nn":
                aclf = nclf.clf_nn_dict[str(dim)]
            else:
                aclf = nclf.clf
            if expt.scoring == 'chi2':
                for no_bins in expt.single_no_bins_list:
                    classifier_eval_2files(name=nclf.name + "_" + expt.name_noCPV.format(dim), comp_file_list=comp_file_list, clf=aclf, verbose=False, scoring=expt.scoring, no_bins=no_bins, systematics_fraction=expt.systematics_fraction, title=expt.title_noCPV + " " + str(dim) + "D")
            else:
                # BUG FIX: previously used expt.title, which the experiment
                # class never defines (only title_CPV/title_noCPV), so this
                # branch raised AttributeError.
                classifier_eval_2files(name=nclf.name + "_" + expt.name_noCPV.format(dim), comp_file_list=comp_file_list, clf=aclf, verbose=False, scoring=expt.scoring, title=expt.title_noCPV + " " + str(dim) + "D")
    os.chdir("..")
    print(multiprocessing.current_process().name, " : ", nclf.name, " Finishing")
def worker(expt, nclf):
    """Process worker stub: log start, pause for a fixed delay, log exit."""
    proc_name = multiprocessing.current_process().name
    print(proc_name, " : ", nclf.name, ' Starting')
    # xgb gets a shorter pause than everything else.
    delay = 5 if nclf.name == "xgb" else 10
    time.sleep(delay)
    print(proc_name, " : ", nclf.name, ' Exiting')
    return
#######################################################################################################################################################################################################
#######################################################################################################################################################################################################
#######################################################################################################################################################################################################
class experiment(object):
def __init__(self,**kwargs):
self.nclf_list = kwargs.get('nclf_list',[nclf()])
self.file_name_patterns = kwargs.get('file_name_patterns', [ os.environ['learningml']+"/GoF/data/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_redefined_{0}D_1000_0.6_0.2_0.1_{1}.txt", os.environ['learningml']+"/GoF/data/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_redefined_{0}D_1000_0.6_0.2_0.075_{1}.txt" ])
self.only_mod = kwargs.get('only_mod',False)
self.n_cores = kwargs.get('n_cores',7)
self.name_CPV = kwargs.get('name_CPV',"{0}Dname_CPV")
self.name_noCPV = kwargs.get('name_noCPV',"{0}Dname_noCPV")
self.title_CPV = kwargs.get('title_CPV','title_CPV')
self.title_noCPV = kwargs.get('title_noCPV','title_noCPV')
self.directory_name = kwargs.get('directory_name',"")
self.scoring = kwargs.get('scoring',"standard")
self.single_no_bins_list= kwargs.get('single_no_bins_list',[2])
self.systematics_fraction= kwargs.get('systematics_fraction',0.01)
self.transform = kwargs.get('transform','StandardScalar')
self.name_CPV = self.name_CPV+ "_syst_"+str(self.systematics_fraction).replace(".","_") + "_"
self.name_noCPV = self.name_noCPV+ "_syst_"+str(self.systematics_fraction).replace(".","_") + "_"
self.title_CPV = self.title_CPV + " syst" + str(self.systematics_fraction)
self.title_noCPV = self.title_noCPV + " syst" + str(self.systematics_fraction)
def set_name_CPV(self,name_CPV): self.name_CPV = name_CPV
def set_name_noCPV(self,name_noCPV): self.name_noCPV = name_noCPV
def set_nclf_list(self,nclf_list): self.nclf_list=nclf_list
def set_file_name_patterns(self,file_name_patterns): self.file_name_patterns = file_name_patterns
def set_only_mod(self, only_mod): self.only_mod = only_mod
    def optimise(self,**kwargs):
        """Run Spearmint hyperparameter optimisation for every classifier.

        Keyword arguments:
            optimisation_dimension       -- data dimensionality used for tuning (default 2)
            keras_optimisation_dimension -- input size for the Keras net (defaults to the above)
            number_of_iterations         -- Spearmint iterations per classifier (default 50)
            optimisation_no_bins         -- bin count used during tuning
            spearmint_directory          -- path to the Spearmint installation

        Returns the updated nclf_list (also stored back on self.nclf_list).

        Side effects: chdirs into (and back out of) an 'optimisation*'
        directory, wipes and restarts the MongoDB bookkeeping store, and
        spawns one worker process per classifier.
        """
        self.optimisation_dimension = kwargs.get('optimisation_dimension',2)
        self.keras_optimisation_dimension = kwargs.get('keras_optimisation_dimension',self.optimisation_dimension)
        self.number_of_iterations = kwargs.get('number_of_iterations',50)
        self.optimisation_no_bins = kwargs.get('optimisation_no_bins',self.single_no_bins_list[0])
        self.spearmint_directory = kwargs.get('spearmint_directory', "/Users/weisser/Documents/Spearmint-master/spearmint")
        # Work inside a dedicated optimisation directory.
        opt_dir = "optimisation"+self.directory_name
        if not os.path.exists(opt_dir):
            os.makedirs(opt_dir)
        os.chdir(opt_dir)
        print(os.getcwd())
        os.system(os.environ['learningml']+"/GoF/reinitialise_spearmint.sh")
        # Start from a clean MongoDB instance for Spearmint's bookkeeping.
        if os.path.exists("MongoDB_bookkeeping"):
            shutil.rmtree("MongoDB_bookkeeping")
        os.makedirs("MongoDB_bookkeeping")
        os.system("mongod --fork --logpath MongoDB_bookkeeping/example.log --dbpath MongoDB_bookkeeping")
        out_q = multiprocessing.Queue()
        jobs = []
        for jobid, nclf in enumerate(self.nclf_list):
            p = multiprocessing.Process(target=optimise_job, args=(self,nclf,out_q,))
            jobs.append(p)
            p.start()
        # Drain the queue BEFORE join(): a child that still has unread data
        # on the queue cannot terminate, so joining first could deadlock.
        # NOTE(review): results arrive in completion order, so the order of
        # self.nclf_list entries may be permuted here -- confirm intended.
        for i in range(len(jobs)):
            self.nclf_list[i] = out_q.get()
        # Wait until jobs have finished
        for j in jobs: j.join()
        for nclf in self.nclf_list: print(nclf.name, " : ",nclf.clf)
        print("\n","/"*100,"All OPTIMISATION jobs finished running","/"*100,"\n")
        os.chdir("..")
        return self.nclf_list
def evaluate(self,evaluation_dimensions=[2],number_of_evaluations=100, **kwargs):
self.evaluation_dimensions = evaluation_dimensions
self.number_of_evaluations = number_of_evaluations
self.keras_evaluation_dimensions = kwargs.get('keras_evaluation_dimensions', self.evaluation_dimensions)
eval_dir = "evaluation"+self.directory_name
if not os.path.exists(eval_dir):
os.makedirs(eval_dir)
os.chdir(eval_dir)
out_q = multiprocessing.Queue()
print("operating on file_name_patterns :" , self.file_name_patterns)
jobs = []
for nclf in self.nclf_list:
p = multiprocessing.Process(target=evaluate_job, args=(self,nclf,out_q,))
jobs.append(p)
p.start()
# Wait until jobs have finished
for j in jobs: j.join()
print("\n","/"*100,"All EVALUATION jobs finished running","/"*100)
os.chdir("..")
def plot(self,plot_dimensions=[2,6,10],most_typical=[0, 0, 0], **kwargs):
self.plot_dimensions = plot_dimensions
self.number_of_plots = 1
self.most_typical = most_typical
self.keras_plot_dimensions = kwargs.get('keras_plot_dimensions', self.plot_dimensions)
plot_dir = "plot"+self.directory_name
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
os.chdir(plot_dir)
out_q = multiprocessing.Queue()
print("operating on file_name_patterns :" , self.file_name_patterns)
jobs = []
for nclf in self.nclf_list:
p = multiprocessing.Process(target=plot_job, args=(self,nclf,out_q,))
jobs.append(p)
p.start()
# Wait until jobs have finished
for j in jobs: j.join()
print("\n","/"*100,"All EVALUATION jobs finished running","/"*100)
os.chdir("..")
def write_classifier_eval_wrapper(self, nclf):
classifier_content = 'import os \nimport signal \nimport numpy as np \nimport math \nimport sys \nsys.path.insert(0,os.environ["learningml"]+"/GoF") \nimport os \nimport classifier_eval \nfrom sklearn.tree import DecisionTreeClassifier \nfrom sklearn.ensemble import AdaBoostClassifier \nfrom sklearn.svm import SVC \nfrom keras.wrappers.scikit_learn import KerasClassifier \nfrom rep.estimators import XGBoostClassifier \n# Write a function like this called "main" \ndef main(job_id, params): \n\tprint "Anything printed here will end up in the output directory for job ", job_id \n\tprint params \n\n'
classifier_content += '\tif job_id>{}:file = open("optimisation_done_flag", "a").close()\n\n'.format(self.number_of_iterations)
#classifier_content += '\tassert (job_id<{}), "Final number of iterations reached" \n\n'.format(1+self.number_of_iterations)
#classifier_content += '\tif job_id>{}: \n\t\tprint("Killing parent process : ", os.getppid(),"\\n"*3) \n\t\tos.kill(os.getppid(), signal.SIGTERM) \n\n'.format(self.number_of_iterations)
classifier_content += '\tcomp_file_list= [("{}","{}")]\n\n'.format(self.file_name_patterns[0].format(self.optimisation_dimension,"optimisation_0"),self.file_name_patterns[1].format(self.optimisation_dimension,"optimisation_0"))
if nclf.name == "nn":
classifier_content += '\tclf = KerasClassifier(classifier_eval.make_keras_model,n_hidden_layers=params["n_hidden_layers"],dimof_middle=params["dimof_middle"],dimof_input={})'.format(self.keras_optimisation_dimension)
else:
clf_repr_list = repr(nclf.clf).split()
for param in nclf.param_list:
for index, item in enumerate(clf_repr_list):
if param in item:
head, | |
"page{}".format(num) # TODO delete me
self.image_path = None
class UVVertex:
    """A vertex of the unfolded 2D net.

    Stores the 2D coordinate (`co`, the XY projection of the input vector)
    together with an immutable tuple snapshot (`tup`) used for cheap
    ordering and equality tests.
    """
    __slots__ = ('co', 'tup')

    def __init__(self, vector):
        projected = vector.xy
        self.co = projected
        self.tup = tuple(projected)
class UVEdge:
    """Directed edge of the 2D net.

    Every UVEdge is attached to exactly one UVFace; a shared mesh edge is
    represented twice (once per face) so that each face's edges run
    clockwise around it.
    """
    __slots__ = ('va', 'vb', 'uvface', 'loop',
                 'min', 'max', 'bottom', 'top',
                 'neighbor_left', 'neighbor_right', 'sticker')

    def __init__(self, vertex1: UVVertex, vertex2: UVVertex, uvface, loop):
        self.va, self.vb = vertex1, vertex2
        self.update()
        self.uvface = uvface
        self.sticker = None
        self.loop = loop

    def update(self):
        """Recompute the cached ordering data after the vertices have moved."""
        if self.va.tup < self.vb.tup:
            self.min, self.max = self.va, self.vb
        else:
            self.min, self.max = self.vb, self.va
        self.bottom, self.top = sorted((self.va.co.y, self.vb.co.y))

    def is_uvface_upwards(self):
        # XOR: an ascending edge of a flipped face points downwards and
        # vice versa.
        ascending = self.va.tup < self.vb.tup
        return ascending != self.uvface.flipped

    def __repr__(self):
        return "({} - {})".format(self.va, self.vb)
class PhantomUVEdge:
    """Throwaway 2D segment used only during overlap calculations."""
    __slots__ = ('va', 'vb', 'min', 'max', 'bottom', 'top')

    def __init__(self, vertex1: UVVertex, vertex2: UVVertex, flip):
        if flip:
            self.va, self.vb = vertex2, vertex1
        else:
            self.va, self.vb = vertex1, vertex2
        if self.va.tup < self.vb.tup:
            self.min, self.max = self.va, self.vb
        else:
            self.min, self.max = self.vb, self.va
        self.bottom, self.top = sorted((self.va.co.y, self.vb.co.y))

    def is_uvface_upwards(self):
        # A phantom edge belongs to no face, so there is no flip to undo.
        return self.va.tup < self.vb.tup

    def __repr__(self):
        return "[{} - {}]".format(self.va, self.vb)
class UVFace:
    """2D image of one mesh face inside an island of the net."""
    __slots__ = ('vertices', 'edges', 'face', 'island', 'flipped')

    def __init__(self, face: bmesh.types.BMFace, island: Island, matrix=1, normal_matrix=1):
        self.face = face
        self.island = island
        self.flipped = False  # a flipped UVFace has edges clockwise
        # Project every corner of the face into the island's plane.
        flatten = z_up_matrix(normal_matrix @ face.normal) @ matrix
        vertices = {}
        for loop in face.loops:
            vertices[loop] = UVVertex(flatten @ loop.vert.co)
        edges = {}
        for loop in face.loops:
            edges[loop] = UVEdge(vertices[loop], vertices[loop.link_loop_next], self, loop)
        self.vertices = vertices
        self.edges = edges
class Arrow:
    """Mark in the document: an arrow labelled with the number of the edge it points to."""
    __slots__ = ('bounds', 'center', 'rot', 'text', 'size')

    def __init__(self, uvedge, size, index):
        self.text = str(index)
        self.size = size
        self.center = (uvedge.va.co + uvedge.vb.co) / 2
        # Edge direction, oriented to respect the face's winding.
        if uvedge.uvface.flipped:
            edge = uvedge.va.co - uvedge.vb.co
        else:
            edge = uvedge.vb.co - uvedge.va.co
        tangent = edge.normalized()
        cos, sin = tangent
        self.rot = M.Matrix(((cos, -sin), (sin, cos)))
        normal = M.Vector((sin, -cos))
        offset = 1.2 * normal
        self.bounds = [self.center,
                       self.center + (offset + tangent) * size,
                       self.center + (offset - tangent) * size]
class Sticker:
    """Mark in the document: a glue tab drawn along an edge.

    The tab is a trapezoid (or, degenerately, a triangle) attached to
    `uvedge`; its corner angles and side lengths are shrunk as needed so it
    does not overlap the geometry around `other`, the counterpart edge it
    will be glued onto.
    """
    __slots__ = ('bounds', 'center', 'rot', 'text', 'width', 'vertices')
    def __init__(self, uvedge, default_width, index, other: UVEdge):
        """Sticker is directly attached to the given UVEdge."""
        # Walk the edge in drawing order, honouring the face's winding.
        first_vertex, second_vertex = (uvedge.va, uvedge.vb) if not uvedge.uvface.flipped else (uvedge.vb, uvedge.va)
        edge = first_vertex.co - second_vertex.co
        sticker_width = min(default_width, edge.length / 2)
        other_first, other_second = (other.va, other.vb) if not other.uvface.flipped else (other.vb, other.va)
        other_edge = other_second.co - other_first.co
        # angle a is at vertex uvedge.va, b is at uvedge.vb
        # Start from 60-degree corners: cos = 0.5, sin = sqrt(3)/2.
        cos_a = cos_b = 0.5
        sin_a = sin_b = 0.75**0.5
        # len_a is length of the side adjacent to vertex a, len_b likewise
        len_a = len_b = sticker_width / sin_a
        # fix overlaps with the most often neighbour - its sticking target
        if first_vertex == other_second:
            cos_a = max(cos_a, edge.dot(other_edge) / (edge.length_squared))  # angles between pi/3 and 0
        elif second_vertex == other_first:
            cos_b = max(cos_b, edge.dot(other_edge) / (edge.length_squared))  # angles between pi/3 and 0
        # Fix tabs for sticking targets with small angles
        try:
            other_face_neighbor_left = other.neighbor_left
            other_face_neighbor_right = other.neighbor_right
            other_edge_neighbor_a = other_face_neighbor_left.vb.co - other.vb.co
            other_edge_neighbor_b = other_face_neighbor_right.va.co - other.va.co
            # Adjacent angles in the face
            cos_a = max(cos_a, -other_edge.dot(other_edge_neighbor_a) / (other_edge.length*other_edge_neighbor_a.length))
            cos_b = max(cos_b, other_edge.dot(other_edge_neighbor_b) / (other_edge.length*other_edge_neighbor_b.length))
        except AttributeError:  # neighbor data may be missing for edges with 3+ faces
            pass
        except ZeroDivisionError:
            # Degenerate (zero-length) neighbour edge: keep the defaults.
            pass
        # Calculate the lengths of the glue tab edges using the possibly smaller angles
        # NOTE: len_a and len_b are refined alternately below -- the order of
        # these four statements matters, each step uses the previous result.
        sin_a = abs(1 - cos_a**2)**0.5
        len_b = min(len_a, (edge.length * sin_a) / (sin_a * cos_b + sin_b * cos_a))
        len_a = 0 if sin_a == 0 else min(sticker_width / sin_a, (edge.length - len_b*cos_b) / cos_a)
        sin_b = abs(1 - cos_b**2)**0.5
        len_a = min(len_a, (edge.length * sin_b) / (sin_a * cos_b + sin_b * cos_a))
        len_b = 0 if sin_b == 0 else min(sticker_width / sin_b, (edge.length - len_a * cos_a) / cos_b)
        # The two free corners of the tab, rotated off the edge endpoints.
        v3 = UVVertex(second_vertex.co + M.Matrix(((cos_b, -sin_b), (sin_b, cos_b))) @ edge * len_b / edge.length)
        v4 = UVVertex(first_vertex.co + M.Matrix(((-cos_a, -sin_a), (sin_a, -cos_a))) @ edge * len_a / edge.length)
        if v3.co != v4.co:
            self.vertices = [second_vertex, v3, v4, first_vertex]
        else:
            # Degenerate quad: the two free corners coincide, draw a triangle.
            self.vertices = [second_vertex, v3, first_vertex]
        sin, cos = edge.y / edge.length, edge.x / edge.length
        self.rot = M.Matrix(((cos, -sin), (sin, cos)))
        self.width = sticker_width * 0.9
        # Cross-island targets get an "island:index" label; local targets just the index.
        if index and uvedge.uvface.island is not other.uvface.island:
            self.text = "{}:{}".format(other.uvface.island.abbreviation, index)
        else:
            self.text = index
        self.center = (uvedge.va.co + uvedge.vb.co) / 2 + self.rot @ M.Vector((0, self.width * 0.2))
        self.bounds = [v3.co, v4.co, self.center] if v3.co != v4.co else [v3.co, self.center]
class NumberAlone:
    """Mark in the document: a bare number inside the island labelling an edge to be glued."""
    __slots__ = ('bounds', 'center', 'rot', 'text', 'size')

    def __init__(self, uvedge, index, default_size=0.005):
        """Attach the label directly to the given UVEdge."""
        # Edge direction, oriented to respect the face's winding.
        if uvedge.uvface.flipped:
            edge = uvedge.vb.co - uvedge.va.co
        else:
            edge = uvedge.va.co - uvedge.vb.co
        self.size = default_size
        length = edge.length
        sin, cos = edge.y / length, edge.x / length
        self.rot = M.Matrix(((cos, -sin), (sin, cos)))
        self.text = index
        midpoint = (uvedge.va.co + uvedge.vb.co) / 2
        # Shift the label off the edge, into the island.
        self.center = midpoint - self.rot @ M.Vector((0, self.size * 1.2))
        self.bounds = [self.center]
class SVG:
"""Simple SVG exporter"""
def __init__(self, page_size: M.Vector, style, margin, pure_net=True, angle_epsilon=0.01):
"""Initialize document settings.
page_size: document dimensions in meters
pure_net: if True, do not use image"""
self.page_size = page_size
self.pure_net = pure_net
self.style = style
self.margin = margin
self.text_size = 12
self.angle_epsilon = angle_epsilon
@classmethod
def encode_image(cls, bpy_image):
import tempfile
import base64
with tempfile.TemporaryDirectory() as directory:
filename = directory + "/i.png"
bpy_image.filepath_raw = filename
bpy_image.save()
return base64.encodebytes(open(filename, "rb").read()).decode('ascii')
def format_vertex(self, vector, pos=M.Vector((0, 0))):
"""Return a string with both coordinates of the given vertex."""
x, y = vector + pos
return "{:.6f} {:.6f}".format((x + self.margin) * 1000, (self.page_size.y - y - self.margin) * 1000)
def write(self, mesh, filename):
"""Write data to a file given by its name."""
line_through = " L ".join # used for formatting of SVG path data
rows = "\n".join
dl = ["{:.2f}".format(length * self.style.line_width * 1000) for length in (2, 5, 10)]
format_style = {
'SOLID': "none", 'DOT': "{0},{1}".format(*dl), 'DASH': "{1},{2}".format(*dl),
'LONGDASH': "{2},{1}".format(*dl), 'DASHDOT': "{2},{1},{0},{1}".format(*dl)}
def format_color(vec):
return "#{:02x}{:02x}{:02x}".format(round(vec[0] * 255), round(vec[1] * 255), round(vec[2] * 255))
def format_matrix(matrix):
return " ".join("{:.6f}".format(cell) for column in matrix for cell in column)
def path_convert(string, relto=os_path.dirname(filename)):
assert(os_path) # check the module was imported
string = os_path.relpath(string, relto)
if os_path.sep != '/':
string = string.replace(os_path.sep, '/')
return string
styleargs = {
name: format_color(getattr(self.style, name)) for name in (
"outer_color", "outbg_color", "convex_color", "concave_color", "freestyle_color",
"inbg_color", "sticker_fill", "text_color")}
styleargs.update({
name: format_style[getattr(self.style, name)] for name in
("outer_style", "convex_style", "concave_style", "freestyle_style")})
styleargs.update({
name: getattr(self.style, attr)[3] for name, attr in (
("outer_alpha", "outer_color"), ("outbg_alpha", "outbg_color"),
("convex_alpha", "convex_color"), ("concave_alpha", "concave_color"),
("freestyle_alpha", "freestyle_color"),
("inbg_alpha", "inbg_color"), ("sticker_alpha", "sticker_fill"),
("text_alpha", "text_color"))})
styleargs.update({
name: getattr(self.style, name) * self.style.line_width * 1000 for name in
("outer_width", "convex_width", "concave_width", "freestyle_width", "outbg_width", "inbg_width")})
for num, page in enumerate(mesh.pages):
page_filename = "{}_{}.svg".format(filename[:filename.rfind(".svg")], page.name) if len(mesh.pages) > 1 else filename
with open(page_filename, 'w') as f:
print(self.svg_base.format(width=self.page_size.x*1000, height=self.page_size.y*1000), file=f)
print(self.css_base.format(**styleargs), file=f)
if page.image_path:
print(
self.image_linked_tag.format(
pos="{0:.6f} {0:.6f}".format(self.margin*1000),
width=(self.page_size.x - 2 * self.margin)*1000,
height=(self.page_size.y - 2 * self.margin)*1000,
path=path_convert(page.image_path)),
file=f)
if len(page.islands) > 1:
print("<g>", file=f)
for island in page.islands:
print("<g>", file=f)
if island.image_path:
print(
self.image_linked_tag.format(
pos=self.format_vertex(island.pos + M.Vector((0, island.bounding_box.y))),
width=island.bounding_box.x*1000,
height=island.bounding_box.y*1000,
path=path_convert(island.image_path)),
file=f)
elif island.embedded_image:
print(
self.image_embedded_tag.format(
pos=self.format_vertex(island.pos + M.Vector((0, island.bounding_box.y))),
width=island.bounding_box.x*1000,
height=island.bounding_box.y*1000,
path=island.image_path),
island.embedded_image, "'/>",
file=f, sep="")
if island.title:
print(
self.text_tag.format(
size=1000 * self.text_size,
x=1000 * (island.bounding_box.x*0.5 + island.pos.x + self.margin),
y=1000 * (self.page_size.y - island.pos.y - self.margin - 0.2 * self.text_size),
label=island.title),
file=f)
data_markers, data_stickerfill, data_outer, data_convex, data_concave, data_freestyle = (list() for i in range(6))
for marker in island.markers:
if isinstance(marker, Sticker):
data_stickerfill.append("M {} Z".format(
line_through(self.format_vertex(vertex.co, island.pos) for vertex in marker.vertices)))
if marker.text:
data_markers.append(self.text_transformed_tag.format(
label=marker.text,
pos=self.format_vertex(marker.center, island.pos),
mat=format_matrix(marker.rot),
| |
AtaSolutionProperties # type: ignore
from ._models import AuthenticationDetailsProperties # type: ignore
from ._models import AutoProvisioningSetting # type: ignore
from ._models import AutoProvisioningSettingList # type: ignore
from ._models import Automation # type: ignore
from ._models import AutomationAction # type: ignore
from ._models import AutomationActionEventHub # type: ignore
from ._models import AutomationActionLogicApp # type: ignore
from ._models import AutomationActionWorkspace # type: ignore
from ._models import AutomationList # type: ignore
from ._models import AutomationRuleSet # type: ignore
from ._models import AutomationScope # type: ignore
from ._models import AutomationSource # type: ignore
from ._models import AutomationTriggeringRule # type: ignore
from ._models import AutomationValidationStatus # type: ignore
from ._models import AwAssumeRoleAuthenticationDetailsProperties # type: ignore
from ._models import AwsCredsAuthenticationDetailsProperties # type: ignore
from ._models import AzureResourceDetails # type: ignore
from ._models import AzureResourceIdentifier # type: ignore
from ._models import AzureResourceLink # type: ignore
from ._models import AzureTrackedResourceLocation # type: ignore
from ._models import Baseline # type: ignore
from ._models import BaselineAdjustedResult # type: ignore
from ._models import BenchmarkReference # type: ignore
from ._models import CVE # type: ignore
from ._models import CVSS # type: ignore
from ._models import CefExternalSecuritySolution # type: ignore
from ._models import CefSolutionProperties # type: ignore
from ._models import Compliance # type: ignore
from ._models import ComplianceList # type: ignore
from ._models import ComplianceResult # type: ignore
from ._models import ComplianceResultList # type: ignore
from ._models import ComplianceSegment # type: ignore
from ._models import ConnectableResource # type: ignore
from ._models import ConnectedResource # type: ignore
from ._models import ConnectedWorkspace # type: ignore
from ._models import ConnectionToIpNotAllowed # type: ignore
from ._models import ConnectorSetting # type: ignore
from ._models import ConnectorSettingList # type: ignore
from ._models import ContainerRegistryVulnerabilityProperties # type: ignore
from ._models import CustomAlertRule # type: ignore
from ._models import DataExportSettings # type: ignore
from ._models import DenylistCustomAlertRule # type: ignore
from ._models import Device # type: ignore
from ._models import DeviceList # type: ignore
from ._models import DeviceSecurityGroup # type: ignore
from ._models import DeviceSecurityGroupList # type: ignore
from ._models import DirectMethodInvokesNotInAllowedRange # type: ignore
from ._models import DiscoveredSecuritySolution # type: ignore
from ._models import DiscoveredSecuritySolutionList # type: ignore
from ._models import ETag # type: ignore
from ._models import EffectiveNetworkSecurityGroups # type: ignore
from ._models import ExternalSecuritySolution # type: ignore
from ._models import ExternalSecuritySolutionKind # type: ignore
from ._models import ExternalSecuritySolutionList # type: ignore
from ._models import ExternalSecuritySolutionProperties # type: ignore
from ._models import FailedLocalLoginsNotInAllowedRange # type: ignore
from ._models import FileUploadsNotInAllowedRange # type: ignore
from ._models import Firmware # type: ignore
from ._models import GcpCredentialsDetailsProperties # type: ignore
from ._models import HttpC2DMessagesNotInAllowedRange # type: ignore
from ._models import HttpC2DRejectedMessagesNotInAllowedRange # type: ignore
from ._models import HttpD2CMessagesNotInAllowedRange # type: ignore
from ._models import HybridComputeSettingsProperties # type: ignore
from ._models import InformationProtectionKeyword # type: ignore
from ._models import InformationProtectionPolicy # type: ignore
from ._models import InformationProtectionPolicyList # type: ignore
from ._models import InformationType # type: ignore
from ._models import IoTSecurityAggregatedAlert # type: ignore
from ._models import IoTSecurityAggregatedAlertList # type: ignore
from ._models import IoTSecurityAggregatedAlertPropertiesTopDevicesListItem # type: ignore
from ._models import IoTSecurityAggregatedRecommendation # type: ignore
from ._models import IoTSecurityAggregatedRecommendationList # type: ignore
from ._models import IoTSecurityAlertedDevice # type: ignore
from ._models import IoTSecurityDeviceAlert # type: ignore
from ._models import IoTSecurityDeviceRecommendation # type: ignore
from ._models import IoTSecuritySolutionAnalyticsModel # type: ignore
from ._models import IoTSecuritySolutionAnalyticsModelList # type: ignore
from ._models import IoTSecuritySolutionAnalyticsModelPropertiesDevicesMetricsItem # type: ignore
from ._models import IoTSecuritySolutionModel # type: ignore
from ._models import IoTSecuritySolutionsList # type: ignore
from ._models import IoTSeverityMetrics # type: ignore
from ._models import IotAlert # type: ignore
from ._models import IotAlertList # type: ignore
from ._models import IotAlertListModel # type: ignore
from ._models import IotAlertModel # type: ignore
from ._models import IotAlertType # type: ignore
from ._models import IotAlertTypeList # type: ignore
from ._models import IotDefenderSettingsList # type: ignore
from ._models import IotDefenderSettingsModel # type: ignore
from ._models import IotRecommendation # type: ignore
from ._models import IotRecommendationList # type: ignore
from ._models import IotRecommendationListModel # type: ignore
from ._models import IotRecommendationModel # type: ignore
from ._models import IotRecommendationType # type: ignore
from ._models import IotRecommendationTypeList # type: ignore
from ._models import IotSensorsList # type: ignore
from ._models import IotSensorsModel # type: ignore
from ._models import IotSitesList # type: ignore
from ._models import IotSitesModel # type: ignore
from ._models import IpAddress # type: ignore
from ._models import JitNetworkAccessPoliciesList # type: ignore
from ._models import JitNetworkAccessPolicy # type: ignore
from ._models import JitNetworkAccessPolicyInitiatePort # type: ignore
from ._models import JitNetworkAccessPolicyInitiateRequest # type: ignore
from ._models import JitNetworkAccessPolicyInitiateVirtualMachine # type: ignore
from ._models import JitNetworkAccessPolicyVirtualMachine # type: ignore
from ._models import JitNetworkAccessPortRule # type: ignore
from ._models import JitNetworkAccessRequest # type: ignore
from ._models import JitNetworkAccessRequestPort # type: ignore
from ._models import JitNetworkAccessRequestVirtualMachine # type: ignore
from ._models import Kind # type: ignore
from ._models import ListCustomAlertRule # type: ignore
from ._models import LocalUserNotAllowed # type: ignore
from ._models import Location # type: ignore
from ._models import LogAnalyticsIdentifier # type: ignore
from ._models import MacAddress # type: ignore
from ._models import MqttC2DMessagesNotInAllowedRange # type: ignore
from ._models import MqttC2DRejectedMessagesNotInAllowedRange # type: ignore
from ._models import MqttD2CMessagesNotInAllowedRange # type: ignore
from ._models import NetworkInterface # type: ignore
from ._models import OnPremiseIotSensor # type: ignore
from ._models import OnPremiseIotSensorsList # type: ignore
from ._models import OnPremiseResourceDetails # type: ignore
from ._models import OnPremiseSqlResourceDetails # type: ignore
from ._models import Operation # type: ignore
from ._models import OperationDisplay # type: ignore
from ._models import OperationList # type: ignore
from ._models import PackageDownloadInfo # type: ignore
from ._models import PackageDownloads # type: ignore
from ._models import PackageDownloadsCentralManager # type: ignore
from ._models import PackageDownloadsCentralManagerFull # type: ignore
from ._models import PackageDownloadsCentralManagerFullOvf # type: ignore
from ._models import PackageDownloadsSensor # type: ignore
from ._models import PackageDownloadsSensorFull # type: ignore
from ._models import PackageDownloadsSensorFullOvf # type: ignore
from ._models import PathRecommendation # type: ignore
from ._models import Pricing # type: ignore
from ._models import PricingList # type: ignore
from ._models import ProcessNotAllowed # type: ignore
from ._models import ProtectionMode # type: ignore
from ._models import Protocol # type: ignore
from ._models import ProxyServerProperties # type: ignore
from ._models import PublisherInfo # type: ignore
from ._models import QueryCheck # type: ignore
from ._models import QueuePurgesNotInAllowedRange # type: ignore
from ._models import RecommendationConfigurationProperties # type: ignore
from ._models import RegulatoryComplianceAssessment # type: ignore
from ._models import RegulatoryComplianceAssessmentList # type: ignore
from ._models import RegulatoryComplianceControl # type: ignore
from ._models import RegulatoryComplianceControlList # type: ignore
from ._models import RegulatoryComplianceStandard # type: ignore
from ._models import RegulatoryComplianceStandardList # type: ignore
from ._models import Remediation # type: ignore
from ._models import ResetPasswordInput # type: ignore
from ._models import Resource # type: ignore
from ._models import ResourceDetails # type: ignore
from ._models import ResourceIdentifier # type: ignore
from ._models import Rule # type: ignore
from ._models import RuleResults # type: ignore
from ._models import RuleResultsInput # type: ignore
from ._models import RuleResultsProperties # type: ignore
from ._models import RulesResults # type: ignore
from ._models import RulesResultsInput # type: ignore
from ._models import Scan # type: ignore
from ._models import ScanProperties # type: ignore
from ._models import ScanResult # type: ignore
from ._models import ScanResultProperties # type: ignore
from ._models import ScanResults # type: ignore
from ._models import Scans # type: ignore
from ._models import ScopeElement # type: ignore
from ._models import SecureScoreControlDefinitionItem # type: ignore
from ._models import SecureScoreControlDefinitionList # type: ignore
from ._models import SecureScoreControlDefinitionSource # type: ignore
from ._models import SecureScoreControlDetails # type: ignore
from ._models import SecureScoreControlList # type: ignore
from ._models import SecureScoreControlScore # type: ignore
from ._models import SecureScoreItem # type: ignore
from ._models import SecureScoresList # type: ignore
from ._models import SecurityAssessment # type: ignore
from ._models import SecurityAssessmentList # type: ignore
from ._models import SecurityAssessmentMetadata # type: ignore
from ._models import SecurityAssessmentMetadataList # type: ignore
from ._models | |
import os
import sys
import glob
import random
import math
import datetime
import itertools
import json
import re
import logging
# from collections import OrderedDict
import numpy as np
from scipy.stats import multivariate_normal
# import scipy.misc
import tensorflow as tf
# import keras
import keras.backend as KB
import keras.layers as KL
import keras.engine as KE
sys.path.append('..')
import mrcnn.utils as utils
import tensorflow.contrib.util as tfc
import pprint
def build_predictions(mrcnn_class, mrcnn_bbox, norm_output_rois, config):
    """Rearrange per-ROI class scores into a per-class prediction tensor.

    Args:
        mrcnn_class:      [batch, num_rois, num_classes] softmax scores.
        mrcnn_bbox:       unused here (kept for signature compatibility).
        norm_output_rois: [batch, num_rois, 4] ROIs in normalized coordinates.
        config:           provides BATCH_SIZE, NUM_CLASSES, TRAIN_ROIS_PER_IMAGE
                          and IMAGE_SHAPE.

    Returns:
        [pred_tensor, pred_cls_cnt] where pred_tensor is [batch, class, roi, 8]
        with rows (roi idx, best score, y1, x1, y2, x2, class id, per-class
        sequence no.) sorted by descending score, and pred_cls_cnt counts the
        ROIs whose argmax landed on each class.
    """
    batch_size     = config.BATCH_SIZE
    n_classes      = config.NUM_CLASSES
    rois_per_image = config.TRAIN_ROIS_PER_IMAGE
    print('num rois :', rois_per_image)
    img_h, img_w = config.IMAGE_SHAPE[:2]
    n_cols = 8
    pred_tensor = np.zeros((batch_size, n_classes, rois_per_image, n_cols), dtype=np.float32)
    pred_cls_cnt = np.zeros((batch_size, n_classes), dtype=np.int16)
    # ROIs come in normalized [0, 1] coordinates; scale back to pixel units.
    output_rois = norm_output_rois * np.array([img_h, img_w, img_h, img_w])
    # Scratch row buffer, zero-filled again for every (image, class) pair.
    scratch = np.empty((rois_per_image, n_cols))
    for img in range(batch_size):
        roi_scores = mrcnn_class[img]
        roi_boxes = output_rois[img]
        # The argmax of each score row picks the dominating (predicted) class.
        winner = np.argmax(roi_scores, axis=1)
        print('----------------------------------------------------------')
        print(' image: ' , img)
        print('----------------------------------------------------------')
        print('output_rois[img] ', roi_boxes.shape)
        print(roi_boxes)
        for cls in range(n_classes):
            cls_idxs = np.where(winner == cls)
            cls_cnt = cls_idxs[0].shape[0]
            scratch.fill(0)
            print('----------------------------------------------------------')
            print(' img/cls is: ' , img, '/', cls, 'cls_idxs: ' , cls_idxs[0])
            print(' cls_idxs[0].shape: ', cls_idxs[0].shape, ' cls_cnt', cls_cnt)
            print('----------------------------------------------------------')
            # Row layout: (roi idx, best score, bbox coords, class id, seq no.)
            scratch[:cls_cnt, 0] = cls_idxs[0]
            scratch[:cls_cnt, 1] = np.max(roi_scores[cls_idxs], axis=-1)
            scratch[:cls_cnt, 2:6] = roi_boxes[cls_idxs]
            scratch[:cls_cnt, 6] = cls
            scratch[:cls_cnt, 7] = np.arange(cls_cnt)
            # Sort the filled rows by descending score; the sequence column (7)
            # deliberately keeps its original order.
            order = scratch[:cls_cnt, 1].argsort()[::-1]
            scratch[:cls_cnt, :7] = scratch[order, :7]
            print('pred_new[img,cls] after sort:')
            print(scratch)
            pred_tensor[img, cls] = scratch
            pred_cls_cnt[img, cls] = cls_cnt
    print(' pred_cls_cnt is ' , pred_cls_cnt)
    return [pred_tensor, pred_cls_cnt]
def build_predictions_tf(mrcnn_class, mrcnn_bbox, norm_output_rois, config):
    """TensorFlow (graph-mode) counterpart of build_predictions().

    Scatters each ROI row (roi seq no., best score, y1, x1, y2, x2, class id)
    into the slot of its argmax class, sorts every class's rows by descending
    score, and appends the post-sort rank as an 8th column.

    Args:
        mrcnn_class:      [batch, num_rois, num_classes] class score tensor.
        mrcnn_bbox:       unused here (kept for signature compatibility).
        norm_output_rois: [batch, num_rois, 4] normalized ROI coordinates.
        config:           provides BATCH_SIZE, NUM_CLASSES,
                          TRAIN_ROIS_PER_IMAGE and IMAGE_SHAPE.

    Returns:
        [pred_tensor, pred_cls_cnt]: pred_tensor is
        [batch, num_classes, num_rois, 8]; pred_cls_cnt counts the non-empty
        rows per (image, class).
    """
    batch_size = config.BATCH_SIZE
    num_classes = config.NUM_CLASSES
    num_rois = config.TRAIN_ROIS_PER_IMAGE
    h, w = config.IMAGE_SHAPE[:2]
    print('>>> build_predictions_tf' )
    # Fix: an unused tf.InteractiveSession() used to be opened here and never
    # closed; every .eval() debug call that needed it was commented out, so the
    # session only leaked graph resources. It has been removed, along with the
    # unused locals `bbox_selected` and `class_ids`.
    output_rois = norm_output_rois * np.array([h, w, h, w])
    #---------------------------------------------------------------------------
    # The argmax of each score row determines the dominating (predicted) class.
    #---------------------------------------------------------------------------
    pred_classes = tf.to_int32(tf.argmax(mrcnn_class, axis=-1))
    pred_classes_exp = tf.to_float(tf.expand_dims(pred_classes, axis=-1))
    pred_scores = tf.reduce_max(mrcnn_class, axis=-1, keepdims=True)
    # (batch, roi) index grids used to build the scatter coordinates.
    batch_grid, roi_grid = tf.meshgrid(tf.range(batch_size, dtype=tf.int32),
                                       tf.range(num_rois, dtype=tf.int32), indexing='ij')
    bbox_idx = tf.to_float(tf.expand_dims(roi_grid, axis=-1))
    scatter_ind = tf.stack([batch_grid, pred_classes, roi_grid], axis=-1)
    # Row layout: (roi seq no., best score, y1, x1, y2, x2, class id).
    # Note: boxes are taken from output_rois, not gathered from mrcnn_bbox.
    pred_array = tf.concat([bbox_idx, pred_scores, output_rois, pred_classes_exp], axis=2)
    print(' -- pred_tensor tf ------------------------------')
    print(' resulting tensor : a_boxes_3d', type(pred_array), pred_array.shape)
    # Scatter every ROI row into the slot of its predicted class.
    pred_scatt = tf.scatter_nd(scatter_ind, pred_array, [batch_size, num_classes, num_rois, 7])
    print(' pred_scatter shape is ', pred_scatt.get_shape(), pred_scatt)
    ## Sort each class's rows by descending prediction score (column 1).
    _, sort_inds = tf.nn.top_k(pred_scatt[:, :, :, 1], k=pred_scatt.shape[2])
    print(' sort inds shape : ', sort_inds.get_shape())
    # Build the gather indexes that realize the sort, then append the
    # post-sort rank as the last column.
    class_grid, batch_grid, roi_grid = tf.meshgrid(tf.range(num_classes), tf.range(batch_size), tf.range(num_rois))
    roi_grid_exp = tf.expand_dims(roi_grid, axis=-1)
    gather_inds = tf.stack([batch_grid, class_grid, sort_inds], axis=-1)
    pred_tensor = tf.gather_nd(pred_scatt, gather_inds)
    pred_tensor = tf.concat([pred_tensor, tf.to_float(roi_grid_exp)], axis=-1)
    print(' class_grid ', type(class_grid) , 'shape', class_grid.get_shape())
    print(' batch_grid ', type(batch_grid) , 'shape', batch_grid.get_shape())
    print(' roi_grid ', type(roi_grid) , 'shape', roi_grid.get_shape())
    print(' gather_inds ', type(gather_inds), 'shape', gather_inds.get_shape())
    print(' -- pred_tensor results (A-boxes sorted by score ----')
    print(' pred_tensor ', pred_tensor.get_shape())
    # Rows whose seq-no column is zero were never filled by the scatter.
    pred_cls_cnt = tf.count_nonzero(pred_tensor[:, :, :, 0], axis=-1)
    print(' pred_cls_cnt shape : ',pred_cls_cnt.get_shape())
    print(' complete')
    return [pred_tensor, pred_cls_cnt]
def build_ground_truth(gt_class_ids, norm_gt_bboxes, config):
    """Spread ground-truth boxes into a per-class tensor (background skipped).

    Args:
        gt_class_ids:   [batch, max detections] integer class ids (0 = background).
        norm_gt_bboxes: [batch, max detections, 4] normalized GT boxes.
        config:         provides BATCH_SIZE, NUM_CLASSES,
                        DETECTION_MAX_INSTANCES and IMAGE_SHAPE.

    Returns:
        [gt_tensor, gt_cls_cnt]: gt_tensor is [batch, class, detection, 8]
        rows of (sequence no., 1.0, y1, x1, y2, x2, class id, source index);
        gt_cls_cnt counts GT boxes per (image, class).
    """
    batch_size = config.BATCH_SIZE
    n_classes = config.NUM_CLASSES
    max_dets = config.DETECTION_MAX_INSTANCES
    img_h, img_w = config.IMAGE_SHAPE[:2]
    n_cols = 8
    gt_tensor = np.zeros((batch_size, n_classes, max_dets, n_cols), dtype=np.float32)
    gt_cls_cnt = np.zeros((batch_size, n_classes), dtype=np.int16)
    # Scale normalized boxes back to pixel coordinates.
    gt_bboxes = norm_gt_bboxes * np.array([img_h, img_w, img_h, img_w])
    # Scratch row buffer, zero-filled again for every (image, class) pair.
    row_buf = np.empty((max_dets, n_cols))
    for img in range(batch_size):
        # Class 0 (background) is deliberately ignored in the ground truth.
        for cls in range(1, n_classes):
            hits = np.flatnonzero(gt_class_ids[img, :] == cls)
            count = hits.size
            row_buf.fill(0)
            row_buf[:count, 0] = np.arange(count)
            row_buf[:count, 1] = 1.0                      # GT boxes carry probability 1
            row_buf[:count, 2:6] = gt_bboxes[img, hits, :]
            row_buf[:count, 6] = cls
            row_buf[:count, 7] = hits                     # index back into the input arrays
            gt_tensor[img, cls] = row_buf
            gt_cls_cnt[img, cls] = count
    return [gt_tensor, gt_cls_cnt]
def build_ground_truth_tf(gt_class_ids, norm_gt_bboxes, config):
# // pass model to TensorBuilder
batch_size = config.BATCH_SIZE
num_classes = config.NUM_CLASSES
num_detections = config.DETECTION_MAX_INSTANCES
h, w = config.IMAGE_SHAPE[:2]
num_cols = 7
print('>>> build_ground_truth_tf' )
print(' gt_class_ids shape : ', gt_class_ids.shape, ' notm_gt_bbox.shape : ', norm_gt_bboxes.shape )
# sess = tf.InteractiveSession()
gt_bboxes = norm_gt_bboxes * np.array([h,w,h,w])
#---------------------------------------------------------------------------
# use the argmaxof each row to determine the dominating (predicted) class
#---------------------------------------------------------------------------
# gt_classes = gt_class_ids # batch_size x max gt detections
gt_classes_exp = tf.to_float(tf.expand_dims(gt_class_ids ,axis=-1))
print(' gt_classes_exp shape ', gt_classes_exp.get_shape() )
ones = tf.ones_like(gt_class_ids)
zeros= tf.zeros_like(gt_class_ids)
mask = tf.greater(gt_class_ids , 0)
gt_scores = tf.where(mask, ones, zeros)
# pred_scores = tf.reduce_max(mrcnn_class ,axis=-1, keep_dims=True) # (32,)
gt_scores_exp = tf.to_float(tf.expand_dims(gt_scores, axis=-1))
print(' pred_ scores shape ', gt_scores.get_shape())
batch_grid, bbox_grid = tf.meshgrid( tf.range(batch_size , dtype=tf.int32),
tf.range(num_detections, dtype=tf.int32), indexing = 'ij' )
print(' bbox_grid shape ', bbox_grid.get_shape())
# print(bbox_grid.eval())
print(' batch_grid shape ', batch_grid.get_shape())
# print(batch_grid.eval())
bbox_idx_zeros = tf.zeros_like(bbox_grid)
bbox_idx = tf.where(mask, bbox_grid , bbox_idx_zeros)
bbox_idx = tf.to_float(tf.expand_dims(bbox_idx, axis = -1))
print(' bbox_idx shape ', bbox_idx.get_shape())
# print(bbox_idx.eval())
gt_array = tf.concat([bbox_idx, gt_scores_exp , gt_bboxes, gt_classes_exp], axis=2)
print(' gt_array shape ', gt_array.get_shape())
# dont need this as gt_class_ids is already int
#class_ids = tf.to_int32(gt_array[:,:,6])
print(' class shape ', gt_class_ids.get_shape())
# print(class_ids.eval())
print(' roi_grid shape ', bbox_grid.get_shape())
# print(roi_grid.eval())
print(' batch_grid shape ', batch_grid.get_shape())
# print(batch_grid.eval())
| |
<filename>toolbox/ui.py
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from PyQt5.QtCore import Qt, QStringListModel
from PyQt5.QtWidgets import *
from encoder.inference import plot_embedding_as_heatmap
from toolbox.utterance import Utterance
from pathlib import Path
from typing import List, Set
import sounddevice as sd
import soundfile as sf
import numpy as np
# from sklearn.manifold import TSNE # You can try with TSNE if you like, I prefer UMAP
from time import sleep
import umap
import sys
from warnings import filterwarnings, warn
filterwarnings("ignore")
# Color palette (one RGB row per speaker slot) used to tell speakers apart in
# the UMAP projection, normalized to [0, 1] for matplotlib.
colormap = np.array([
    [0, 127, 70],
    [255, 0, 0],
    [255, 217, 38],
    [0, 135, 255],
    [165, 0, 165],
    [255, 167, 255],
    [97, 142, 151],
    [0, 255, 255],
    [255, 96, 38],
    [142, 76, 0],
    [33, 0, 127],
    [0, 0, 0],
    [183, 183, 183],
    [76, 255, 0],
], dtype=float) / 255  # np.float was removed in NumPy 1.24; builtin float is equivalent
default_text = \
    "欢迎使用工具箱, 现已支持中文输入!"
class UI(QDialog):
    # Minimum number of registered utterances before UMAP projections are drawn.
    min_umap_points = 4
    # Maximum number of lines kept in the rolling log window.
    max_log_lines = 5
    # Maximum number of utterances kept in the history dropdown.
    max_saved_utterances = 20
def draw_utterance(self, utterance: Utterance, which):
self.draw_spec(utterance.spec, which)
self.draw_embed(utterance.embed, utterance.name, which)
    def draw_embed(self, embed, name, which):
        """Draw *embed* as a heatmap on the "current" or "generated" axes.

        Passing embed=None clears the plot; *name* becomes the figure title.
        """
        embed_ax, _ = self.current_ax if which == "current" else self.gen_ax
        embed_ax.figure.suptitle("" if embed is None else name)
        ## Embedding
        # Clear the plot
        if len(embed_ax.images) > 0:
            # Remove the previous heatmap's colorbar first, otherwise it would
            # linger on the figure after clear()
            embed_ax.images[0].colorbar.remove()
        embed_ax.clear()
        # Draw the embed
        if embed is not None:
            plot_embedding_as_heatmap(embed, embed_ax)
            embed_ax.set_title("embedding")
        embed_ax.set_aspect("equal", "datalim")
        embed_ax.set_xticks([])
        embed_ax.set_yticks([])
        embed_ax.figure.canvas.draw()
def draw_spec(self, spec, which):
_, spec_ax = self.current_ax if which == "current" else self.gen_ax
## Spectrogram
# Draw the spectrogram
spec_ax.clear()
if spec is not None:
im = spec_ax.imshow(spec, aspect="auto", interpolation="none")
# spec_ax.figure.colorbar(mappable=im, shrink=0.65, orientation="horizontal",
# spec_ax=spec_ax)
spec_ax.set_title("mel spectrogram")
spec_ax.set_xticks([])
spec_ax.set_yticks([])
spec_ax.figure.canvas.draw()
if which != "current":
self.vocode_button.setDisabled(spec is None)
    def draw_umap_projections(self, utterances: Set[Utterance]):
        """Project all utterance embeddings to 2-D with UMAP and scatter-plot
        them, colored by speaker.

        Shows a placeholder message until min_umap_points utterances exist.
        """
        self.umap_ax.clear()
        speakers = np.unique([u.speaker_name for u in utterances])
        colors = {speaker_name: colormap[i] for i, speaker_name in enumerate(speakers)}
        embeds = [u.embed for u in utterances]
        # Display a message if there aren't enough points
        if len(utterances) < self.min_umap_points:
            self.umap_ax.text(.5, .5, "Add %d more points to\ngenerate the projections" %
                              (self.min_umap_points - len(utterances)),
                              horizontalalignment='center', fontsize=15)
            self.umap_ax.set_title("")
        # Compute the projections
        else:
            if not self.umap_hot:
                # First fit is slow; warn the user once
                self.log(
                    "Drawing UMAP projections for the first time, this will take a few seconds.")
                self.umap_hot = True
            # First positional argument is presumably n_neighbors, chosen as
            # sqrt of the sample count — TODO confirm against umap-learn docs
            reducer = umap.UMAP(int(np.ceil(np.sqrt(len(embeds)))), metric="cosine")
            # reducer = TSNE()
            projections = reducer.fit_transform(embeds)
            speakers_done = set()
            for projection, utterance in zip(projections, utterances):
                color = colors[utterance.speaker_name]
                # Generated utterances are drawn as crosses, real ones as dots
                mark = "x" if "_gen_" in utterance.name else "o"
                # Label each speaker only once so the legend has one entry each
                label = None if utterance.speaker_name in speakers_done else utterance.speaker_name
                speakers_done.add(utterance.speaker_name)
                self.umap_ax.scatter(projection[0], projection[1], c=[color], marker=mark,
                                     label=label)
            # self.umap_ax.set_title("UMAP projections")
            self.umap_ax.legend(prop={'size': 10})
        # Draw the plot
        self.umap_ax.set_aspect("equal", "datalim")
        self.umap_ax.set_xticks([])
        self.umap_ax.set_yticks([])
        self.umap_ax.figure.canvas.draw()
def save_audio_file(self, wav, sample_rate):
dialog = QFileDialog()
dialog.setDefaultSuffix(".wav")
fpath, _ = dialog.getSaveFileName(
parent=self,
caption="Select a path to save the audio file",
filter="Audio Files (*.flac *.wav)"
)
if fpath:
#Default format is wav
if Path(fpath).suffix == "":
fpath += ".wav"
sf.write(fpath, wav, sample_rate)
def setup_audio_devices(self, sample_rate):
input_devices = []
output_devices = []
for device in sd.query_devices():
# Check if valid input
try:
sd.check_input_settings(device=device["name"], samplerate=sample_rate)
input_devices.append(device["name"])
except:
pass
# Check if valid output
try:
sd.check_output_settings(device=device["name"], samplerate=sample_rate)
output_devices.append(device["name"])
except Exception as e:
# Log a warning only if the device is not an input
if not device["name"] in input_devices:
warn("Unsupported output device %s for the sample rate: %d \nError: %s" % (device["name"], sample_rate, str(e)))
if len(input_devices) == 0:
self.log("No audio input device detected. Recording may not work.")
self.audio_in_device = None
else:
self.audio_in_device = input_devices[0]
if len(output_devices) == 0:
self.log("No supported output audio devices were found! Audio output may not work.")
self.audio_out_devices_cb.addItems(["None"])
self.audio_out_devices_cb.setDisabled(True)
else:
self.audio_out_devices_cb.clear()
self.audio_out_devices_cb.addItems(output_devices)
self.audio_out_devices_cb.currentTextChanged.connect(self.set_audio_device)
self.set_audio_device()
def set_audio_device(self):
output_device = self.audio_out_devices_cb.currentText()
if output_device == "None":
output_device = None
# If None, sounddevice queries portaudio
sd.default.device = (self.audio_in_device, output_device)
    def play(self, wav, sample_rate):
        """Play *wav* at *sample_rate*, stopping any previous playback first."""
        try:
            sd.stop()
            sd.play(wav, sample_rate)
        except Exception as e:
            # Playback is best-effort: report the problem instead of crashing the UI
            print(e)
            self.log("Error in audio playback. Try selecting a different audio output device.")
            self.log("Your device must be connected before you start the toolbox.")
    def stop(self):
        """Immediately stop any ongoing audio playback or recording."""
        sd.stop()
    def record_one(self, sample_rate, duration):
        """Record *duration* seconds of mono audio and return it as a 1-D array.

        Returns None when the recording device fails. Blocks the caller but
        keeps the progress bar updated while the recording runs.
        """
        self.record_button.setText("Recording...")
        self.record_button.setDisabled(True)
        self.log("Recording %d seconds of audio" % duration)
        sd.stop()
        try:
            wav = sd.rec(duration * sample_rate, sample_rate, 1)
        except Exception as e:
            print(e)
            self.log("Could not record anything. Is your recording device enabled?")
            self.log("Your device must be connected before you start the toolbox.")
            return None
        # sd.rec() returns immediately; drive the progress bar while the
        # buffer fills in the background
        for i in np.arange(0, duration, 0.1):
            self.set_loading(i, duration)
            sleep(0.1)
        self.set_loading(duration, duration)
        # Block until the recording has actually completed before reading it
        sd.wait()
        self.log("Done recording.")
        self.record_button.setText("Record")
        self.record_button.setDisabled(False)
        # Collapse the (frames, 1) buffer to a 1-D array
        return wav.squeeze()
    @property
    def current_dataset_name(self):
        """Name of the dataset currently selected in the dataset combo box."""
        return self.dataset_box.currentText()
    @property
    def current_speaker_name(self):
        """Name of the speaker currently selected in the speaker combo box."""
        return self.speaker_box.currentText()
    @property
    def current_utterance_name(self):
        """Name of the utterance currently selected in the utterance combo box."""
        return self.utterance_box.currentText()
def browse_file(self):
fpath = QFileDialog().getOpenFileName(
parent=self,
caption="Select an audio file",
filter="Audio Files (*.mp3 *.flac *.wav *.m4a)"
)
return Path(fpath[0]) if fpath[0] != "" else ""
@staticmethod
def repopulate_box(box, items, random=False):
"""
Resets a box and adds a list of items. Pass a list of (item, data) pairs instead to join
data to the items
"""
box.blockSignals(True)
box.clear()
for item in items:
item = list(item) if isinstance(item, tuple) else [item]
box.addItem(str(item[0]), *item[1:])
if len(items) > 0:
box.setCurrentIndex(np.random.randint(len(items)) if random else 0)
box.setDisabled(len(items) == 0)
box.blockSignals(False)
    def populate_browser(self, datasets_root: Path, recognized_datasets: List, level: int,
                         random=True):
        """Fill the dataset/speaker/utterance boxes, cascading from *level*.

        level <= 0 repopulates datasets, <= 1 speakers, <= 2 utterances.
        With random=True a random entry is picked in each repopulated box.
        """
        # Select a random dataset
        if level <= 0:
            if datasets_root is not None:
                datasets = [datasets_root.joinpath(d) for d in recognized_datasets]
                # Keep only the recognized datasets that actually exist on disk
                datasets = [d.relative_to(datasets_root) for d in datasets if d.exists()]
                self.browser_load_button.setDisabled(len(datasets) == 0)
            if datasets_root is None or len(datasets) == 0:
                # No usable datasets: explain why and disable the browser UI
                msg = "Warning: you d" + ("id not pass a root directory for datasets as argument" \
                    if datasets_root is None else "o not have any of the recognized datasets" \
                    " in %s" % datasets_root)
                self.log(msg)
                msg += ".\nThe recognized datasets are:\n\t%s\nFeel free to add your own. You " \
                       "can still use the toolbox by recording samples yourself." % \
                       ("\n\t".join(recognized_datasets))
                print(msg, file=sys.stderr)
                self.random_utterance_button.setDisabled(True)
                self.random_speaker_button.setDisabled(True)
                self.random_dataset_button.setDisabled(True)
                self.utterance_box.setDisabled(True)
                self.speaker_box.setDisabled(True)
                self.dataset_box.setDisabled(True)
                self.browser_load_button.setDisabled(True)
                self.auto_next_checkbox.setDisabled(True)
                return
            self.repopulate_box(self.dataset_box, datasets, random)
        # Select a random speaker
        if level <= 1:
            speakers_root = datasets_root.joinpath(self.current_dataset_name)
            # Each immediate subdirectory of the dataset is one speaker
            speaker_names = [d.stem for d in speakers_root.glob("*") if d.is_dir()]
            self.repopulate_box(self.speaker_box, speaker_names, random)
        # Select a random utterance
        if level <= 2:
            utterances_root = datasets_root.joinpath(
                self.current_dataset_name,
                self.current_speaker_name
            )
            utterances = []
            # Collect every supported audio file anywhere under the speaker dir
            for extension in ['mp3', 'flac', 'wav', 'm4a']:
                utterances.extend(Path(utterances_root).glob("**/*.%s" % extension))
            utterances = [fpath.relative_to(utterances_root) for fpath in utterances]
            self.repopulate_box(self.utterance_box, utterances, random)
def browser_select_next(self):
index = (self.utterance_box.currentIndex() + 1) % len(self.utterance_box)
self.utterance_box.setCurrentIndex(index)
    @property
    def current_encoder_fpath(self):
        """Path attached to the currently selected encoder model entry."""
        return self.encoder_box.itemData(self.encoder_box.currentIndex())
    @property
    def current_synthesizer_fpath(self):
        """Path attached to the currently selected synthesizer model entry."""
        return self.synthesizer_box.itemData(self.synthesizer_box.currentIndex())
    @property
    def current_vocoder_fpath(self):
        """Path attached to the selected vocoder entry (None for Griffin-Lim)."""
        return self.vocoder_box.itemData(self.vocoder_box.currentIndex())
def populate_models(self, encoder_models_dir: Path, synthesizer_models_dir: Path,
vocoder_models_dir: Path):
# Encoder
encoder_fpaths = list(encoder_models_dir.glob("*.pt"))
if len(encoder_fpaths) == 0:
raise Exception("No encoder models found in %s" % encoder_models_dir)
self.repopulate_box(self.encoder_box, [(f.stem, f) for f in encoder_fpaths])
# Synthesizer
synthesizer_fpaths = list(synthesizer_models_dir.glob("**/*.pt"))
if len(synthesizer_fpaths) == 0:
raise Exception("No synthesizer models found in %s" % synthesizer_models_dir)
self.repopulate_box(self.synthesizer_box, [(f.stem, f) for f in synthesizer_fpaths])
# Vocoder
vocoder_fpaths = list(vocoder_models_dir.glob("**/*.pt"))
vocoder_items = [(f.stem, f) for f in vocoder_fpaths] + [("Griffin-Lim", None)]
self.repopulate_box(self.vocoder_box, vocoder_items)
    @property
    def selected_utterance(self):
        """Utterance object attached to the selected history entry."""
        return self.utterance_history.itemData(self.utterance_history.currentIndex())
def register_utterance(self, utterance: Utterance):
self.utterance_history.blockSignals(True)
self.utterance_history.insertItem(0, utterance.name, utterance)
self.utterance_history.setCurrentIndex(0)
self.utterance_history.blockSignals(False)
if len(self.utterance_history) > self.max_saved_utterances:
self.utterance_history.removeItem(self.max_saved_utterances)
self.play_button.setDisabled(False)
self.generate_button.setDisabled(False)
self.synthesize_button.setDisabled(False)
def log(self, line, mode="newline"):
if mode == "newline":
self.logs.append(line)
if len(self.logs) > self.max_log_lines:
del self.logs[0]
elif mode == "append":
self.logs[-1] += line
elif mode == "overwrite":
self.logs[-1] = line
log_text = '\n'.join(self.logs)
self.log_window.setText(log_text)
self.app.processEvents()
    def set_loading(self, value, maximum=1):
        """Show progress value/maximum on the loading bar (text hidden at 0)."""
        # Scale by 100 so fractional progress maps onto the integer bar range;
        # value is set before maximum, matching the original call order
        self.loading_bar.setValue(value * 100)
        self.loading_bar.setMaximum(maximum * 100)
        self.loading_bar.setTextVisible(value != 0)
        self.app.processEvents()
def populate_gen_options(self, seed, trim_silences):
if seed is not None:
self.random_seed_checkbox.setChecked(True)
self.seed_textbox.setText(str(seed))
self.seed_textbox.setEnabled(True)
else:
self.random_seed_checkbox.setChecked(False)
self.seed_textbox.setText(str(0))
self.seed_textbox.setEnabled(False)
if not trim_silences:
self.trim_silences_checkbox.setChecked(False)
self.trim_silences_checkbox.setDisabled(True)
def update_seed_textbox(self):
if self.random_seed_checkbox.isChecked():
self.seed_textbox.setEnabled(True)
else:
self.seed_textbox.setEnabled(False)
def reset_interface(self):
self.draw_embed(None, None, "current")
self.draw_embed(None, None, "generated")
self.draw_spec(None, "current")
self.draw_spec(None, "generated")
self.draw_umap_projections(set())
self.set_loading(0)
self.play_button.setDisabled(True)
self.generate_button.setDisabled(True)
self.synthesize_button.setDisabled(True)
self.vocode_button.setDisabled(True)
self.replay_wav_button.setDisabled(True)
self.export_wav_button.setDisabled(True)
[self.log("") for _ in range(self.max_log_lines)]
def __init__(self):
## Initialize the application
self.app = QApplication(sys.argv)
super().__init__(None)
self.setWindowTitle("SV2TTS toolbox")
## Main layouts
# Root
root_layout = QGridLayout()
self.setLayout(root_layout)
# Browser
browser_layout = QGridLayout()
root_layout.addLayout(browser_layout, 0, 0, 1, 2)
# Generation
gen_layout = QVBoxLayout()
root_layout.addLayout(gen_layout, 0, 2, 1, 2)
# Projections
self.projections_layout = QVBoxLayout()
root_layout.addLayout(self.projections_layout, 1, 0, 1, 1)
# Visualizations
vis_layout = QVBoxLayout()
root_layout.addLayout(vis_layout, 1, 1, |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.