input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
""" Converter for Faceswap """
import logging
import cv2
import numpy as np
from plugins.plugin_loader import PluginLoader
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class Converter():
""" The converter is responsible for swapping the original face(s) in a frame with the output
of a trained Faceswap model.
Parameters
----------
output_size: int
The size of the face, in pixels, that is output from the Faceswap model
coverage_ratio: float
The ratio of the training image that was used for training the Faceswap model
centering: str
        The extracted face centering that the model was trained on (`"face"` or `"legacy"`)
draw_transparent: bool
Whether the final output should be drawn onto a transparent layer rather than the original
frame. Only available with certain writer plugins.
pre_encode: python function
Some writer plugins support the pre-encoding of images prior to saving out. As patching is
done in multiple threads, but writing is done in a single thread, it can speed up the
process to do any pre-encoding as part of the converter process.
arguments: :class:`argparse.Namespace`
The arguments that were passed to the convert process as generated from Faceswap's command
line arguments
configfile: str, optional
Optional location of custom configuration ``ini`` file. If ``None`` then use the default
config location. Default: ``None``
"""
def __init__(self, output_size, coverage_ratio, centering, draw_transparent, pre_encode,
arguments, configfile=None):
logger.debug("Initializing %s: (output_size: %s, coverage_ratio: %s, centering: %s, "
"draw_transparent: %s, pre_encode: %s, arguments: %s, configfile: %s)",
self.__class__.__name__, output_size, coverage_ratio, centering,
draw_transparent, pre_encode, arguments, configfile)
self._output_size = output_size
self._coverage_ratio = coverage_ratio
self._centering = centering
self._draw_transparent = draw_transparent
self._writer_pre_encode = pre_encode
self._args = arguments
self._configfile = configfile
self._scale = arguments.output_scale / 100
self._adjustments = dict(box=None, mask=None, color=None, seamless=None, sharpening=None)
self._load_plugins()
logger.debug("Initialized %s", self.__class__.__name__)
    @property
    def cli_arguments(self):
        """:class:`argparse.Namespace`: The command line arguments that were passed to the
        convert process at initialization time. """
        return self._args
def reinitialize(self, config):
""" Reinitialize this :class:`Converter`.
Called as part of the :mod:`~tools.preview` tool. Resets all adjustments then loads the
plugins as specified in the given config.
Parameters
----------
config: :class:`lib.config.FaceswapConfig`
Pre-loaded :class:`lib.config.FaceswapConfig`. used over any configuration on disk.
"""
logger.debug("Reinitializing converter")
self._adjustments = dict(box=None, mask=None, color=None, seamless=None, sharpening=None)
self._load_plugins(config=config, disable_logging=True)
logger.debug("Reinitialized converter")
def _load_plugins(self, config=None, disable_logging=False):
""" Load the requested adjustment plugins.
Loads the :mod:`plugins.converter` plugins that have been requested for this conversion
session.
Parameters
----------
config: :class:`lib.config.FaceswapConfig`, optional
Optional pre-loaded :class:`lib.config.FaceswapConfig`. If passed, then this will be
used over any configuration on disk. If ``None`` then it is ignored. Default: ``None``
disable_logging: bool, optional
Plugin loader outputs logging info every time a plugin is loaded. Set to ``True`` to
suppress these messages otherwise ``False``. Default: ``False``
"""
logger.debug("Loading plugins. config: %s", config)
self._adjustments["box"] = PluginLoader.get_converter(
"mask",
"box_blend",
disable_logging=disable_logging)(self._output_size,
configfile=self._configfile,
config=config)
self._adjustments["mask"] = PluginLoader.get_converter(
"mask",
"mask_blend",
disable_logging=disable_logging)(self._args.mask_type,
self._output_size,
self._coverage_ratio,
configfile=self._configfile,
config=config)
if self._args.color_adjustment != "none" and self._args.color_adjustment is not None:
self._adjustments["color"] = PluginLoader.get_converter(
"color",
self._args.color_adjustment,
disable_logging=disable_logging)(configfile=self._configfile, config=config)
sharpening = PluginLoader.get_converter(
"scaling",
"sharpen",
disable_logging=disable_logging)(configfile=self._configfile, config=config)
if sharpening.config.get("method", None) is not None:
self._adjustments["sharpening"] = sharpening
logger.debug("Loaded plugins: %s", self._adjustments)
def process(self, in_queue, out_queue):
""" Main convert process.
Takes items from the in queue, runs the relevant adjustments, patches faces to final frame
and outputs patched frame to the out queue.
Parameters
----------
in_queue: :class:`queue.Queue`
The output from :class:`scripts.convert.Predictor`. Contains detected faces from the
Faceswap model as well as the frame to be patched.
out_queue: :class:`queue.Queue`
The queue to place patched frames into for writing by one of Faceswap's
:mod:`plugins.convert.writer` plugins.
"""
logger.debug("Starting convert process. (in_queue: %s, out_queue: %s)",
in_queue, out_queue)
log_once = False
while True:
items = in_queue.get()
if items == "EOF":
logger.debug("EOF Received")
logger.debug("Patch queue finished")
# Signal EOF to other processes in pool
logger.debug("Putting EOF back to in_queue")
in_queue.put(items)
break
if isinstance(items, dict):
items = [items]
for item in items:
logger.trace("Patch queue got: '%s'", item["filename"])
try:
image = self._patch_image(item)
except Exception as err: # pylint: disable=broad-except
# Log error and output original frame
logger.error("Failed to convert image: '%s'. Reason: %s",
item["filename"], str(err))
image = item["image"]
loglevel = logger.trace if log_once else logger.warning
loglevel("Convert error traceback:", exc_info=True)
log_once = True
# UNCOMMENT THIS CODE BLOCK TO PRINT TRACEBACK ERRORS
# import sys ; import traceback
# exc_info = sys.exc_info() ; traceback.print_exception(*exc_info)
logger.trace("Out queue put: %s", item["filename"])
out_queue.put((item["filename"], image))
logger.debug("Completed convert process")
def _patch_image(self, predicted):
""" Patch a swapped face onto a frame.
Run selected adjustments and swap the faces in a frame.
Parameters
----------
predicted: dict
The output from :class:`scripts.convert.Predictor`.
Returns
-------
:class: `numpy.ndarray` or pre-encoded image output
The final frame ready for writing by a :mod:`plugins.convert.writer` plugin.
Frame is either an array, or the pre-encoded output from the writer's pre-encode
function (if it has one)
"""
logger.trace("Patching image: '%s'", predicted["filename"])
frame_size = (predicted["image"].shape[1], predicted["image"].shape[0])
new_image, background = self._get_new_image(predicted, frame_size)
patched_face = self._post_warp_adjustments(background, new_image)
patched_face = self._scale_image(patched_face)
patched_face *= 255.0
patched_face = np.rint(patched_face,
out=np.empty(patched_face.shape, dtype="uint8"),
casting='unsafe')
if self._writer_pre_encode is not None:
patched_face = self._writer_pre_encode(patched_face)
logger.trace("Patched image: '%s'", predicted["filename"])
return patched_face
def _get_new_image(self, predicted, frame_size):
""" Get the new face from the predictor and apply pre-warp manipulations.
Applies any requested adjustments to the raw output of the Faceswap model
before transforming the image into the target frame.
Parameters
----------
predicted: dict
The output from :class:`scripts.convert.Predictor`.
frame_size: tuple
The (`width`, `height`) of the final frame in pixels
Returns
-------
placeholder: :class: `numpy.ndarray`
The original frame with the swapped faces patched onto it
background: :class: `numpy.ndarray`
The original frame
"""
logger.trace("Getting: (filename: '%s', faces: %s)",
predicted["filename"], len(predicted["swapped_faces"]))
placeholder = np.zeros((frame_size[1], frame_size[0], 4), dtype="float32")
background = predicted["image"] / np.array(255.0, dtype="float32")
placeholder[:, :, :3] = background
for new_face, detected_face, reference_face in zip(predicted["swapped_faces"],
predicted["detected_faces"],
predicted["reference_faces"]):
predicted_mask = new_face[:, :, -1] if new_face.shape[2] == 4 else None
new_face = new_face[:, :, :3]
interpolator = reference_face.interpolators[1]
new_face = self._pre_warp_adjustments(new_face,
detected_face,
reference_face,
predicted_mask)
# Warp face with the mask
cv2.warpAffine(new_face,
reference_face.adjusted_matrix,
frame_size,
placeholder,
flags=cv2.WARP_INVERSE_MAP | interpolator,
borderMode=cv2.BORDER_TRANSPARENT)
logger.trace("Got filename: '%s'. (placeholders: %s)",
predicted["filename"], placeholder.shape)
return placeholder, background
def _pre_warp_adjustments(self, new_face, detected_face, reference_face, predicted_mask):
""" Run any requested adjustments that can be performed on the raw output from the Faceswap
model.
Any adjustments that can be performed before warping the face into the final frame are
performed here.
Parameters
----------
new_face: :class:`numpy.ndarray`
The swapped face received from the faceswap model.
detected_face: :class:`~lib.align.DetectedFace`
The detected_face object as defined in :class:`scripts.convert.Predictor`
reference_face: :class:`~lib.align.AlignedFace`
The aligned face object sized to the model output of the original face for reference
predicted_mask: :class:`numpy.ndarray` or ``None``
The predicted mask output from the Faceswap model. ``None`` if the model
did not learn a mask
Returns
-------
:class:`numpy.ndarray`
The face output from the Faceswap Model with any requested pre-warp adjustments
performed.
"""
logger.trace("new_face shape: %s, predicted_mask shape: %s", new_face.shape,
predicted_mask.shape if predicted_mask is not None else None)
old_face = reference_face.face[..., :3] / 255.0
new_face = self._adjustments["box"].run(new_face)
new_face, raw_mask = self._get_image_mask(new_face,
detected_face,
predicted_mask,
reference_face)
if self._adjustments["color"] is not None:
new_face = self._adjustments["color"].run(old_face, new_face, raw_mask)
if self._adjustments["seamless"] is not None:
new_face = self._adjustments["seamless"].run(old_face, new_face, raw_mask)
logger.trace("returning: new_face shape %s", new_face.shape)
return new_face
def _get_image_mask(self, new_face, detected_face, predicted_mask, reference_face):
""" Return any selected image mask and intersect with any box mask.
Places the requested mask into the new face's Alpha channel, intersecting with any box
mask that has already been applied.
Parameters
----------
new_face: :class:`numpy.ndarray`
The swapped face received from the faceswap model, with any box mask applied
detected_face: :class:`~lib.DetectedFace`
The detected_face object as defined in :class:`scripts.convert.Predictor`
predicted_mask: :class:`numpy.ndarray` or ``None``
The predicted mask output from the Faceswap model. ``None`` if the model
did not learn a mask
reference_face: :class:`~lib.align.AlignedFace`
The aligned face object sized to the model output of the original face for reference
Returns
-------
:class:`numpy.ndarray`
The swapped face with the requested mask added to the Alpha channel
"""
logger.trace("Getting mask. Image shape: %s", new_face.shape)
if self._args.mask_type != "none":
mask_centering = detected_face.mask[self._args.mask_type].stored_centering
else:
mask_centering = "face" # Unused but requires a valid value
crop_offset = (reference_face.pose.offset[self._centering] -
reference_face.pose.offset[mask_centering])
mask, raw_mask = self._adjustments["mask"].run(detected_face, crop_offset, self._centering,
predicted_mask=predicted_mask)
if new_face.shape[2] == 4:
logger.trace("Combining mask with alpha channel | |
db_delete_value(self, value):
self._db_value = None
    def getPrimaryKey(self):
        """Return this object's primary key (its database id)."""
        return self._db_id
class DBLocation(object):
    """Domain object recording a 2-D (x, y) position in the workflow DB layer.

    Follows the generated VisTrails DB API: the ``db_<field>`` properties mark
    the object dirty on write, while the ``db_add_/db_change_/db_delete_``
    helpers mutate without touching the dirty flag.
    """
    vtType = 'location'

    def __init__(self, id=None, x=None, y=None):
        self._db_id = id
        self._db_x = x
        self._db_y = y
        # Freshly-built objects are considered new and unsaved until persisted.
        self.is_dirty = True
        self.is_new = True

    def __copy__(self):
        return DBLocation.do_copy(self)

    def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
        """Return a copy; with ``new_ids`` allocate a fresh id from *id_scope*
        and record the old->new mapping in *id_remap*."""
        cp = DBLocation(id=self._db_id, x=self._db_x, y=self._db_y)
        if new_ids:
            fresh = id_scope.getNewId(self.vtType)
            remap_type = (id_scope.remap[self.vtType]
                          if self.vtType in id_scope.remap
                          else self.vtType)
            id_remap[(remap_type, self.db_id)] = fresh
            cp.db_id = fresh
        else:
            # A plain copy keeps the persistence flags of the source object.
            cp.is_dirty = self.is_dirty
            cp.is_new = self.is_new
        return cp

    @staticmethod
    def update_version(old_obj, trans_dict, new_obj=None):
        """Translate *old_obj* into the current schema, applying any per-field
        translation callables registered in *trans_dict*."""
        if new_obj is None:
            new_obj = DBLocation()
        class_dict = trans_dict.get(new_obj.__class__.__name__, {})
        for field in ('id', 'x', 'y'):
            attr = 'db_' + field
            if field in class_dict:
                setattr(new_obj, attr, class_dict[field](old_obj, trans_dict))
            elif getattr(old_obj, attr, None) is not None:
                setattr(new_obj, attr, getattr(old_obj, attr))
        new_obj.is_new = old_obj.is_new
        new_obj.is_dirty = old_obj.is_dirty
        return new_obj

    def db_children(self, parent=(None, None), orphan=False):
        return [(self, parent[0], parent[1])]

    def db_deleted_children(self, remove=False):
        # Locations have no child objects.
        return []

    def has_changes(self):
        return bool(self.is_dirty)

    @property
    def db_id(self):
        return self._db_id

    @db_id.setter
    def db_id(self, id):
        self._db_id = id
        self.is_dirty = True

    def db_add_id(self, id):
        self._db_id = id

    def db_change_id(self, id):
        self._db_id = id

    def db_delete_id(self, id):
        self._db_id = None

    @property
    def db_x(self):
        return self._db_x

    @db_x.setter
    def db_x(self, x):
        self._db_x = x
        self.is_dirty = True

    def db_add_x(self, x):
        self._db_x = x

    def db_change_x(self, x):
        self._db_x = x

    def db_delete_x(self, x):
        self._db_x = None

    @property
    def db_y(self):
        return self._db_y

    @db_y.setter
    def db_y(self, y):
        self._db_y = y
        self.is_dirty = True

    def db_add_y(self, y):
        self._db_y = y

    def db_change_y(self, y):
        self._db_y = y

    def db_delete_y(self, y):
        self._db_y = None

    def getPrimaryKey(self):
        return self._db_id
class DBParameter(object):
    """Domain object for a single function parameter (position, name, type,
    value and alias) in the workflow DB layer.

    Follows the generated VisTrails DB API: the ``db_<field>`` properties mark
    the object dirty on write, while the ``db_add_/db_change_/db_delete_``
    helpers mutate without touching the dirty flag.
    """
    vtType = 'parameter'

    def __init__(self, id=None, pos=None, name=None, type=None, val=None, alias=None):
        self._db_id = id
        self._db_pos = pos
        self._db_name = name
        self._db_type = type
        self._db_val = val
        self._db_alias = alias
        # Freshly-built objects are considered new and unsaved until persisted.
        self.is_dirty = True
        self.is_new = True

    def __copy__(self):
        return DBParameter.do_copy(self)

    def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
        """Return a copy; with ``new_ids`` allocate a fresh id from *id_scope*
        and record the old->new mapping in *id_remap*."""
        cp = DBParameter(id=self._db_id,
                         pos=self._db_pos,
                         name=self._db_name,
                         type=self._db_type,
                         val=self._db_val,
                         alias=self._db_alias)
        if new_ids:
            fresh = id_scope.getNewId(self.vtType)
            remap_type = (id_scope.remap[self.vtType]
                          if self.vtType in id_scope.remap
                          else self.vtType)
            id_remap[(remap_type, self.db_id)] = fresh
            cp.db_id = fresh
        else:
            # A plain copy keeps the persistence flags of the source object.
            cp.is_dirty = self.is_dirty
            cp.is_new = self.is_new
        return cp

    @staticmethod
    def update_version(old_obj, trans_dict, new_obj=None):
        """Translate *old_obj* into the current schema, applying any per-field
        translation callables registered in *trans_dict*."""
        if new_obj is None:
            new_obj = DBParameter()
        class_dict = trans_dict.get(new_obj.__class__.__name__, {})
        for field in ('id', 'pos', 'name', 'type', 'val', 'alias'):
            attr = 'db_' + field
            if field in class_dict:
                setattr(new_obj, attr, class_dict[field](old_obj, trans_dict))
            elif getattr(old_obj, attr, None) is not None:
                setattr(new_obj, attr, getattr(old_obj, attr))
        new_obj.is_new = old_obj.is_new
        new_obj.is_dirty = old_obj.is_dirty
        return new_obj

    def db_children(self, parent=(None, None), orphan=False):
        return [(self, parent[0], parent[1])]

    def db_deleted_children(self, remove=False):
        # Parameters have no child objects.
        return []

    def has_changes(self):
        return bool(self.is_dirty)

    @property
    def db_id(self):
        return self._db_id

    @db_id.setter
    def db_id(self, id):
        self._db_id = id
        self.is_dirty = True

    def db_add_id(self, id):
        self._db_id = id

    def db_change_id(self, id):
        self._db_id = id

    def db_delete_id(self, id):
        self._db_id = None

    @property
    def db_pos(self):
        return self._db_pos

    @db_pos.setter
    def db_pos(self, pos):
        self._db_pos = pos
        self.is_dirty = True

    def db_add_pos(self, pos):
        self._db_pos = pos

    def db_change_pos(self, pos):
        self._db_pos = pos

    def db_delete_pos(self, pos):
        self._db_pos = None

    @property
    def db_name(self):
        return self._db_name

    @db_name.setter
    def db_name(self, name):
        self._db_name = name
        self.is_dirty = True

    def db_add_name(self, name):
        self._db_name = name

    def db_change_name(self, name):
        self._db_name = name

    def db_delete_name(self, name):
        self._db_name = None

    @property
    def db_type(self):
        return self._db_type

    @db_type.setter
    def db_type(self, type):
        self._db_type = type
        self.is_dirty = True

    def db_add_type(self, type):
        self._db_type = type

    def db_change_type(self, type):
        self._db_type = type

    def db_delete_type(self, type):
        self._db_type = None

    @property
    def db_val(self):
        return self._db_val

    @db_val.setter
    def db_val(self, val):
        self._db_val = val
        self.is_dirty = True

    def db_add_val(self, val):
        self._db_val = val

    def db_change_val(self, val):
        self._db_val = val

    def db_delete_val(self, val):
        self._db_val = None

    @property
    def db_alias(self):
        return self._db_alias

    @db_alias.setter
    def db_alias(self, alias):
        self._db_alias = alias
        self.is_dirty = True

    def db_add_alias(self, alias):
        self._db_alias = alias

    def db_change_alias(self, alias):
        self._db_alias = alias

    def db_delete_alias(self, alias):
        self._db_alias = None

    def getPrimaryKey(self):
        return self._db_id
class DBPluginData(object):
    """Domain object storing an opaque plugin data payload in the workflow DB
    layer.

    Follows the generated VisTrails DB API: the ``db_<field>`` properties mark
    the object dirty on write, while the ``db_add_/db_change_/db_delete_``
    helpers mutate without touching the dirty flag.
    """
    vtType = 'plugin_data'

    def __init__(self, id=None, data=None):
        self._db_id = id
        self._db_data = data
        # Freshly-built objects are considered new and unsaved until persisted.
        self.is_dirty = True
        self.is_new = True

    def __copy__(self):
        return DBPluginData.do_copy(self)

    def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
        """Return a copy; with ``new_ids`` allocate a fresh id from *id_scope*
        and record the old->new mapping in *id_remap*."""
        cp = DBPluginData(id=self._db_id, data=self._db_data)
        if new_ids:
            fresh = id_scope.getNewId(self.vtType)
            remap_type = (id_scope.remap[self.vtType]
                          if self.vtType in id_scope.remap
                          else self.vtType)
            id_remap[(remap_type, self.db_id)] = fresh
            cp.db_id = fresh
        else:
            # A plain copy keeps the persistence flags of the source object.
            cp.is_dirty = self.is_dirty
            cp.is_new = self.is_new
        return cp

    @staticmethod
    def update_version(old_obj, trans_dict, new_obj=None):
        """Translate *old_obj* into the current schema, applying any per-field
        translation callables registered in *trans_dict*."""
        if new_obj is None:
            new_obj = DBPluginData()
        class_dict = trans_dict.get(new_obj.__class__.__name__, {})
        for field in ('id', 'data'):
            attr = 'db_' + field
            if field in class_dict:
                setattr(new_obj, attr, class_dict[field](old_obj, trans_dict))
            elif getattr(old_obj, attr, None) is not None:
                setattr(new_obj, attr, getattr(old_obj, attr))
        new_obj.is_new = old_obj.is_new
        new_obj.is_dirty = old_obj.is_dirty
        return new_obj

    def db_children(self, parent=(None, None), orphan=False):
        return [(self, parent[0], parent[1])]

    def db_deleted_children(self, remove=False):
        # Plugin data has no child objects.
        return []

    def has_changes(self):
        return bool(self.is_dirty)

    @property
    def db_id(self):
        return self._db_id

    @db_id.setter
    def db_id(self, id):
        self._db_id = id
        self.is_dirty = True

    def db_add_id(self, id):
        self._db_id = id

    def db_change_id(self, id):
        self._db_id = id

    def db_delete_id(self, id):
        self._db_id = None

    @property
    def db_data(self):
        return self._db_data

    @db_data.setter
    def db_data(self, data):
        self._db_data = data
        self.is_dirty = True

    def db_add_data(self, data):
        self._db_data = data

    def db_change_data(self, data):
        self._db_data = data

    def db_delete_data(self, data):
        self._db_data = None

    def getPrimaryKey(self):
        return self._db_id
class DBFunction(object):
vtType = 'function'
def __init__(self, id=None, pos=None, name=None, parameters=None):
self._db_id = id
self._db_pos = pos
self._db_name = name
self.db_deleted_parameters = []
self.db_parameters_id_index = {}
if parameters is None:
self._db_parameters = []
else:
self._db_parameters = parameters
for v in self._db_parameters:
self.db_parameters_id_index[v.db_id] = v
self.is_dirty = True
self.is_new = True
    def __copy__(self):
        """Support :func:`copy.copy` by delegating to :meth:`do_copy` with no id remapping."""
        return DBFunction.do_copy(self)
def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
cp = DBFunction(id=self._db_id,
pos=self._db_pos,
name=self._db_name)
if self._db_parameters is None:
cp._db_parameters = []
else:
cp._db_parameters = [v.do_copy(new_ids, id_scope, id_remap) for v in self._db_parameters]
# set new ids
if new_ids:
new_id = id_scope.getNewId(self.vtType)
if self.vtType in id_scope.remap:
id_remap[(id_scope.remap[self.vtType], self.db_id)] = new_id
else:
id_remap[(self.vtType, self.db_id)] = new_id
cp.db_id = new_id
# recreate indices and set flags
cp.db_parameters_id_index = dict((v.db_id, v) for v in cp._db_parameters)
if not new_ids:
cp.is_dirty = self.is_dirty
cp.is_new = self.is_new
return cp
@staticmethod
def update_version(old_obj, trans_dict, new_obj=None):
if new_obj is None:
new_obj = DBFunction()
class_dict = {}
if new_obj.__class__.__name__ in trans_dict:
class_dict = trans_dict[new_obj.__class__.__name__]
if 'id' in class_dict:
res = class_dict['id'](old_obj, trans_dict)
new_obj.db_id = res
elif hasattr(old_obj, 'db_id') and old_obj.db_id is not None:
new_obj.db_id = old_obj.db_id
if 'pos' in class_dict:
res = class_dict['pos'](old_obj, trans_dict)
new_obj.db_pos = res
elif hasattr(old_obj, 'db_pos') and old_obj.db_pos is not None:
new_obj.db_pos = old_obj.db_pos
| |
<filename>google/cloud/securitycenter/settings/v1beta1/securitycenter-settings-v1beta1-py/google/cloud/securitycenter/settings_v1beta1/services/security_center_settings_service/transports/grpc_asyncio.py
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.securitycenter.settings_v1beta1.types import component_settings
from google.cloud.securitycenter.settings_v1beta1.types import component_settings as gcss_component_settings
from google.cloud.securitycenter.settings_v1beta1.types import securitycenter_settings_service
from google.cloud.securitycenter.settings_v1beta1.types import settings
from google.cloud.securitycenter.settings_v1beta1.types import settings as gcss_settings
from google.protobuf import empty_pb2 # type: ignore
from .base import SecurityCenterSettingsServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import SecurityCenterSettingsServiceGrpcTransport
class SecurityCenterSettingsServiceGrpcAsyncIOTransport(SecurityCenterSettingsServiceTransport):
"""gRPC AsyncIO backend transport for SecurityCenterSettingsService.
API Overview
------------
The SecurityCenterSettingsService is a sub-api of
``securitycenter.googleapis.com``. The service provides methods to
manage Security Center Settings, and Component Settings for GCP
organizations, folders, projects, and clusters.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
    @classmethod
    def create_channel(cls,
                       host: str = 'securitycenter.googleapis.com',
                       credentials: ga_credentials.Credentials = None,
                       credentials_file: Optional[str] = None,
                       scopes: Optional[Sequence[str]] = None,
                       quota_project_id: Optional[str] = None,
                       **kwargs) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """
        # The transport's class-level AUTH_SCOPES / DEFAULT_HOST act as
        # fallbacks when the caller supplies no explicit scopes or host.
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs
        )
    def __init__(self, *,
            host: str = 'securitycenter.googleapis.com',
            credentials: ga_credentials.Credentials = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            channel: aio.Channel = None,
            api_mtls_endpoint: str = None,
            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
            ssl_channel_credentials: grpc.ChannelCredentials = None,
            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
            quota_project_id=None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}

        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)

        if channel:
            # Ignore credentials if a channel was passed.
            # NOTE(review): ``False`` (rather than ``None``) presumably tells the
            # base transport not to look up ambient credentials — confirm against
            # the base class.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            # No pre-built channel: work out which SSL credentials to use.
            if api_mtls_endpoint:
                # Deprecated mTLS path: the mTLS endpoint replaces ``host``.
                host = api_mtls_endpoint

                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials

            else:
                # Current mTLS path: an explicit ssl_channel_credentials wins over
                # the client_cert_source_for_mtls callback.
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )

        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )

        if not self._grpc_channel:
            # Build the channel using the credentials/scopes resolved by the
            # base transport above.
            self._grpc_channel = type(self).create_channel(
                self._host,
                credentials=self._credentials,
                credentials_file=credentials_file,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )

        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def get_service_account(self) -> Callable[
[securitycenter_settings_service.GetServiceAccountRequest],
Awaitable[securitycenter_settings_service.ServiceAccount]]:
r"""Return a callable for the get service account method over gRPC.
Retrieves the organizations service account, if it
exists, otherwise it creates the organization service
account. This API is idempotent and will only create a
service account once. On subsequent calls it will return
the previously created service account. SHA, SCC and
CTD Infra Automation will use this SA. This SA will not
have any permissions when created. The UI will
provision this via IAM or the user will using their own
internal process. This API only creates SAs on the
organization. Folders are not supported and projects
will use per-project SAs associated with APIs enabled on
a project. This API will be called by the UX onboarding
workflow.
Returns:
Callable[[~.GetServiceAccountRequest],
Awaitable[~.ServiceAccount]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_service_account' not in self._stubs:
self._stubs['get_service_account'] = self.grpc_channel.unary_unary(
'/google.cloud.securitycenter.settings.v1beta1.SecurityCenterSettingsService/GetServiceAccount',
request_serializer=securitycenter_settings_service.GetServiceAccountRequest.serialize,
response_deserializer=securitycenter_settings_service.ServiceAccount.deserialize,
)
return self._stubs['get_service_account']
@property
def get_settings(self) -> Callable[
[securitycenter_settings_service.GetSettingsRequest],
Awaitable[settings.Settings]]:
r"""Return a callable for the get settings method over gRPC.
Gets the Settings.
Returns:
Callable[[~.GetSettingsRequest],
Awaitable[~.Settings]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_settings' not in self._stubs:
self._stubs['get_settings'] = self.grpc_channel.unary_unary(
'/google.cloud.securitycenter.settings.v1beta1.SecurityCenterSettingsService/GetSettings',
request_serializer=securitycenter_settings_service.GetSettingsRequest.serialize,
response_deserializer=settings.Settings.deserialize,
)
return self._stubs['get_settings']
@property
def update_settings(self) -> Callable[
[securitycenter_settings_service.UpdateSettingsRequest],
Awaitable[gcss_settings.Settings]]:
r"""Return a callable for the update settings method over gRPC.
Updates the Settings.
Returns:
Callable[[~.UpdateSettingsRequest],
Awaitable[~.Settings]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles | |
# wikidump/processors/user_warnings_templates_tokens.py (samuelebortolotti/wikidump)
"""Extract the most recurrent tokens of the template text"""
import json
import more_itertools
import mwxml
import datetime
from typing import Iterable, Iterator, Mapping, Optional
from backports.datetime_fromisoformat import MonkeyPatch
# nltk
from .. import extractors, user_warnings_en, user_warnings_it, user_warnings_es, user_warnings_ca, utils
import math
import random
# Polyfiller for retrocompatibiliy with Python3.5
MonkeyPatch.patch_fromisoformat()
# Maximum number of revisions held in the module-level sample cache below.
MAX_REVISION_CACHE = 100
# Module-level reservoir of revisions sampled from non-user-warning template
# pages; extract_pages() draws from it to extend the corpus used for the
# idf computation.
REVISION_STORAGE = list()
# Supported partitioning intervals for --set-interval, expressed in seconds.
time_interval_in_seconds = {
    '1 day': 86400,
    '1 week': 604800
}
# Union of all known user-warning template titles (lowercase) across the
# supported languages: English, Italian, Spanish and Catalan.
user_warnings_templates = set(
    user_warnings_en.block_templates_indefinitely_blocked_templates + \
    user_warnings_en.block_templates + \
    user_warnings_en.arbitration_enforcement_templates_1RR_related_templates + \
    user_warnings_en.arbitration_enforcement_templates_pages_with_discretionary_sanctions_editnotice + \
    user_warnings_en.arbitration_enforcement_templates + \
    user_warnings_en.csd_warning_templates + \
    user_warnings_en.community_authorised_general_sanctions_templates + \
    user_warnings_en.community_authorised_general_sanctions_templates_standardized + \
    user_warnings_en.community_authorised_general_sanctions_templates_obsolete + \
    user_warnings_en.non_english_welcome + \
    user_warnings_en.non_english + \
    user_warnings_en.test_templates + \
    user_warnings_en.standardized_templates + \
    user_warnings_en.user_warnings_templates + \
    user_warnings_it.avviso_utenti_anonimi + \
    user_warnings_it.benvenuto + \
    user_warnings_it.benvenuto_progetti + \
    user_warnings_it.avviso_copyright + \
    user_warnings_it.avviso_invito_progetti + \
    user_warnings_it.vandalismo + \
    user_warnings_es.bienvenida + \
    user_warnings_es.permission_grant_notification_templates + \
    user_warnings_es.user_warnings + \
    user_warnings_ca.benvinguda + \
    user_warnings_ca.Avisos_de_discussio + \
    user_warnings_ca.plantilles_d_avisos_d_edicio_generics + \
    user_warnings_ca.plantilles_d_avisos_d_edicio + \
    user_warnings_ca.plantilles_d_avisos_d_idioma + \
    user_warnings_ca.plantilles_d_avisos
)
# REVISION AND PAGE CLASSES
class Revision:
    """A single revision of a user-warning template page.

    Attributes:
        id: revision id.
        user: revision author (``mwxml.Revision.User`` or ``None`` for
            deleted/hidden users).
        timestamp: ISO-8601 timestamp string, with a trailing ``Z``.
        template_info: word statistics of the template text (stemmed,
            stop-words removed, with occurrence counts).
        words_to_search: the k words that best characterize the template,
            filled in later by ``extract_pages`` (k = total words / 2).
    """

    def __init__(self, id: str, user: 'mwxml.Revision.User', timestamp: str,
                 template_info: 'extractors.user_warnings_template_words.UserWarningTf'):
        # NOTE: annotations are strings so the class can be defined without
        # importing mwxml/extractors at evaluation time.
        self.id = id
        self.user = user
        self.timestamp = timestamp
        self.template_info = template_info
        self.words_to_search = list()

    def to_dict(self) -> dict:
        """Convert the object instance into a dictionary.

        Fix: the return annotation previously claimed ``str`` although a
        ``dict`` has always been returned.
        """
        obj = dict()
        obj['id'] = self.id
        # Deleted/hidden users serialize as empty strings.
        user_id = ''
        user_name = ''
        if self.user:
            user_id = self.user.id
            user_name = self.user.text
        obj['user_id'] = user_id
        obj['user_name'] = user_name
        obj['timestamp'] = self.timestamp
        obj['template_info'] = self.template_info.to_dict()
        obj['words_to_search'] = self.words_to_search
        return obj

    def __repr__(self):
        return 'date: {}'.format(self.timestamp)

    def __lt__(self, other):
        # Order revisions chronologically. 'Z' is rewritten to '+00:00'
        # because datetime.fromisoformat (pre-3.11) cannot parse 'Z'.
        return datetime.datetime.fromisoformat(self.timestamp.replace('Z', '+00:00')) \
            < datetime.datetime.fromisoformat(other.timestamp.replace('Z', '+00:00'))
class Page:
    """A template page together with its (filtered) revision history.

    Also carries the corpus-level information-retrieval metrics computed
    for this page: tf-idf per word/document, idf per word, and per-word
    document-occurrence counts over the corpus.
    """

    def __init__(self, id: str, namespace: str, title: str,
                 revisions: Iterator['Revision'], tfidf: Mapping,
                 idf: Mapping, occurences_in_corpus: Mapping):
        self.id = id                  # page id
        self.namespace = namespace    # page namespace
        self.title = title            # page title
        self.revisions = revisions    # list of revisions
        self.tfidf = tfidf            # tf-idf metrics
        # 1 per document in which the word appears, summed over the corpus
        self.occurences_in_corpus = occurences_in_corpus
        self.idf = idf                # idf metric in corpus

    def to_dict(self) -> Mapping:
        """Convert the object instance into a dictionary."""
        return {
            'id': self.id,
            'namespace': self.namespace,
            'title': self.title,
            'revisions': [rev.to_dict() for rev in self.revisions],
            'tf-idf': self.tfidf,
            # (sic) key spelling preserved for output compatibility
            'occurences_in_corupus': self.occurences_in_corpus,
            'idf': self.idf,
        }
def extract_revisions(
        mw_page: mwxml.Page,
        stats: Mapping,
        only_last_revision: bool,
        language: str,
        stemmer: bool) -> Iterator[Revision]:
    """Yield Revision objects for a user-warning template page.

    Each revision's template text is reduced to word statistics
    (stop-words removed and, if *stemmer* is set, stemmed for *language*).
    If *only_last_revision* is true, only the newest revision (by
    timestamp) is yielded, once the page's revision stream is exhausted;
    otherwise every revision is yielded as it is read.

    Side effect: increments ``stats['performance']['revisions_analyzed']``
    once per revision seen.
    """
    revisions = more_itertools.peekable(mw_page)
    # Newest revision seen so far; only used when only_last_revision is set.
    newest_revision = None
    for mw_revision in revisions:
        utils.dot()
        # True when the stream has no revision after this one.
        is_last_revision = not utils.has_next(revisions)
        # Strip HTML comments from the wikitext before extraction.
        text = utils.remove_comments(mw_revision.text or '')
        # Extract the template's word statistics.
        template_info = extractors.user_warnings_template_words.userwarnings_words_extractor(text, language, stemmer)
        # Build the revision object.
        rev = Revision(
            id=mw_revision.id,
            user=mw_revision.user,
            timestamp=mw_revision.timestamp.to_json(),
            template_info=template_info,
        )
        # Track the newest revision by timestamp; 'Z' is rewritten to
        # '+00:00' because fromisoformat (pre-3.11) cannot parse 'Z'.
        if not newest_revision:
            newest_revision = rev
        else:
            newest_date = datetime.datetime.fromisoformat(newest_revision.timestamp.replace('Z', '+00:00'))
            current_date = datetime.datetime.fromisoformat(mw_revision.timestamp.to_json().replace('Z', '+00:00'))
            # Replace the candidate if the current revision is newer.
            if newest_date < current_date:
                newest_revision = rev
        # Update stats.
        stats['performance']['revisions_analyzed'] += 1
        # When only the last revision is requested, defer the single yield
        # to the end of the stream.
        if only_last_revision:
            if is_last_revision:
                yield newest_revision
        else:
            yield rev
def extract_pages(
        dump: Iterable[mwxml.Page],
        stats: Mapping,
        only_last_revision: bool,
        set_interval: Optional[str],
        esclude_template_repetition: bool,
        language: str,
        stemmer: bool,
        minimum_word_length: int) -> Iterator[Page]:
    """Yield a Page (with tf-idf keywords) for each user-warning template page.

    Pages outside the template namespace (10) are skipped. Pages whose
    title is NOT a known user-warning template only feed the module-level
    REVISION_STORAGE sample; known user-warning template pages are scored
    against a corpus extended with samples from that storage, and yielded.

    Side effect: records per-template word occurrences and tf-idf under
    ``stats['user_warnings_templates'][language]``.
    """
    # NOTE(review): `counter` counts recognized user-warning pages but is
    # never read afterwards — presumably leftover debugging state.
    counter = 1
    # Loop on all the pages in the dump, one at a time
    for mw_page in dump:
        utils.log("Processing", mw_page.title)
        # Skip non-template, according to https://en.wikipedia.org/wiki/Wikipedia:Namespace
        if mw_page.namespace != 10:
            utils.log('Skipped (namespace != 10)')
            continue
        # store_flag: True when this page is NOT a known user-warning
        # template — its revisions then only populate REVISION_STORAGE.
        store_flag = False
        if not mw_page.title.lower() in user_warnings_templates:
            store_flag = True
        else:
            counter += 1
        revisions_generator = extract_revisions(
            mw_page,
            stats=stats,
            only_last_revision=only_last_revision,
            language=language,
            stemmer=stemmer
        )
        revisions_list = list(revisions_generator)
        # sort the revision list chronologically (Revision.__lt__)
        revisions_list.sort()
        # filtered revision list
        filtered_revisions_list = list()
        # reference revision against which interval/repetition checks run
        reference_rev = None
        # seed the filter with the oldest revision
        if revisions_list:
            reference_rev = revisions_list[0]
            filtered_revisions_list.append(reference_rev)
        # partition revisions by the time interval given by set_interval
        # and/or collapse consecutive identical template texts
        if set_interval or esclude_template_repetition:
            for elem in revisions_list:
                # timestamps of the last kept revision and the current one
                last_inserted_time = datetime.datetime.fromisoformat(reference_rev.timestamp.replace('Z', '+00:00'))
                current_time = datetime.datetime.fromisoformat(elem.timestamp.replace('Z', '+00:00'))
                condition = True
                if set_interval:
                    # current revision falls inside the same time window
                    condition = condition and (current_time - last_inserted_time).total_seconds() < time_interval_in_seconds[set_interval]
                if esclude_template_repetition:
                    # template text actually changed
                    condition = condition and reference_rev.template_info.template_text != elem.template_info.template_text
                if condition:
                    # same window: keep only the latest revision of the window
                    filtered_revisions_list[-1] = elem
                else:
                    # outside the window: append unless it repeats the
                    # reference template text (when repetition is excluded)
                    if not (esclude_template_repetition and reference_rev.template_info.template_text == elem.template_info.template_text):
                        filtered_revisions_list.append(elem)
                        reference_rev = elem
        else:
            # no filtering option selected: keep every revision
            filtered_revisions_list = revisions_list
        # drop revisions whose template text yielded no words
        filtered_revisions_list = [ rev for rev in filtered_revisions_list if rev.template_info.total_number_words != 0 ]
        if store_flag:
            # Non-user-warning page: feed the REVISION_STORAGE sample.
            rev_storage_size = len(REVISION_STORAGE)
            filtered_rev_size = len(filtered_revisions_list)
            if (rev_storage_size + filtered_rev_size) <= MAX_REVISION_CACHE:
                # room for everything: just extend the storage
                REVISION_STORAGE.extend(filtered_revisions_list)
            elif rev_storage_size <= MAX_REVISION_CACHE:
                # storage (nearly) full: overwrite a random-length prefix
                # NOTE(review): randrange(min_length) replaces a *random
                # number* of leading entries — looks like ad-hoc reservoir
                # sampling; confirm this is the intended policy.
                min_length = min(rev_storage_size, filtered_rev_size)
                for i in range(random.randrange(min_length)):
                    REVISION_STORAGE[i] = filtered_revisions_list[i]
            else:
                # top up the storage to MAX_REVISION_CACHE, then replace
                # random slots with the remaining revisions
                filtered_rev_counter = 0
                while(rev_storage_size < MAX_REVISION_CACHE):
                    REVISION_STORAGE.append(filtered_revisions_list[filtered_rev_counter])
                    filtered_rev_counter += 1
                    rev_storage_size += 1
                for index in range(filtered_rev_counter, filtered_rev_size):
                    rev_storage_index = random.randrange(rev_storage_size)
                    REVISION_STORAGE[rev_storage_index] = filtered_revisions_list[index]
        else:
            # User-warning template page: build an extended corpus of this
            # page's revisions plus random samples from REVISION_STORAGE
            # (one sample per revision), so idf is computed against a mix
            # of warning and non-warning template texts.
            extended_corpus = list(filtered_revisions_list)
            rev_range_size = len(REVISION_STORAGE)
            for index in range(len(filtered_revisions_list)):
                extended_corpus.append(REVISION_STORAGE[random.randrange(rev_range_size)])
            # document-frequency per word (over the extended corpus)
            is_in_document_dict = dict()
            corpus_size = len(extended_corpus)
            # words of interest: every word of this page's revisions
            words_list = set()
            for revision in filtered_revisions_list:
                for word in revision.template_info.inf_retrieval:
                    words_list.add(word)
            # count, for each interesting word, the documents containing it
            for revision in extended_corpus:
                for word in revision.template_info.inf_retrieval:
                    if word in words_list:
                        if not word in is_in_document_dict:
                            is_in_document_dict[word] = 1
                        else:
                            is_in_document_dict[word] += 1
            # idf per word: log10(corpus size / document frequency)
            idf_dict = dict()
            for word in is_in_document_dict:
                idf_dict[word] = math.log(corpus_size / is_in_document_dict[word], 10)
            # tf-idf per word and per document (revision); the corpus is
            # fixed, so it is indexed by word then document position
            tfidf = dict()
            for word in is_in_document_dict:
                tfidf[word] = dict()
                for doc_index in range(len(filtered_revisions_list)):
                    rev = filtered_revisions_list[doc_index]
                    # term frequency of the word in this revision
                    if word in rev.template_info.inf_retrieval:
                        tf = rev.template_info.inf_retrieval[word] / rev.template_info.total_number_words
                    else:
                        tf = 0
                    # tf-idf = tf * idf
                    tfidf[word][doc_index] = tf * idf_dict[word]
                    # accumulate (word, score) candidates on the revision
                    rev.words_to_search.append((word, tfidf[word][doc_index]))
            # reduce each revision's candidates to its top keywords
            for rev in filtered_revisions_list:
                # keep at most half of the revision's total word count
                k = int(rev.template_info.total_number_words / 2)
                # sort candidates by descending tf-idf score
                rev.words_to_search.sort(key = lambda a: a[1], reverse=True)
                # optionally drop words at or below the minimum length
                # (in-place compaction: survivors shift to the front)
                if minimum_word_length:
                    index = 0
                    for word,_ in rev.words_to_search:
                        if len(word) > minimum_word_length:
                            rev.words_to_search[index] = (word,_)
                            index += 1
                    rev.words_to_search = rev.words_to_search[:index]
                # keep only the k words with the highest tf-idf score
                rev.words_to_search = [ el[0] for el in rev.words_to_search[:k] ]
            # stats update
            if not language in stats['user_warnings_templates']:
                stats['user_warnings_templates'][language] = dict()
            stats['user_warnings_templates'][language][mw_page.title] = dict()
            stats['user_warnings_templates'][language][mw_page.title]['word_occurences'] = is_in_document_dict
            stats['user_warnings_templates'][language][mw_page.title]['tf-idf'] = tfidf
            page = Page(
                id=mw_page.id,
                namespace=mw_page.namespace,
                title=mw_page.title,
                revisions=filtered_revisions_list,
                tfidf=tfidf,
                idf=idf_dict,
                occurences_in_corpus=is_in_document_dict
            )
            yield page
def configure_subparsers(subparsers):
"""Configure a new subparser for the known languages."""
parser = subparsers.add_parser(
| |
scalar estimate), we preserve the uncertainty that reflects the instability of statistical inference of a small N dataset.
#
# One may think that for large $N$, one can be indifferent between the two techniques since they offer similar inference, and might lean towards the computationally-simpler, frequentist methods. An individual in this position should consider the following quote by Andrew Gelman (2005)[[1]](#scrollTo=nDdph0r1ABCn), before making such a decision:
#
# Sample sizes are never large. If $N$ is too small to get a sufficiently-precise estimate, you need to get more data (or make more assumptions). But once $N$ is "large enough," you can start subdividing the data to learn more (for example, in a public opinion poll, once you have a good estimate for the entire country, you can estimate among men and women, northerners and southerners, different age groups, etc.). $N$ is never enough because if it were "enough" you'd already be on to the next problem for which you need more data.
#
#
# + {"colab_type": "text", "id": "rACyvZBVdqB9", "cell_type": "markdown"}
# ## Are frequentist methods incorrect then?
# No.
#
# Frequentist methods are still useful or state-of-the-art in many areas. Tools such as least squares linear regression, LASSO regression, and expectation-maximization algorithms are all powerful and fast. Bayesian methods complement these techniques by solving problems that these approaches cannot, or by illuminating the underlying system with more flexible modeling.
#
# ### A note on *Big Data*
# Paradoxically, big data's predictive analytic problems are actually solved by relatively simple algorithms [[2]](#scrollTo=nDdph0r1ABCn)[[3]](#scrollTo=nDdph0r1ABCn). Thus we can argue that big data's prediction difficulty does not lie in the algorithm used, but instead on the computational difficulties of storage and execution on big data. (One should also consider Gelman's quote from above and ask "Do I really have big data?")
#
# The much more difficult analytic problems involve medium data and, especially troublesome, really small data. Using a similar argument as Gelman's above, if big data problems are big enough to be readily solved, then we should be more interested in the not-quite-big enough datasets.
# + {"colab_type": "text", "id": "TTUDkI8peKw6", "cell_type": "markdown"}
# ## Our Bayesian framework
# We are interested in beliefs, which can be interpreted as probabilities by thinking Bayesian. We have a prior belief in event A, beliefs formed by previous information, e.g., our prior belief about bugs being in our code before performing tests.
#
# Secondly, we observe our evidence. To continue our buggy-code example: if our code passes X tests, we want to update our belief to incorporate this. We call this new belief the posterior probability. Updating our belief is done via the following equation, known as Bayes' Theorem, after its discoverer Thomas Bayes:
#
# $$ P(A|X) = \frac{P(X | A) P(A) }{P(X) } $$
#
# $$ P(A|X) \propto{P(X | A) P(A) } $$
#
# NOTE: ($\propto$ is "proportional to")
#
#
# The above formula is not unique to Bayesian inference: it is a mathematical fact with uses outside Bayesian inference. Bayesian inference merely uses it to connect prior probabilities $P(A)$ with an updated posterior probabilities $P(A|X)$.
# + {"colab_type": "text", "id": "DkB3Ou8UjW-F", "cell_type": "markdown"}
#
# ## Example: Mandatory coin-flip example
# Every statistics text must contain a coin-flipping example, I'll use it here to get it out of the way. Suppose, naively, that you are unsure about the probability of heads in a coin flip (spoiler alert: it's 50%). You believe there is some true underlying ratio, call it p, but have no prior opinion on what p might be.
#
# We begin to flip a coin, and record the observations: either H or T. This is our observed data. An interesting question to ask is how our inference changes as we observe more and more data? More specifically, what do our posterior probabilities look like when we have little data, versus when we have lots of data.
#
# Below we plot a sequence of updating posterior probabilities as we observe increasing amounts of data (coin flips), while also demonstrating some of the best practices when it comes to evaluating tensors and plotting the data. First, the easy part: We define the values in our Tensorflow graph
# + {"colab": {}, "colab_type": "code", "id": "yFd9GboD7hVV"}
# Build Graph
# Fair-coin model: each flip is Bernoulli(0.5), sampled as int32 (0 tails, 1 heads).
rv_coin_flip_prior = tfp.distributions.Bernoulli(probs=0.5, dtype=tf.int32)
# Checkpoints (numbers of flips) at which the posterior will be plotted.
num_trials = tf.constant([0,1, 2, 3, 4, 5, 8, 15, 50, 500, 1000, 2000])
# Simulate the full 2000-flip experiment once.
coin_flip_data = rv_coin_flip_prior.sample(num_trials[-1])
# prepend a 0 onto tally of heads and tails, for zeroth flip
coin_flip_data = tf.pad(coin_flip_data,tf.constant([[1, 0,]]),"CONSTANT")
# compute cumulative headcounts from 0 to 2000 flips, and then grab them at each of num_trials intervals
cumulative_headcounts = tf.gather(tf.cumsum(coin_flip_data), num_trials)
# Beta(1 + heads, 1 + tails): the conjugate posterior under a uniform
# Beta(1, 1) prior (see the "conj. prior" note in the plotting cell).
rv_observed_heads = tfp.distributions.Beta(
    concentration1=tf.cast(1 + cumulative_headcounts, tf.float32),
    concentration0=tf.cast(1 + num_trials - cumulative_headcounts, tf.float32))
# Evaluate each posterior density on a 100-point grid over [0, 1];
# transpose so each row holds one checkpoint's density curve.
probs_of_heads = tf.linspace(start=0., stop=1., num=100, name="linspace")
observed_probs_heads = tf.transpose(rv_observed_heads.prob(probs_of_heads[:, tf.newaxis]))
# + {"colab_type": "text", "id": "eVh-ugqN8NRy", "cell_type": "markdown"}
# Next we move onto executing the graph. When it comes to calculations that need to be made frequently and repeatedly, this method of first-defining and then executing graphs provides a handy speed boost. We can actually use a custom `evaluate()` function that allows us to evaluate tensors whether we are operating in TF Graph mode, or whether we have Eager mode active. The function looks like the following:
#
# ```python
#
# def evaluate(tensors):
# """Evaluates Tensor or EagerTensor to Numpy `ndarray`s.
# Args:
# tensors: Object of `Tensor` or EagerTensor`s; can be `list`, `tuple`,
# `namedtuple` or combinations thereof.
#
# Returns:
# ndarrays: Object with same structure as `tensors` except with `Tensor` or
# `EagerTensor`s replaced by Numpy `ndarray`s.
# """
# if tf.executing_eagerly():
# return tf.contrib.framework.nest.pack_sequence_as(
# tensors,
# [t.numpy() if tf.contrib.framework.is_tensor(t) else t
# for t in tf.contrib.framework.nest.flatten(tensors)])
# return sess.run(tensors)
#
# ```
#
# To plot the tensors, we need to convert them into numpy variables. One handy way of associating tensors with their corrresponding numpy variables is to append an underscore to the numpy-like arrays. For example, if the input to `evaluate()` is `variable`, then we assign that value to `variable_`. Below we see an example of how we use both `evaluate()` and this new styling.
# + {"colab": {}, "colab_type": "code", "id": "Ex3djpOu7_-m"}
# Execute graph: evaluate() converts the tensors into numpy arrays,
# whether running in graph or eager mode. Per the convention described
# above, evaluated (numpy) values carry a trailing underscore.
[num_trials_,
 probs_of_heads_,
 observed_probs_heads_,
 cumulative_headcounts_,
] = evaluate([
    num_trials,
    probs_of_heads,
    observed_probs_heads,
    cumulative_headcounts
])
# + {"colab_type": "text", "id": "IUAm6LEA8FFW", "cell_type": "markdown"}
# Finally, we move onto plotting our evaluated tensors in matplotlib.
# + {"colab": {"base_uri": "https://localhost:8080/", "height": 697}, "colab_type": "code", "id": "4fdWiFUT6H-A", "outputId": "a232d7e8-6825-4e41-f363-1f6c07b176e7"}
# For the already prepared, I'm using Binomial's conj. prior.
# Plot one subplot per checkpoint: the Beta posterior density after
# num_trials_[i] flips, shaded, with the true p=0.5 marked by a dashed line.
plt.figure(figsize(16, 9))
for i in range(len(num_trials_)):
    # Fix: use integer division — plt.subplot requires integral grid
    # dimensions, and len(num_trials_)/2 is a float under Python 3.
    sx = plt.subplot(len(num_trials_) // 2, 2, i+1)
    # Label the x-axis only on the first and last subplot.
    plt.xlabel("$p$, probability of heads") \
        if i in [0, len(num_trials_)-1] else None
    plt.setp(sx.get_yticklabels(), visible=False)
    plt.plot(probs_of_heads_, observed_probs_heads_[i],
             label="observe %d tosses,\n %d heads" % (num_trials_[i], cumulative_headcounts_[i]))
    plt.fill_between(probs_of_heads_, 0, observed_probs_heads_[i],
                     color=TFColor[3], alpha=0.4)
    plt.vlines(0.5, 0, 4, color="k", linestyles="--", lw=1)
    leg = plt.legend()
    leg.get_frame().set_alpha(0.4)
    plt.autoscale(tight=True)
plt.suptitle("Bayesian updating of posterior probabilities", y=1.02,
             fontsize=14)
plt.tight_layout()
# + {"colab_type": "text", "id": "jTqKXlGRmKuh", "cell_type": "markdown"}
# The posterior probabilities are represented by the curves, and our uncertainty is proportional to the width of the curve. As the plot above shows, as we start to observe data our posterior probabilities start to shift and move around. Eventually, as we observe more and more data (coin-flips), our probabilities will tighten closer and closer around the true value of $p=0.5$ (marked by a dashed line).
#
# Notice that the plots are not always peaked at 0.5. There is no reason it should be: recall we assumed we did not have a prior opinion of what p is. In fact, if we observe quite extreme data, say 8 flips and only 1 observed heads, our distribution would look very biased away from lumping around 0.5 (with no prior opinion, how confident would you feel betting on a fair coin after observing 8 tails and 1 head?). As more data accumulates, we would see more and more probability being assigned at $p=0.5$, though never all of it.
#
# The next example is a simple demonstration of the mathematics of Bayesian inference.
# + {"colab_type": "text", "id": "5UKnxit-mevN", "cell_type": "markdown"}
# ## Example: Bug, or just sweet, unintended feature?
# Let $A$ denote the event that our code has no bugs in it. Let $X$ denote the event that the code passes all debugging tests. | |
"""
Update components for HySDS.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import os
from fabric.api import execute, hide
from tqdm import tqdm
from prompt_toolkit.shortcuts import prompt, print_tokens
from prompt_toolkit.styles import style_from_dict
from pygments.token import Token
from sdscli.log_utils import logger
from sdscli.conf_utils import get_user_files_path, SettingsConf
from sdscli.prompt_utils import YesNoValidator, set_bar_desc
from . import fabfile as fab
# NOTE(review): the original color values were stripped by an anonymization
# pass (replaced with "<PASSWORD>" placeholders, leaving invalid syntax).
# The values below restore a syntactically valid prompt_toolkit style dict;
# confirm the exact colors against upstream sdscli before release.
prompt_style = style_from_dict({
    Token.Alert: 'bg:#D8060C',
    Token.Username: '#D8060C',
    Token.Param: '#3CFF33',
})
def update_mozart(conf, ndeps=False, config_only=False, comp='mozart'):
    """Update the mozart component.

    Re-installs the HySDS core packages (skipped when *config_only*),
    regenerates every mozart configuration file from templates, rebuilds
    the HySDS UI, then refreshes the verdi code/config bundle hosted on
    the mozart node. All remote work is done via fabric `execute` calls
    against the *comp* role, with a tqdm progress bar.

    Args:
        conf: cluster settings (SettingsConf) — ES clusters, redis IPs.
        ndeps: if True, install packages without their dependencies.
        config_only: if True, only refresh configuration files.
        comp: fabric role/component name to operate on.
    """
    num_updates = 31 if config_only else 45  # number of progress bar updates
    with tqdm(total=num_updates) as bar:  # progress bar
        # ensure venv
        set_bar_desc(bar, 'Ensuring HySDS venv')
        execute(fab.ensure_venv, comp, roles=[comp])
        bar.update()
        # stop services
        set_bar_desc(bar, 'Stopping mozartd')
        execute(fab.mozartd_stop, roles=[comp])
        bar.update()
        # update reqs (pip install each HySDS core package, then the UI deps)
        if not config_only:
            set_bar_desc(bar, 'Updating HySDS core')
            execute(fab.pip_install_with_req, 'mozart', '~/mozart/ops/osaka', ndeps, roles=[comp])
            bar.update()
            execute(fab.pip_install_with_req, 'mozart', '~/mozart/ops/prov_es', ndeps, roles=[comp])
            bar.update()
            execute(fab.pip_install_with_req, 'mozart', '~/mozart/ops/hysds_commons', ndeps, roles=[comp])
            bar.update()
            execute(fab.pip_install_with_req, 'mozart', '~/mozart/ops/hysds', ndeps, roles=[comp])
            bar.update()
            execute(fab.pip_install_with_req, 'mozart', '~/mozart/ops/sciflo', ndeps, roles=[comp])
            bar.update()
            execute(fab.pip_install_with_req, 'mozart', '~/mozart/ops/chimera', ndeps, roles=[comp])
            bar.update()
            execute(fab.pip_install_with_req, 'mozart', '~/mozart/ops/mozart', ndeps, roles=[comp])
            bar.update()
            execute(fab.npm_install_package_json, '~/mozart/ops/hysds_ui', roles=[comp])
            bar.update()
        # set default ES shard number
        set_bar_desc(bar, 'Setting default ES shard number')
        execute(fab.install_base_es_template, roles=[comp])
        bar.update()
        # update logstash jvm.options to increase heap size
        set_bar_desc(bar, 'Updating logstash jvm.options')
        execute(fab.send_logstash_jvm_options, 'mozart', roles=[comp])
        bar.update()
        # update celery config (remove stale .py/.pyc before re-templating)
        set_bar_desc(bar, 'Updating celery config')
        execute(fab.rm_rf, '~/mozart/ops/hysds/celeryconfig.py', roles=[comp])
        execute(fab.rm_rf, '~/mozart/ops/hysds/celeryconfig.pyc', roles=[comp])
        execute(fab.send_celeryconf, 'mozart', roles=[comp])
        bar.update()
        # update supervisor config
        set_bar_desc(bar, 'Updating supervisor config')
        execute(fab.rm_rf, '~/mozart/etc/supervisord.conf', roles=[comp])
        execute(fab.send_template_user_override, 'supervisord.conf.mozart',
                '~/mozart/etc/supervisord.conf', '~/mozart/ops/hysds/configs/supervisor',
                roles=[comp])
        bar.update()
        # update orchestrator config
        set_bar_desc(bar, 'Updating orchestrator config')
        execute(fab.rm_rf, '~/mozart/etc/orchestrator_*.json', roles=[comp])
        execute(fab.copy, '~/mozart/ops/hysds/configs/orchestrator/orchestrator_jobs.json',
                '~/mozart/etc/orchestrator_jobs.json', roles=[comp])
        execute(fab.copy, '~/mozart/ops/hysds/configs/orchestrator/orchestrator_datasets.json',
                '~/mozart/etc/orchestrator_datasets.json', roles=[comp])
        bar.update()
        # update job_creators
        set_bar_desc(bar, 'Updating job_creators')
        execute(fab.rm_rf, '~/mozart/etc/job_creators', roles=[comp])
        execute(fab.cp_rp, '~/mozart/ops/hysds/scripts/job_creators',
                '~/mozart/etc/', roles=[comp])
        bar.update()
        # update datasets config; overwrite datasets config with domain-specific config
        set_bar_desc(bar, 'Updating datasets config')
        execute(fab.rm_rf, '~/mozart/etc/datasets.json', roles=[comp])
        execute(fab.send_template, 'datasets.json',
                '~/mozart/etc/datasets.json', roles=[comp])
        bar.update()
        # ship logstash shipper configs
        set_bar_desc(bar, 'Updating logstash shipper config')
        execute(fab.send_shipper_conf, 'mozart', '~/mozart/log', conf.get('MOZART_ES_CLUSTER'),
                '127.0.0.1', conf.get('METRICS_ES_CLUSTER'),
                conf.get('METRICS_REDIS_PVT_IP'), roles=[comp])
        bar.update()
        # update HySDS scripts
        set_bar_desc(bar, 'Updating HySDS scripts')
        execute(fab.send_hysds_scripts, 'mozart', roles=[comp])
        bar.update()
        # update mozart config
        set_bar_desc(bar, 'Updating mozart config')
        execute(fab.rm_rf, '~/mozart/ops/mozart/settings.cfg', roles=[comp])
        execute(fab.send_mozartconf, roles=[comp])
        execute(fab.rm_rf, '~/mozart/ops/mozart/actions_config.json',
                roles=[comp])
        execute(fab.copy, '~/mozart/ops/mozart/configs/actions_config.json.example',
                '~/mozart/ops/mozart/actions_config.json', roles=[comp])
        bar.update()
        # update hysds_ui config
        set_bar_desc(bar, 'Updating hysds_ui config')
        execute(fab.rm_rf, '~/mozart/ops/hysds_ui/src/config/index.js', roles=[comp])
        execute(fab.send_hysds_ui_conf, roles=[comp])
        bar.update()
        # building HySDS UI
        set_bar_desc(bar, 'Building HySDS UI')
        execute(fab.build_hysds_ui, roles=[comp])
        bar.update()
        # create user_rules index
        set_bar_desc(bar, 'Creating user_rules index')
        execute(fab.create_user_rules_index, roles=[comp])
        bar.update()
        # create hysds_ios index
        # NOTE(review): the bar text below says "grq" although this runs on
        # mozart — looks copy-pasted from the grq updater; confirm intent.
        set_bar_desc(bar, 'Creating hysds_ios-grq index')
        execute(fab.create_hysds_ios_index, roles=[comp])
        bar.update()
        # ensure self-signed SSL certs exist
        set_bar_desc(bar, 'Configuring SSL')
        execute(fab.ensure_ssl, 'mozart', roles=[comp])
        bar.update()
        # link ssl certs to apps
        execute(fab.ln_sf, '~/ssl/server.key', '~/mozart/ops/mozart/server.key', roles=[comp])
        execute(fab.ln_sf, '~/ssl/server.pem', '~/mozart/ops/mozart/server.pem', roles=[comp])
        bar.update()
        # expose hysds log dir via webdav
        set_bar_desc(bar, 'Expose logs')
        execute(fab.mkdir, '/data/work', None, None, roles=[comp])
        execute(fab.ln_sf, '~/mozart/log', '/data/work/log', roles=[comp])
        bar.update()
        # ship netrc
        set_bar_desc(bar, 'Configuring netrc')
        execute(fab.send_template, 'netrc.mozart',
                '.netrc', node_type='mozart', roles=[comp])
        execute(fab.chmod, 600, '.netrc', roles=[comp])
        bar.update()
        # update ES template
        set_bar_desc(bar, 'Update ES template')
        execute(fab.install_pkg_es_templates, roles=[comp])
        bar.update()
        # ship AWS creds
        set_bar_desc(bar, 'Configuring AWS creds')
        execute(fab.send_awscreds, roles=[comp])
        bar.update()
        set_bar_desc(bar, 'Updated mozart')
        # --- second phase: refresh the verdi code/config bundle ---
        set_bar_desc(bar, 'Ensuring HySDS venv')
        execute(fab.rm_rf, '~/verdi', roles=[comp])
        execute(fab.ensure_venv, 'verdi',
                update_bash_profile=False, roles=[comp])
        bar.update()
        # remove code bundle stuff
        set_bar_desc(bar, 'Remove code bundle')
        execute(fab.rm_rf, '~/verdi/ops/etc', roles=[comp])
        execute(fab.rm_rf, '~/verdi/ops/install.sh', roles=[comp])
        bar.update()
        # sync the verdi package tree from the local checkout
        set_bar_desc(bar, 'Syncing packages')
        execute(fab.rm_rf, '~/verdi/ops/*', roles=[comp])
        execute(fab.rsync_code, 'verdi', roles=[comp])
        execute(fab.set_spyddder_settings, roles=[comp])
        bar.update()
        # update reqs
        if not config_only:
            set_bar_desc(bar, 'Updating HySDS core')
            execute(fab.pip_install_with_req, 'verdi',
                    '~/verdi/ops/osaka', ndeps, roles=[comp])
            bar.update()
            execute(fab.pip_install_with_req, 'verdi',
                    '~/verdi/ops/prov_es', ndeps, roles=[comp])
            bar.update()
            execute(fab.pip_install_with_req, 'verdi',
                    '~/verdi/ops/hysds_commons', ndeps, roles=[comp])
            bar.update()
            execute(fab.pip_install_with_req, 'verdi',
                    '~/verdi/ops/hysds', ndeps, roles=[comp])
            bar.update()
            execute(fab.pip_install_with_req, 'verdi',
                    '~/verdi/ops/sciflo', ndeps, roles=[comp])
            bar.update()
            execute(fab.pip_install_with_req, 'verdi',
                    '~/verdi/ops/chimera', ndeps, roles=[comp])
            bar.update()
        # update celery config (verdi bundle targets the verdi-asg profile)
        set_bar_desc(bar, 'Updating celery config')
        execute(fab.rm_rf, '~/verdi/ops/hysds/celeryconfig.py', roles=[comp])
        execute(fab.rm_rf, '~/verdi/ops/hysds/celeryconfig.pyc', roles=[comp])
        execute(fab.send_celeryconf, 'verdi-asg', roles=[comp])
        bar.update()
        # update supervisor config
        set_bar_desc(bar, 'Updating supervisor config')
        execute(fab.rm_rf, '~/verdi/etc/supervisord.conf', roles=[comp])
        execute(fab.send_template_user_override, 'supervisord.conf.verdi',
                '~/verdi/etc/supervisord.conf', '~/mozart/ops/hysds/configs/supervisor',
                roles=[comp])
        bar.update()
        # update datasets config; overwrite datasets config with domain-specific config
        set_bar_desc(bar, 'Updating datasets config')
        execute(fab.rm_rf, '~/verdi/etc/datasets.json', roles=[comp])
        execute(fab.send_template, 'datasets.json',
                '~/verdi/etc/datasets.json', roles=[comp])
        bar.update()
        # ship logstash shipper configs
        set_bar_desc(bar, 'Updating logstash shipper config')
        execute(fab.send_shipper_conf, 'verdi-asg', '~/verdi/log', conf.get('MOZART_ES_CLUSTER'),
                conf.get('MOZART_REDIS_PVT_IP'), conf.get('METRICS_ES_CLUSTER'),
                conf.get('METRICS_REDIS_PVT_IP'), roles=[comp])
        bar.update()
        # update HySDS scripts
        set_bar_desc(bar, 'Updating HySDS scripts')
        execute(fab.send_hysds_scripts, 'verdi-asg', roles=[comp])
        bar.update()
        # ship netrc, only when a user-supplied netrc file exists
        netrc = os.path.join(get_user_files_path(), 'netrc')
        if os.path.exists(netrc):
            set_bar_desc(bar, 'Configuring netrc')
            execute(fab.send_template, 'netrc', '.netrc.verdi', roles=[comp])
            execute(fab.chmod, 600, '.netrc.verdi', roles=[comp])
        # ship AWS creds
        set_bar_desc(bar, 'Configuring AWS creds')
        execute(fab.send_awscreds, suffix='.verdi', roles=[comp])
        bar.update()
        set_bar_desc(bar, 'Updated verdi code/config')
def update_metrics(conf, ndeps=False, config_only=False, comp='metrics'):
    """Update the metrics component.

    Unless ``config_only`` is set, rsyncs the HySDS code to the metrics
    host and reinstalls the core packages; then refreshes the celery,
    supervisor, datasets, logstash (jvm + shipper) and kibana
    configuration, exposes the log dir via webdav, and re-ships AWS creds.

    :param conf: settings object; must provide ``MOZART_ES_CLUSTER``,
        ``MOZART_REDIS_PVT_IP`` and ``METRICS_ES_CLUSTER`` via ``get()``
    :param ndeps: if True, pip installs run without pulling dependencies
    :param config_only: if True, skip code sync and package installs
    :param comp: fabric role name of the metrics host
    """
    # NOTE(review): 15/22 must match the number of bar.update() calls on
    # each path — re-count whenever steps are added or removed.
    num_updates = 15 if config_only else 22 # number of progress bar updates
    with tqdm(total=num_updates) as bar: # progress bar
        # ensure venv
        set_bar_desc(bar, 'Ensuring HySDS venv')
        execute(fab.ensure_venv, comp, roles=[comp])
        bar.update()
        # stop services
        set_bar_desc(bar, 'Stopping metricsd')
        execute(fab.metricsd_stop, roles=[comp])
        bar.update()
        # update (full code sync + package reinstall)
        if not config_only:
            set_bar_desc(bar, 'Syncing packages')
            execute(fab.rm_rf, '~/metrics/ops/*', roles=[comp])
            execute(fab.rsync_code, 'metrics', roles=[comp])
            bar.update()
            # update reqs
            set_bar_desc(bar, 'Updating HySDS core')
            execute(fab.pip_install_with_req, 'metrics',
                    '~/metrics/ops/osaka', ndeps, roles=[comp])
            bar.update()
            execute(fab.pip_install_with_req, 'metrics',
                    '~/metrics/ops/prov_es', ndeps, roles=[comp])
            bar.update()
            execute(fab.pip_install_with_req, 'metrics',
                    '~/metrics/ops/hysds_commons', ndeps, roles=[comp])
            bar.update()
            execute(fab.pip_install_with_req, 'metrics',
                    '~/metrics/ops/hysds', ndeps, roles=[comp])
            bar.update()
            execute(fab.pip_install_with_req, 'metrics',
                    '~/metrics/ops/sciflo', ndeps, roles=[comp])
            bar.update()
            execute(fab.pip_install_with_req, 'metrics',
                    '~/metrics/ops/chimera', ndeps, roles=[comp])
            bar.update()
        # update logstash jvm.options to increase heap size
        set_bar_desc(bar, 'Updating logstash jvm.options')
        execute(fab.send_logstash_jvm_options, 'metrics', roles=[comp])
        bar.update()
        # update celery config
        set_bar_desc(bar, 'Updating celery config')
        execute(fab.rm_rf, '~/metrics/ops/hysds/celeryconfig.py', roles=[comp])
        bar.update()
        execute(fab.rm_rf, '~/metrics/ops/hysds/celeryconfig.pyc',
                roles=[comp])
        bar.update()
        execute(fab.send_celeryconf, 'metrics', roles=[comp])
        bar.update()
        # update supervisor config
        set_bar_desc(bar, 'Updating supervisor config')
        execute(fab.rm_rf, '~/metrics/etc/supervisord.conf', roles=[comp])
        bar.update()
        execute(fab.send_template_user_override, 'supervisord.conf.metrics',
                '~/metrics/etc/supervisord.conf', '~/mozart/ops/hysds/configs/supervisor',
                roles=[comp])
        bar.update()
        # update datasets config; overwrite datasets config with domain-specific config
        set_bar_desc(bar, 'Updating datasets config')
        execute(fab.rm_rf, '~/metrics/etc/datasets.json', roles=[comp])
        bar.update()
        execute(fab.send_template, 'datasets.json',
                '~/metrics/etc/datasets.json', roles=[comp])
        bar.update()
        # ship logstash shipper configs
        # NOTE(review): metrics redis is addressed as 127.0.0.1 —
        # presumably redis runs locally on the metrics box; confirm.
        set_bar_desc(bar, 'Updating logstash shipper config')
        execute(fab.send_shipper_conf, 'metrics', '~/metrics/log', conf.get('MOZART_ES_CLUSTER'),
                conf.get('MOZART_REDIS_PVT_IP'), conf.get('METRICS_ES_CLUSTER'),
                '127.0.0.1', roles=[comp])
        bar.update()
        # update HySDS scripts
        set_bar_desc(bar, 'Updating HySDS scripts')
        execute(fab.send_hysds_scripts, 'metrics', roles=[comp])
        bar.update()
        # ship kibana config
        set_bar_desc(bar, 'Updating kibana config')
        execute(fab.send_template, 'kibana.yml',
                '~/kibana/config/kibana.yml', roles=[comp])
        bar.update()
        # expose hysds log dir via webdav
        set_bar_desc(bar, 'Expose logs')
        execute(fab.mkdir, '/data/work', None, None, roles=[comp])
        execute(fab.ln_sf, '~/metrics/log', '/data/work/log', roles=[comp])
        bar.update()
        # ship AWS creds
        set_bar_desc(bar, 'Configuring AWS creds')
        execute(fab.send_awscreds, roles=[comp])
        bar.update()
        set_bar_desc(bar, 'Updated metrics')
def update_grq(conf, ndeps=False, config_only=False, comp='grq'):
""""Update grq component."""
num_updates = 18 if config_only else 27 # number of progress bar updates
with tqdm(total=num_updates) as bar: # progress bar
# ensure venv
set_bar_desc(bar, 'Ensuring HySDS venv')
execute(fab.ensure_venv, 'sciflo', roles=[comp])
bar.update()
# stop services
set_bar_desc(bar, 'Stopping grqd')
execute(fab.grqd_stop, roles=[comp])
bar.update()
# update
if not config_only:
set_bar_desc(bar, 'Syncing packages')
execute(fab.rm_rf, '~/sciflo/ops/*', roles=[comp])
execute(fab.rsync_code, 'grq', 'sciflo', roles=[comp])
execute(fab.pip_upgrade, 'gunicorn', 'sciflo',
roles=[comp]) # ensure latest gunicorn
bar.update()
# update reqs
set_bar_desc(bar, 'Updating HySDS core')
execute(fab.pip_install_with_req, 'sciflo',
'~/sciflo/ops/osaka', ndeps, roles=[comp])
bar.update()
execute(fab.pip_install_with_req, 'sciflo',
'~/sciflo/ops/prov_es', ndeps, roles=[comp])
bar.update()
execute(fab.pip_install_with_req, 'sciflo',
'~/sciflo/ops/hysds_commons', ndeps, roles=[comp])
bar.update()
execute(fab.pip_install_with_req, 'sciflo',
'~/sciflo/ops/hysds', ndeps, roles=[comp])
bar.update()
execute(fab.pip_install_with_req, 'sciflo',
'~/sciflo/ops/sciflo', ndeps, roles=[comp])
bar.update()
execute(fab.pip_install_with_req, 'sciflo',
'~/sciflo/ops/chimera', ndeps, roles=[comp])
bar.update()
execute(fab.pip_install_with_req, 'sciflo',
'~/sciflo/ops/grq2', ndeps, roles=[comp])
bar.update()
execute(fab.pip_install_with_req, 'sciflo',
'~/sciflo/ops/pele', ndeps, roles=[comp])
bar.update()
# set default ES shard number
set_bar_desc(bar, 'Setting default ES shard number')
execute(fab.install_base_es_template, roles=[comp])
bar.update()
# update celery config
set_bar_desc(bar, 'Updating celery config')
execute(fab.rm_rf, '~/sciflo/ops/hysds/celeryconfig.py', roles=[comp])
execute(fab.rm_rf, '~/sciflo/ops/hysds/celeryconfig.pyc', roles=[comp])
execute(fab.send_celeryconf, 'grq', roles=[comp])
bar.update()
# update grq2 config
set_bar_desc(bar, 'Updating grq2 config')
execute(fab.rm_rf, '~/sciflo/ops/grq2/settings.cfg', roles=[comp])
execute(fab.send_grq2conf, roles=[comp])
bar.update()
# update pele config
set_bar_desc(bar, 'Updating pele config')
execute(fab.rm_rf, '~/sciflo/ops/pele/settings.cfg', roles=[comp])
execute(fab.send_peleconf, 'pele_settings.cfg.tmpl', roles=[comp])
bar.update()
# create user_rules index
set_bar_desc(bar, 'Creating user_rules index')
execute(fab.create_grq_user_rules_index, roles=[comp])
bar.update()
# create hysds_ios-grq index
set_bar_desc(bar, 'Creating hysds_ios-grq index')
execute(fab.create_hysds_ios_grq_index, roles=[comp])
bar.update()
# update supervisor config
set_bar_desc(bar, 'Updating supervisor config')
execute(fab.rm_rf, '~/sciflo/etc/supervisord.conf', roles=[comp])
execute(fab.send_template_user_override, 'supervisord.conf.grq',
'~/sciflo/etc/supervisord.conf', '~/mozart/ops/hysds/configs/supervisor',
roles=[comp])
bar.update()
# update datasets config; overwrite datasets config with domain-specific config
set_bar_desc(bar, 'Updating datasets config')
execute(fab.rm_rf, '~/sciflo/etc/datasets.json', roles=[comp])
execute(fab.send_template, 'datasets.json',
'~/sciflo/etc/datasets.json', roles=[comp])
bar.update()
# ship logstash shipper configs
set_bar_desc(bar, 'Updating logstash shipper config')
execute(fab.send_shipper_conf, 'grq', '~/sciflo/log', conf.get('MOZART_ES_CLUSTER'),
conf.get('MOZART_REDIS_PVT_IP'), conf.get('METRICS_ES_CLUSTER'),
conf.get('METRICS_REDIS_PVT_IP'), roles=[comp])
bar.update()
# update HySDS scripts
set_bar_desc(bar, 'Updating HySDS scripts')
execute(fab.send_hysds_scripts, 'grq', roles=[comp])
bar.update()
# ensure self-signed SSL certs exist
set_bar_desc(bar, 'Configuring SSL')
execute(fab.ensure_ssl, 'grq', roles=[comp])
bar.update()
# link ssl certs to apps
execute(fab.ln_sf, '~/ssl/server.key',
'~/sciflo/ops/grq2/server.key', roles=[comp])
execute(fab.ln_sf, '~/ssl/server.pem',
'~/sciflo/ops/grq2/server.pem', roles=[comp])
execute(fab.ln_sf, '~/ssl/server.key',
'~/sciflo/ops/pele/server.key', roles=[comp])
execute(fab.ln_sf, '~/ssl/server.pem',
'~/sciflo/ops/pele/server.pem', roles=[comp])
bar.update()
# expose hysds log dir via webdav
set_bar_desc(bar, 'Expose logs')
execute(fab.mkdir, '/data/work', | |
"""
Created on Sep 14, 2017
@author: riteshagarwal
"""
import zlib
import copy
import json as Json
import json as pyJson
import os
import logging
import random
import socket
import string
import time
from httplib import IncompleteRead
from BucketLib.BucketOperations import BucketHelper
from BucketLib.MemcachedOperations import MemcachedHelper
from TestInput import TestInputServer
from cb_tools.cbstats import Cbstats
from couchbase_helper.document import DesignDocument
from couchbase_helper.documentgenerator import BatchedDocumentGenerator, \
doc_generator, SubdocDocumentGenerator
from membase.api.exception import \
N1QLQueryException, DropIndexException, CreateIndexException, \
DesignDocCreationException, QueryViewException, ReadDocumentException, \
RebalanceFailedException, ServerUnavailableException, \
BucketCreationException, AutoFailoverException, GetBucketInfoFailed,\
CompactViewFailed, SetViewInfoNotFound
from membase.api.rest_client import RestConnection
from java.util.concurrent import Callable
from java.lang import Thread
from remote.remote_util import RemoteUtilHelper, RemoteMachineShellConnection
from reactor.util.function import Tuples
import com.couchbase.test.transactions.SimpleTransaction as Transaction
import com.couchbase.client.java.json.JsonObject as JsonObject
from com.couchbase.client.java.kv import ReplicaMode
from Jython_tasks.task_manager import TaskManager
from table_view import TableView, plot_graph
from time import sleep
from couchbase_helper.durability_helper import DurableExceptions
class Task(Callable):
    """Base class for asynchronous work items run by the task manager.

    Tracks lifecycle state (``started``/``completed`` flags plus
    wall-clock ``start_time``/``end_time`` timestamps), a stored
    ``exception`` and a ``result``. Subclasses implement :meth:`call`.
    """

    def __init__(self, thread_name):
        # thread_name doubles as the task identifier used in log lines
        self.thread_name = thread_name
        self.exception = None
        self.completed = False
        self.started = False
        self.start_time = None   # epoch seconds, set by start_task()
        self.end_time = None     # epoch seconds, set by complete_task()
        self.log = logging.getLogger("infra")
        self.test_log = logging.getLogger("test")
        self.result = False

    def __str__(self):
        if self.exception:
            # Surface a stored failure as soon as the task is inspected
            raise self.exception
        elif self.completed:
            self.log.debug("Task %s completed on: %s"
                           % (self.thread_name,
                              str(time.strftime("%H:%M:%S",
                                                time.gmtime(self.end_time)))))
            # BUGFIX: previously computed 'self.completed - self.started'
            # (bool - bool), which always reported ~0.00s. Use the
            # recorded timestamps for the real elapsed time.
            return "%s task completed in %.2fs" % \
                   (self.thread_name, self.end_time - self.start_time,)
        elif self.started:
            return "Thread %s at %s" % \
                   (self.thread_name,
                    str(time.strftime("%H:%M:%S",
                                      time.gmtime(self.start_time))))
        else:
            return "[%s] not yet scheduled" % self.thread_name

    def start_task(self):
        """Mark the task started and record the start timestamp."""
        self.started = True
        self.start_time = time.time()
        self.log.debug("Thread %s is started:" % self.thread_name)

    def set_exception(self, exception):
        """Record a failure, close out the task and re-raise."""
        self.exception = exception
        self.complete_task()
        # NOTE(review): wrapping in BaseException makes the failure
        # uncatchable via 'except Exception'; kept unchanged for backward
        # compatibility with existing callers.
        raise BaseException(self.exception)

    def complete_task(self):
        """Mark the task finished and record the end timestamp."""
        self.completed = True
        self.end_time = time.time()
        self.log.debug("Thread %s is completed:" % self.thread_name)

    def set_result(self, result):
        """Store the task outcome for later retrieval."""
        self.result = result

    def call(self):
        """Entry point executed by the task manager; must be overridden."""
        raise NotImplementedError

    @staticmethod
    def wait_until(value_getter, condition, timeout_secs=300):
        """
        Repeatedly calls value_getter returning the value when it
        satisfies condition. Calls to value getter back off exponentially.
        Useful if you simply want to synchronously wait for a condition to be
        satisfied.
        :param value_getter: no-arg function that gets a value
        :param condition: single-arg function that tests the value
        :param timeout_secs: number of seconds after which to timeout
                             default=300 seconds (5 mins.); a negative
                             value waits forever
        :return: the value returned by value_getter
        :raises: Exception if the operation times out before
                 getting a value that satisfies condition
        """
        start_time = time.time()
        stop_time = start_time + timeout_secs
        interval = 0.01
        attempt = 0
        value = value_getter()
        while not condition(value):
            now = time.time()
            # negative timeout_secs means "no timeout"
            if timeout_secs < 0 or now < stop_time:
                time.sleep(2**attempt * interval)
                attempt += 1
                value = value_getter()
            else:
                raise Exception('Timeout after {0} seconds and {1} attempts'
                                .format(now - start_time, attempt))
        return value
class RebalanceTask(Task):
    """Task that drives a cluster rebalance (adding ``to_add`` nodes and
    ejecting ``to_remove`` nodes) and polls the REST API until the
    rebalance finishes or is declared hung.

    During a swap rebalance of kv nodes it can additionally verify that
    vBuckets on the untouched nodes were not shuffled.
    Written for Jython / Python 2 (note the ``except X, e`` syntax).
    """
    def __init__(self, servers, to_add=[], to_remove=[], do_stop=False,
                 progress=30, use_hostnames=False, services=None,
                 check_vbucket_shuffling=True, sleep_before_rebalance=0):
        # NOTE(review): mutable default arguments ([]) are shared across
        # calls; safe only while callers never mutate them.
        super(RebalanceTask, self).__init__("Rebalance_task_IN=[{}]_OUT=[{}]_{}"
                                            .format(",".join([node.ip for node in to_add]),
                                                    ",".join([node.ip for node in to_remove]),
                                                    str(time.time())))
        self.servers = servers
        self.to_add = to_add
        self.to_remove = to_remove
        self.start_time = None
        self.services = services
        self.monitor_vbuckets_shuffling = False
        self.check_vbucket_shuffling = check_vbucket_shuffling
        self.result = False
        try:
            self.rest = RestConnection(self.servers[0])
        except ServerUnavailableException, e:
            self.test_log.error(e)
            raise e
        self.retry_get_progress = 0
        self.use_hostnames = use_hostnames
        self.previous_progress = 0
        self.old_vbuckets = {}
        self.thread_used = "Rebalance_task"
        # Build a human-readable overview table of the cluster topology
        cluster_stats = self.rest.get_cluster_stats()
        self.table = TableView(self.test_log.info)
        self.table.set_headers(["Nodes", "Services", "Status"])
        node_ips_to_remove = [node.ip for node in to_remove]
        for node, stat in cluster_stats.items():
            node_ip = node.split(':')[0]
            if node_ip in node_ips_to_remove:
                self.table.add_row([node_ip,
                                    cluster_stats[node]["services"],
                                    "--- OUT --->"])
            else:
                self.table.add_row([node_ip,
                                    stat["services"],
                                    "Cluster node"])
    def __str__(self):
        # NOTE(review): self.num_items and self.loaded are never assigned
        # on RebalanceTask (this looks copy-pasted from a load-gen task),
        # so rendering this task raises AttributeError; also
        # 'self.completed - self.started' is bool arithmetic, not elapsed
        # time. Confirm before relying on str(task).
        if self.exception:
            return "[%s] %s download error %s in %.2fs" % \
                   (self.thread_name, self.num_items, self.exception,
                    self.completed - self.started,) # , self.result)
        elif self.completed:
            self.test_log.debug("Time: %s"
                                % str(time.strftime("%H:%M:%S",
                                                    time.gmtime(time.time()))))
            return "[%s] %s items loaded in %.2fs" % \
                   (self.thread_name, self.loaded,
                    self.completed - self.started,) # , self.result)
        elif self.started:
            return "[%s] %s started at %s" % \
                   (self.thread_name, self.num_items, self.started)
        else:
            return "[%s] %s not yet scheduled" % \
                   (self.thread_name, self.num_items)
    def call(self):
        """Run the rebalance: decide whether to monitor vBucket movement
        (swap rebalance of kv nodes only), add nodes, kick off the
        rebalance and poll until done. Returns self.result."""
        self.start_task()
        try:
            # Swap rebalance: same number of nodes in as out
            if len(self.to_add) and len(self.to_add) == len(self.to_remove):
                node_version_check = self.rest.check_node_versions()
                non_swap_servers = set(self.servers) - set(self.to_remove) - set(self.to_add)
                if self.check_vbucket_shuffling:
                    self.old_vbuckets = BucketHelper(self.servers[0])._get_vbuckets(non_swap_servers, None)
                if self.old_vbuckets and self.check_vbucket_shuffling:
                    self.monitor_vbuckets_shuffling = True
                # Only monitor when every incoming service group has kv...
                if self.monitor_vbuckets_shuffling and node_version_check and self.services:
                    for service_group in self.services:
                        if "kv" not in service_group:
                            self.monitor_vbuckets_shuffling = False
                # ...and every outgoing node runs kv
                if self.monitor_vbuckets_shuffling and node_version_check:
                    services_map = self.rest.get_nodes_services()
                    for remove_node in self.to_remove:
                        key = "{0}:{1}".format(remove_node.ip,
                                               remove_node.port)
                        services = services_map[key]
                        if "kv" not in services:
                            self.monitor_vbuckets_shuffling = False
                if self.monitor_vbuckets_shuffling:
                    self.test_log.info("Will monitor vbucket shuffling for "
                                       "swap rebalance")
            self.add_nodes()
            self.start_rebalance()
            self.table.display("Rebalance Overview")
            self.check()
            # self.task_manager.schedule(self)
        except Exception as e:
            self.exception = e
            self.result = False
            self.test_log.error(str(e))
            return self.result
        self.complete_task()
        self.result = True
        return self.result
    def add_nodes(self):
        """Add every node in self.to_add to the cluster via the master's
        REST API, with per-node service assignment when provided."""
        master = self.servers[0]
        services_for_node = None
        node_index = 0
        for node in self.to_add:
            # NOTE(review): the table row is appended *before*
            # services_for_node is refreshed for this node, so the
            # Services column lags one node behind (None for the first).
            self.table.add_row([node.ip, services_for_node, "<--- IN ---"])
            if self.services is not None:
                services_for_node = [self.services[node_index]]
                node_index += 1
            if self.use_hostnames:
                self.rest.add_node(master.rest_username, master.rest_password,
                                   node.hostname, node.port,
                                   services=services_for_node)
            else:
                self.rest.add_node(master.rest_username, master.rest_password,
                                   node.ip, node.port,
                                   services=services_for_node)
    def start_rebalance(self):
        """Collect the otp node ids to eject and trigger the rebalance."""
        nodes = self.rest.node_statuses()
        # Determine whether its a cluster_run/not
        # (cluster_run = multiple nodes sharing one IP on different ports)
        cluster_run = True
        firstIp = self.servers[0].ip
        if len(self.servers) == 1 and self.servers[0].port == '8091':
            cluster_run = False
        else:
            for node in self.servers:
                if node.ip != firstIp:
                    cluster_run = False
                    break
        remove_node_msg = "Removing node {0}:{1} from cluster"
        ejectedNodes = list()
        for server in self.to_remove:
            for node in nodes:
                if cluster_run:
                    # in cluster_run only the port distinguishes nodes
                    if int(server.port) == int(node.port):
                        ejectedNodes.append(node.id)
                        self.test_log.debug(remove_node_msg.format(node.ip,
                                                                   node.port))
                else:
                    if self.use_hostnames:
                        if server.hostname == node.ip and int(server.port) == int(node.port):
                            ejectedNodes.append(node.id)
                            self.test_log.debug(remove_node_msg
                                                .format(node.ip, node.port))
                    elif server.ip == node.ip and int(server.port) == int(node.port):
                        ejectedNodes.append(node.id)
                        self.test_log.debug(remove_node_msg.format(node.ip,
                                                                   node.port))
        if self.rest.is_cluster_mixed():
            # workaround MB-8094
            self.test_log.warning("Mixed cluster. Sleep 15secs before rebalance")
            time.sleep(15)
        self.rest.rebalance(otpNodes=[node.id for node in nodes],
                            ejectedNodes=ejectedNodes)
        self.start_time = time.time()
    def check(self):
        """Poll rebalance progress, recursing every 10s until the status
        returns to 'none'; verifies ejected nodes were cleaned and (when
        enabled) that vBuckets on untouched nodes did not move."""
        try:
            if self.monitor_vbuckets_shuffling:
                non_swap_servers = set(self.servers) - set(self.to_remove) - set(self.to_add)
                new_vbuckets = BucketHelper(self.servers[0])._get_vbuckets(non_swap_servers, None)
                for vb_type in ["active_vb", "replica_vb"]:
                    for srv in non_swap_servers:
                        if set(self.old_vbuckets[srv][vb_type]) != set(new_vbuckets[srv][vb_type]):
                            msg = "%s vBuckets were shuffled on %s! " \
                                  "Expected: %s, Got: %s" \
                                  % (vb_type, srv.ip,
                                     self.old_vbuckets[srv][vb_type],
                                     new_vbuckets[srv][vb_type])
                            self.test_log.error(msg)
                            raise Exception(msg)
            (status, progress) = self.rest._rebalance_status_and_progress()
            self.test_log.info("Rebalance - status: %s, progress: %s", status,
                               progress)
            # if ServerUnavailableException
            # NOTE(review): a repeated -100 reading bumps the retry counter
            # here AND in the else-branch below (twice per call), halving
            # the effective retry budget — confirm intended.
            if progress == -100:
                self.retry_get_progress += 1
            if self.previous_progress != progress:
                self.previous_progress = progress
                self.retry_get_progress = 0
            else:
                self.retry_get_progress += 1
        except RebalanceFailedException as ex:
            self.result = False
            raise ex
        # catch and set all unexpected exceptions
        except Exception as e:
            self.result = False
            raise e
        retry_get_process_num = 25
        if self.rest.is_cluster_mixed():
            """ for mix cluster, rebalance takes longer """
            self.test_log.debug("Rebalance in mix cluster")
            retry_get_process_num = 40
        # we need to wait for status to be 'none'
        # (i.e. rebalance actually finished and not just 'running' and at 100%)
        # before we declare ourselves done
        if progress != -1 and status != 'none':
            if self.retry_get_progress < retry_get_process_num:
                time.sleep(10)
                self.check()
            else:
                self.result = False
                self.rest.print_UI_logs()
                raise RebalanceFailedException("seems like rebalance hangs. please check logs!")
        else:
            # Rebalance done: verify each ejected node reports an empty
            # pools list (i.e. it really left the cluster)
            success_cleaned = []
            for removed in self.to_remove:
                try:
                    rest = RestConnection(removed)
                except ServerUnavailableException, e:
                    self.test_log.error(e)
                    continue
                start = time.time()
                while time.time() - start < 30:
                    try:
                        if 'pools' in rest.get_pools_info() and \
                                (len(rest.get_pools_info()["pools"]) == 0):
                            success_cleaned.append(removed)
                            break
                        else:
                            time.sleep(0.1)
                    except (ServerUnavailableException, IncompleteRead), e:
                        self.test_log.error(e)
            for node in set(self.to_remove) - set(success_cleaned):
                self.test_log.error(
                    "Node {0}:{1} was not cleaned after removing from cluster"
                    .format(node.ip, node.port))
                self.result = False
            self.test_log.info(
                "Rebalance completed with progress: {0}% in {1} sec"
                .format(progress, time.time() - self.start_time))
            self.result = True
            return
class GenericLoadingTask(Task):
    def __init__(self, cluster, bucket, client, batch_size=1, pause_secs=1,
                 timeout_secs=60, compression=True,
                 retries=5, transaction=False, commit=False,
                 suppress_error_table=False):
        """Base task for batched document loading against a bucket.

        NOTE(review): ``compression``, ``transaction`` and ``commit`` are
        accepted but not stored here — presumably consumed by subclasses;
        confirm against callers.
        """
        super(GenericLoadingTask, self).__init__("Loadgen_task_{}"
                                                 .format(time.time()))
        self.batch_size = batch_size
        self.pause = pause_secs
        self.timeout = timeout_secs
        self.cluster = cluster
        self.bucket = bucket
        self.client = client
        self.random = random.Random()
        self.retries = retries
        self.suppress_error_table = suppress_error_table
    def call(self):
        """Drive the load loop: keep calling next() while has_next()."""
        self.start_task()
        self.log.debug("Starting GenericLoadingTask thread")
        try:
            while self.has_next():
                self.next()
        except Exception as e:
            self.test_log.error(e)
            # NOTE(review): e.message is Python 2 only — fine under the
            # Jython 2.x runtime this file targets.
            self.set_exception(Exception(e.message))
            return
        self.log.debug("Load generation thread completed")
        self.complete_task()
    def has_next(self):
        """Return True while more work remains; subclasses must override."""
        raise NotImplementedError
    def next(self):
        """Perform the next unit of loading; subclasses must override."""
        raise NotImplementedError
# start of batch methods
def batch_create(self, key_val, shared_client=None, persist_to=0,
replicate_to=0, timeout=5, time_unit="seconds",
doc_type="json", durability="", skip_read_on_error=False):
"""
standalone method for creating key/values in batch (sans kvstore)
arguments:
key_val -- array of key/value dicts to load size = self.batch_size
shared_client -- optional client to use for data loading
"""
success = dict()
fail = dict()
try:
self._process_values_for_create(key_val)
client = shared_client or self.client
success, fail = client.setMulti(
key_val, self.exp, exp_unit=self.exp_unit,
persist_to=persist_to, replicate_to=replicate_to,
timeout=timeout, time_unit=time_unit, retry=self.retries,
doc_type=doc_type, durability=durability)
if fail:
if not self.suppress_error_table:
failed_item_table = TableView(self.test_log.info)
failed_item_table.set_headers(["Create doc_Id",
"Exception"])
try:
Thread.sleep(timeout)
except Exception as e:
self.test_log.error(e)
if not skip_read_on_error:
self.test_log.debug("Reading values {0} after failure"
.format(fail.keys()))
read_map = self.batch_read(fail.keys())
for key, value in fail.items():
| |
# <gh_stars>10-100
# Mini-project 7 for Principles of Computing class, by k., 08/02/2014
# The Fifteen Puzzle
# http://www.codeskulptor.org/#poc_fifteen_template.py
'''
Loyd's Fifteen puzzle (solver and visualizer)
note that solved configuration has the blank (zero) tile in upper left;
use the arrows key to swap this tile with its neighbors
'''
#import poc_fifteen_gui
class Puzzle:
'''
class representation for The Fifteen Puzzle
'''
def __init__(self, puzzle_height, puzzle_width, initial_grid=None):
'''
initialize puzzle with default height and width;
returns a Puzzle object
'''
self._height = puzzle_height
self._width = puzzle_width
self._grid = [[col + puzzle_width * row
for col in range(self._width)]
for row in range(self._height)]
if initial_grid != None:
for row in range(puzzle_height):
for col in range(puzzle_width):
self._grid[row][col] = initial_grid[row][col]
def __str__(self):
'''
generate string representation for puzzle;
returns a string
'''
ans = ''
for row in range(self._height):
ans += str(self._grid[row])
ans += '\n'
return ans
# GUI methods
    def get_height(self):
        '''
        getter for puzzle height (number of rows); returns an integer
        '''
        return self._height
    def get_width(self):
        '''
        getter for puzzle width (number of columns); returns an integer
        '''
        return self._width
    def get_number(self, row, col):
        '''
        getter for the tile number at position (row, col); returns an integer
        '''
        return self._grid[row][col]
    def set_number(self, row, col, value):
        '''
        setter for the tile number at position (row, col)
        '''
        self._grid[row][col] = value
    def clone(self):
        '''
        make a copy of the puzzle to update during solving;
        returns a Puzzle object
        (the constructor copies the grid element-wise, so mutating the
        clone does not affect this puzzle)
        '''
        new_puzzle = Puzzle(self._height, self._width, self._grid)
        return new_puzzle
# core puzzle methods
def current_position(self, solved_row, solved_col):
'''
locate the current position of the tile that will be at
position (solved_row, solved_col) when the puzzle is solved;
returns a tuple of two integers
'''
solved_value = (solved_col + self._width * solved_row)
for row in range(self._height):
for col in range(self._width):
if self._grid[row][col] == solved_value:
return (row, col)
assert False, 'Value ' + str(solved_value) + ' not found'
def update_puzzle(self, move_string):
'''
updates the puzzle state based on the provided move string
'''
zero_row, zero_col = self.current_position(0, 0)
for direction in move_string:
if direction == 'l':
assert zero_col > 0, 'move off grid: ' + direction
self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]
self._grid[zero_row][zero_col - 1] = 0
zero_col -= 1
elif direction == 'r':
assert zero_col < self._width - 1, 'move off grid: ' + direction
self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]
self._grid[zero_row][zero_col + 1] = 0
zero_col += 1
elif direction == 'u':
assert zero_row > 0, 'move off grid: ' + direction
self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]
self._grid[zero_row - 1][zero_col] = 0
zero_row -= 1
elif direction == 'd':
assert zero_row < self._height - 1, 'move off grid: ' + direction
self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]
self._grid[zero_row + 1][zero_col] = 0
zero_row += 1
else:
assert False, 'invalid direction: ' + direction
# phase one methods
def lower_row_invariant(self, target_row, target_col):
'''
check whether the puzzle satisfies the specified invariant
at the given position in the bottom rows of the puzzle (target_row > 1);
returns a boolean
'''
# 0 tile is positioned at (i, j) as expected
if self.get_number(target_row, target_col) == 0:
# all tiles in row i to the right of position (i, j) are solved
for columns in range(target_col + 1, self.get_width()):
if not (target_row, columns) == self.current_position(target_row, columns):
return False
# all tiles in rows i + 1 or below are positioned at their solved location
# if 0 tile is in last row, no need to check for more
if not target_row + 1 == self.get_height():
for columns_bellow in range(0, self.get_width()):
if not (target_row + 1, columns_bellow) == self.current_position(target_row + 1, columns_bellow):
return False
return True
return False
    def move(self, target_row, target_col, row, column):
        '''
        place a tile at target position;
        target tile's current position (row, column) must be either above
        the target position (k < i) or on the same row to the left
        (i = k and l < j);
        returns a move string (does NOT apply it to the puzzle)
        '''
        move_it = ''
        # 'druld' cycles the blank around the tile, dragging it one row down
        combo = 'druld'
        # calculate deltas between target position and current position
        column_delta = target_col - column
        row_delta = target_row - row
        # always move up at first (bring the blank next to the tile)
        move_it += row_delta * 'u'
        # simplest case, both tiles in the same column, combo 'ld' shall go first
        if column_delta == 0:
            move_it += 'ld' + (row_delta - 1) * combo
        else:
            # tile is on the left of target, specific move first
            if column_delta > 0:
                move_it += column_delta * 'l'
                # row 0 needs the mirrored cycle to avoid moving off-grid
                if row == 0:
                    move_it += (abs(column_delta) - 1) * 'drrul'
                else:
                    move_it += (abs(column_delta) - 1) * 'urrdl'
            # tile is on the right from target, specific move first
            elif column_delta < 0:
                move_it += (abs(column_delta) - 1) * 'r'
                if row == 0:
                    move_it += abs(column_delta) * 'rdllu'
                else:
                    move_it += abs(column_delta) * 'rulld'
            # apply common move as last
            move_it += row_delta * combo
        return move_it
def solve_interior_tile(self, target_row, target_col):
'''
makes use of helper function move()
updates puzzle and returns a move string
'''
assert self.lower_row_invariant(target_row, target_col)
# unpack tile row and column values
row, column = self.current_position(target_row, target_col)
move_it = self.move(target_row, target_col, row, column)
self.update_puzzle(move_it)
assert self.lower_row_invariant(target_row, target_col - 1)
return move_it
    def solve_col0_tile(self, target_row):
        '''
        solve tile in column zero on specified row (> 1);
        updates puzzle and returns a move string;
        on success the invariant moves to the right end of row i-1
        '''
        assert self.lower_row_invariant(target_row, 0)
        # step off column 0 so there is room to manoeuvre
        move_it = 'ur'
        self.update_puzzle(move_it)
        # unpack tile row and column values
        row, column = self.current_position(target_row, 0)
        # got lucky, target tile already in place
        if row == target_row and column == 0:
            # move tile zero to the right end of that row
            step = (self.get_width() - 2) * 'r'
            self.update_puzzle(step)
            move_it += step
        else:
            # target tile to position (i-1, 1) and zero tile to position (i-1, 0)
            step = self.move(target_row - 1, 1, row, column)
            # use move string for a 3x2 puzzle to bring the target tile into position (i, 0),
            # then moving tile zero to the right end of row i-1
            step += 'ruldrdlurdluurddlu' + (self.get_width() - 1) * 'r'
            self.update_puzzle(step)
            move_it += step
        assert self.lower_row_invariant(target_row - 1, self.get_width() - 1)
        return move_it
# phase two methods
def row0_invariant(self, target_col):
'''
check whether the puzzle satisfies the row zero invariant at the given column (col > 1);
returns a boolean
'''
# if 0 tile is not in expected column, no need to check for more
if not self.get_number(0, target_col) == 0:
return False
for column in range(self.get_width()):
for row in range(self.get_height()):
# exclude tiles we aren't interested, then check if the rest of tiles is solved
if (row == 0 and column > target_col) or (row == 1 and column >= target_col) or row > 1:
if not (row, column) == self.current_position(row, column):
return False
return True
def row1_invariant(self, target_col):
'''
check whether the puzzle satisfies the row one invariant at the given column (col > 1);
returns a boolean
'''
# row 1 is limited case of general row invariant check,
# if row 1 is not solved, no need to check for more
if not self.lower_row_invariant(1, target_col):
return False
# check if all tiles in rows bellow row 1 are positioned at their solved location
for column in range(0, self.get_width()):
for row in range(2, self.get_height()):
if not (row, column) == self.current_position(row, column):
return False
return True
def solve_row0_tile(self, target_col):
'''
solve the tile in row zero at the specified column;
updates puzzle and returns a move string
'''
assert self.row0_invariant(target_col)
move_it = 'ld'
self.update_puzzle(move_it)
# unpack tile row and column values
row, column = self.current_position(0, target_col)
# got lucky, target tile already in place
if row == 0 and column == target_col:
return move_it
else:
# target tile to position (1, j-1) and zero tile to position (1, j-2)
step = self.move(1, target_col - 1, row, column)
# use move string for a 2x3 puzzle
step += 'urdlurrdluldrruld'
self.update_puzzle(step)
move_it += step
# TODO assert check fails for some reason, by k.
#assert self.row0_invariant(target_col - | |
# <reponame>samcom12/anuga_core
#!/usr/bin/env python
from __future__ import print_function
from __future__ import division
from builtins import zip
from builtins import map
from builtins import str
from builtins import range
from past.utils import old_div
import unittest
from math import sqrt, pi
import tempfile
from anuga.abstract_2d_finite_volumes.quantity import *
from anuga.file_conversion.asc2dem import asc2dem
from anuga.config import epsilon
from anuga.fit_interpolate.fit import fit_to_mesh
#from anuga.pyvolution.least_squares import fit_to_mesh
from anuga.abstract_2d_finite_volumes.generic_domain \
import Generic_Domain
from anuga.geospatial_data.geospatial_data import Geospatial_data
from anuga.coordinate_transforms.geo_reference import Geo_reference
from anuga.geometry.polygon import *
import numpy as num
import pprint
#Aux for fit_interpolate.fit example
def linear_function(point):
    """Return x + 3*y for every (x, y) row of *point* (Nx2 array-like)."""
    pts = num.array(point)
    return pts[:, 0] + 3 * pts[:, 1]
def axes2points(x, y):
    """Generate all grid-point coordinates from x and y axes.

    Args:
        x: x coordinates (array)
        y: y coordinates (array)

    Returns:
        Nx2 numpy array of (x, y) pairs in which x varies fastest,
        matching the default 'C' layout of a flattened 2D array.

    Note:
        The y axis is reversed first (flipud), so the output starts with
        the *last* y value — coordinates begin at the bottom of a raster.
    """
    import numpy
    # Reverse y coordinates to have them start at bottom of array
    y_rev = numpy.flipud(y)
    # Repeat x coordinates for each y (fastest varying)
    x_coords = numpy.kron(numpy.ones(len(y_rev)), x)
    # Repeat y coordinates for each x (slowest varying)
    y_coords = numpy.kron(y_rev, numpy.ones(len(x)))
    # Sanity check before pairing the coordinate vectors
    assert len(y_coords) == len(x_coords)
    # Stack the two coordinate vectors into an Nx2 array
    return numpy.column_stack((x_coords, y_coords))
class Test_Quantity(unittest.TestCase):
    def setUp(self):
        """Build the small fixture meshes shared by the tests.

        mesh1: a single triangle over three points;
        mesh4: four triangles over six points;
        mesh_onslow: one triangle in UTM coordinates near Onslow.
        """
        a = [0.0, 0.0]
        b = [0.0, 2.0]
        c = [2.0, 0.0]
        d = [0.0, 4.0]
        e = [2.0, 2.0]
        f = [4.0, 0.0]
        points = [a, b, c, d, e, f]
        #bac, bce, ecf, dbe
        elements = [[1, 0, 2], [1, 2, 4], [4, 2, 5], [3, 1, 4]]
        self.mesh1 = Generic_Domain(points[:3], [elements[0]])
        self.mesh1.check_integrity()
        #print self.mesh1.__class__
        #print isinstance(self.mesh1, Domain)
        self.mesh4 = Generic_Domain(points, elements)
        self.mesh4.check_integrity()
        # UTM round Onslow
        a = [240000, 7620000]
        b = [240000, 7680000]
        c = [300000, 7620000]
        points = [a, b, c]
        elements = [[0, 2, 1]]
        self.mesh_onslow = Generic_Domain(points, elements)
        self.mesh_onslow.check_integrity()
def tearDown(self):
pass
#print " Tearing down"
def test_creation(self):
quantity = Quantity(self.mesh1, [[1, 2, 3]])
assert num.allclose(quantity.vertex_values, [[1., 2., 3.]])
try:
quantity = Quantity()
except:
pass
else:
raise Exception('Should have raised empty quantity exception')
# FIXME(Ole): Temporarily disabled 18 Jan 2009
#try:
# quantity = Quantity([1,2,3])
#except AssertionError:
# pass
#except:
# raise Exception('Should have raised "mising mesh object" error')
def test_creation_zeros(self):
quantity = Quantity(self.mesh1)
assert num.allclose(quantity.vertex_values, [[0., 0., 0.]])
quantity = Quantity(self.mesh4)
assert num.allclose(quantity.vertex_values, [[0., 0., 0.], [0., 0., 0.],
[0., 0., 0.], [0., 0., 0.]])
def test_set_boundary_values(self):
quantity = Quantity(self.mesh1)
quantity.set_boundary_values()
assert num.allclose(quantity.boundary_values, [0.0, 0.0, 0.0])
def test_set_boundary_values_with_function(self):
quantity = Quantity(self.mesh1)
#assert num.allclose(quantity.vertex_values, [[0.,0.,0.]])
def simple(x, y):
return x+3*y
quantity.set_boundary_values(simple)
assert num.allclose(quantity.boundary_values, [1.0, 4.0, 3.0])
def test_set_boundary_values_with_constant(self):
quantity = Quantity(self.mesh1)
#assert num.allclose(quantity.vertex_values, [[0.,0.,0.]])
quantity.set_boundary_values(10.0)
assert num.allclose(quantity.boundary_values, [10.0, 10.0, 10.0])
def test_set_boundary_values_with_array(self):
quantity = Quantity(self.mesh1)
#assert num.allclose(quantity.vertex_values, [[0.,0.,0.]])
quantity.set_boundary_values([10.0, 4.0, 5.0])
assert num.allclose(quantity.boundary_values, [10.0, 4.0, 5.0])
def test_set_boundary_values_with_wrong_sized_array(self):
quantity = Quantity(self.mesh1)
#assert num.allclose(quantity.vertex_values, [[0.,0.,0.]])
try:
quantity.set_boundary_values([10.0, 4.0, 5.0, 8.0])
except:
pass
else:
msg = 'Should have caught this'
raise Exception(msg)
def test_set_boundary_values_from_edges(self):
quantity = Quantity(self.mesh4)
def simple(x, y):
return x+3*y
quantity.set_values(simple)
assert num.allclose(quantity.boundary_values, [
0., 0., 0., 0., 0., 0.])
quantity.set_boundary_values_from_edges()
assert num.allclose(quantity.boundary_values, [
1., 3., 3., 6., 10., 9.])
def test_interpolation(self):
quantity = Quantity(self.mesh1, [[1, 2, 3]])
assert num.allclose(quantity.centroid_values, [2.0]) # Centroid
assert num.allclose(quantity.edge_values, [[2.5, 2.0, 1.5]])
    def test_interpolation2(self):
        """Second-order extrapolation recomputes vertex and edge values
        from the centroid values of the four-triangle mesh."""
        quantity = Quantity(self.mesh4,
                            [[1, 2, 3], [5, 5, 5], [0, 0, 9], [-6, 3, 3]])
        assert num.allclose(quantity.centroid_values, [
                            2., 5., 3., 0.])  # Centroid
        quantity.extrapolate_second_order()
        #print quantity.vertex_values
        # Expected vertex values after limited second-order extrapolation.
        assert num.allclose(quantity.vertex_values, [[3.5, -1.0, 3.5],
                                                     [3.+2./3, 6.+2./3, 4.+2./3],
                                                     [4.6, 3.4, 1.],
                                                     [-5.0, 1.0, 4.0]])
        #print quantity.edge_values
        # Edge values are midpoints of the extrapolated vertex values.
        assert num.allclose(quantity.edge_values, [[1.25, 3.5, 1.25],
                                                   [5. + 2/3.0, 4.0
                                                    + 1.0/6, 5.0 + 1.0/6],
                                                   [2.2, 2.8, 4.0],
                                                   [2.5, -0.5, -2.0]])
    def test_save_to_array(self):
        """Rasterize the quantity onto a regular grid with save_to_array,
        both unsmoothed and smoothed.

        NOTE(review): -9.99900000e+03 (-9999.0) appears to be the NODATA
        fill value for grid cells outside the mesh -- confirm against
        save_to_array's implementation.
        """
        quantity = Quantity(self.mesh4,
                            [[1, 2, 3], [5, 5, 5], [0, 0, 9], [-6, 3, 3]])
        assert num.allclose(quantity.centroid_values, [
                            2., 5., 3., 0.])  # Centroid
        cellsize = 1.0
        # First pass: no smoothing (discontinuous, per-triangle values).
        x, y, z = quantity.save_to_array(cellsize=cellsize, smooth=False)
        #x,y,z = quantity.save_to_array(smooth=False)
        from pprint import pprint
        #pprint(x)
        #pprint(y)
        #pprint(z)
        x_ex = [0., 1., 2., 3., 4.]
        y_ex = [0., 1., 2., 3., 4.]
        z_ex = [[2.00000000e+00, 2.50000000e+00, 0.00000000e+00,
                 4.50000000e+00, 9.00000000e+00],
                [1.50000000e+00, 5.00000000e+00, 0.00000000e+00,
                 4.50000000e+00, -9.99900000e+03],
                [3.00000000e+00, 3.00000000e+00, 3.00000000e+00,
                 -9.99900000e+03, -9.99900000e+03],
                [-1.50000000e+00, -1.50000000e+00, -9.99900000e+03,
                 -9.99900000e+03, -9.99900000e+03],
                [-6.00000000e+00, -9.99900000e+03, -9.99900000e+03,
                 -9.99900000e+03, -9.99900000e+03]]
        assert num.allclose(x_ex, x)
        assert num.allclose(y_ex, y)
        assert num.allclose(z_ex, z)
        # Optional visual inspection; disabled for automated runs.
        Plot = False
        if Plot:
            import pylab
            import numpy
            #a = numpy.where(a == -9999, numpy.nan, a)
            #a = numpy.where(a > 10.0, numpy.nan, a)
            #z = z[::-1,:]
            print(z)
            print(z.shape)
            print(x)
            print(y)
            nrows = z.shape[0]
            ncols = z.shape[1]
            ratio = float(nrows)/float(ncols)
            print(ratio)
            #y = numpy.arange(nrows)*cellsize
            #x = numpy.arange(ncols)*cellsize
            #Setup fig size to correpond to array size
            fig = pylab.figure(figsize=(10, 10*ratio))
            levels = numpy.arange(-7, 10, 0.1)
            CF = pylab.contourf(x, y, z, levels=levels)
            CB = pylab.colorbar(CF, shrink=0.8, extend='both')
            #CC = pylab.contour(x,y,a, levels=levels)
            pylab.show()
        # Second pass: smoothed (vertex-averaged) values on the same grid.
        x, y, z = quantity.save_to_array(cellsize=cellsize, smooth=True)
        x_ex = [0., 1., 2., 3., 4.]
        y_ex = [0., 1., 2., 3., 4.]
        z_ex = [[2.00000000e+00, 2.33333333e+00, 2.66666667e+00,
                 5.83333333e+00, 9.00000000e+00],
                [2.50000000e+00, 2.83333333e+00, 2.66666667e+00,
                 5.83333333e+00, -9.99900000e+03],
                [3.00000000e+00, 2.83333333e+00, 2.66666667e+00,
                 -9.99900000e+03, -9.99900000e+03],
                [-1.50000000e+00, -1.66666667e+00, -9.99900000e+03,
                 -9.99900000e+03, -9.99900000e+03],
                [-6.00000000e+00, -9.99900000e+03, -9.99900000e+03,
                 -9.99900000e+03, -9.99900000e+03]]
        #pprint(z)
        assert num.allclose(x_ex, x)
        assert num.allclose(y_ex, y)
        assert num.allclose(z_ex, z)
        if Plot:
            import pylab
            import numpy
            #a = numpy.where(a == -9999, numpy.nan, a)
            #a = numpy.where(a > 10.0, numpy.nan, a)
            #a = a[::-1,:]
            nrows = z.shape[0]
            ncols = z.shape[1]
            ratio = float(nrows)/float(ncols)
            print(ratio)
            #Setup fig size to correpond to array size
            fig = pylab.figure(figsize=(10, 10*ratio))
            levels = numpy.arange(-7, 10, 0.1)
            CF = pylab.contourf(x, y, z, levels=levels)
            CB = pylab.colorbar(CF, shrink=0.8, extend='both')
            #CC = pylab.contour(x,y,a, levels=[0.0,1.0,2.0,3.0])
            pylab.show()
def test_get_extrema_1(self):
quantity = Quantity(self.mesh4,
[[1, 2, 3], [5, 5, 5], [0, 0, 9], [-6, 3, 3]])
assert num.allclose(quantity.centroid_values, [
2., 5., 3., 0.]) # Centroids
v = quantity.get_maximum_value()
assert v == 5
v = quantity.get_minimum_value()
assert v == 0
i = quantity.get_maximum_index()
assert i == 1
i = quantity.get_minimum_index()
assert i == 3
x, y = quantity.get_maximum_location()
xref, yref = 4.0/3, 4.0/3
assert x == xref
assert y == yref
v = quantity.get_values(interpolation_points=[[x, y]])
assert num.allclose(v, 5)
x, y = quantity.get_minimum_location()
v = quantity.get_values(interpolation_points=[[x, y]])
assert num.allclose(v, 0)
def test_get_maximum_2(self):
a = [0.0, 0.0]
b = [0.0, 2.0]
c = [2.0, 0.0]
d = [0.0, 4.0]
e = [2.0, 2.0]
f = [4.0, 0.0]
points = [a, b, c, d, e, f]
#bac, bce, ecf, dbe
vertices = [[1, 0, 2], [1, 2, 4], [4, 2, 5], [3, 1, 4]]
domain = Generic_Domain(points, vertices)
quantity = Quantity(domain)
quantity.set_values(lambda x, y: x+2*y) # 2 4 4 6
v = quantity.get_maximum_value()
assert v == 6
v = quantity.get_minimum_value()
assert v == 2
i = quantity.get_maximum_index()
assert i == 3
i = quantity.get_minimum_index()
assert i == 0
x, y = quantity.get_maximum_location()
xref, yref = 2.0/3, 8.0/3
assert x == xref
assert y == yref
v = quantity.get_values(interpolation_points=[[x, y]])
assert num.allclose(v, 6)
x, y = quantity.get_minimum_location()
v = quantity.get_values(interpolation_points=[[x, y]])
assert num.allclose(v, 2)
#Multiple locations for maximum -
#Test that the algorithm picks the first occurrence
v = quantity.get_maximum_value(indices=[0, 1, 2])
assert num.allclose(v, 4)
i = quantity.get_maximum_index(indices=[0, 1, 2])
assert i == 1
x, y = quantity.get_maximum_location(indices=[0, 1, 2])
xref, yref = 4.0/3, 4.0/3
assert x == xref
assert y == yref
v = quantity.get_values(interpolation_points=[[x, y]])
assert num.allclose(v, 4)
# More test of indices......
v = quantity.get_maximum_value(indices=[2, 3])
assert num.allclose(v, 6)
i = quantity.get_maximum_index(indices=[2, 3])
assert i == 3
x, y = quantity.get_maximum_location(indices=[2, 3])
xref, yref = 2.0/3, 8.0/3
assert x == xref
assert y == yref
v = quantity.get_values(interpolation_points=[[x, y]])
assert num.allclose(v, 6)
def test_boundary_allocation(self):
quantity = Quantity(self.mesh4,
[[1, 2, 3], [5, 5, 5], [0, 0, 9], [-6, 3, | |
# Copyright 2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
import datetime
import logging
from struct import pack
# python 3 support
import six
import binascii
# Public API of this module; star-imports pick up exactly these names.
__all__ = ('APNs', 'Message', 'Result')

# module level logger, defaults to "apnsclient.apns"
LOG = logging.getLogger(__name__)
class APNs(object):
    """ APNs multicaster bound to a single connection. """

    def __init__(self, connection):
        """ APNs client.

            :Arguments:
                - connection (:class:`Connection`): the connection to talk to.
        """
        self._connection = connection

    def send(self, message):
        """ Deliver *message* to APNs, blocking until it is fully written.

            Returns a :class:`Result` that records per-token failures,
            generic errors and which tokens should be retried.

            .. note::
                A failure to even connect to APNs (e.g. your network is
                down) propagates as an exception.  If the connection is
                established but IO fails mid-send, the result instead
                carries a retry message with the tokens that were not
                delivered; check ``result.needs_retry()`` and reschedule
                ``message.retry()`` after some delay.

            Example::

                # keep cached-connection sessions referenced globally so
                # they are not garbage collected after the send.
                session = Session()
                con = session.get_connection("push_production", cert_string=db_certificate)
                service = APNs(con)
                try:
                    result = service.send(Message(["token 1", "token 2"], alert="Message"))
                except Exception:
                    print("Check your network, I could not connect to APNs")
                else:
                    for token, (reason, explanation) in result.failed.items():
                        delete_token(token)   # stop using that token
                    for reason, explanation in result.errors:
                        pass                  # handle generic errors
                    if result.needs_retry():
                        message = result.retry()  # re-schedule later

            :Returns:
                :class:`Result` object with operation results.
        """
        if not message.tokens:
            # Nothing to deliver; don't bother the connection at all.
            LOG.warning("Message without device tokens is ignored")
            return Result(message)
        return Result(message, self._connection.send(message))

    def feedback(self):
        """ Iterate APNs feedback as ``(token, datetime)`` pairs.

            Each pair names a device token and the moment APNs decided it
            is dead (typically the app was uninstalled).  Stop sending to
            that token unless the device re-registered after the reported
            timestamp.  Use a fresh, non-cached connection; it is closed
            automatically once the feedback has been fully read.

            .. note::
                A connection failure raises the related exception.  An IO
                failure after a successful connect simply ends iteration;
                the remaining tokens are delivered during the next
                feedback session.

            Example::

                session = Session()
                con = session.new_connection("feedback_production", cert_string=db_certificate)
                service = APNs(con)
                try:
                    for token, when in service.feedback():
                        # compare against the last time the device sent
                        # you this token
                        if get_last_update_of_token(token) < when:
                            remove_token(token)
                except Exception:
                    print("Check your network, I could not connect to APNs")

            :Returns:
                generator over ``(binary, datetime)``
        """
        # NOTE: storing this generator keeps the connection locked until
        # it is exhausted.
        for token, timestamp in self._connection.feedback():
            yield (token, self._datetime_from_timestamp(timestamp))

    # override if you use custom datetime classes or weird timezones
    def _datetime_from_timestamp(self, timestamp):
        """ Convert an integer POSIX timestamp to a ``datetime`` object. """
        return datetime.datetime.fromtimestamp(timestamp)
class Message(object):
    """ The notification message.

    Bundles one payload destined for one or more device tokens together
    with its delivery metadata (expiry timestamp and priority).
    """
    # JSON serialization parameters. Assume UTF-8 by default.
    # Compact separators keep the encoded payload as small as possible.
    json_parameters = {
        'separators': (',',':'),
        'ensure_ascii': False,
    }
    # Default expiry (1 day).  An expiry of 0 would mean "do not store the
    # message at all", hence a positive default; see _get_expiry_timestamp().
    DEFAULT_EXPIRY = datetime.timedelta(days=1)
    # Default message priority used when the caller does not pass one.
    DEFAULT_PRIORITY = 10
    def __init__(self, tokens, alert=None, badge=None, sound=None, content_available=None,
                 expiry=None, payload=None, priority=DEFAULT_PRIORITY, extra=None,
                 **extra_kwargs):
        """ The push notification to one or more device tokens.

            Read more `about the payload
            <https://developer.apple.com/library/mac/documentation/NetworkingInternet/Conceptual/RemoteNotificationsPG/Chapters/ApplePushService.html#//apple_ref/doc/uid/TP40008194-CH100-SW1>`_.

            .. note::
                In order to stay future compatible this class doesn't transform
                provided arguments in any way. It is your responsibility to
                provide correct values and ensure the payload does not exceed
                the limit of 256 bytes. You can also generate whole payload
                yourself and provide it via ``payload`` argument. The payload
                will be parsed to init default fields like alert and badge.
                However if parsing fails, then these standard fields will
                become unavailable. If raw payload is provided, then other data
                fields like alert or sound are not allowed.

            :Arguments:
                - tokens (str or list): set of device tokens where to the message will be sent.
                - alert (str or dict): the message; read APNs manual for recognized dict keys.
                - badge (int or str): badge number over the application icon or special value such as "increment".
                - sound (str): sound file to play on arrival.
                - content_available (int): set to 1 to indicate new content is available.
                - expiry (int, datetime or timedelta): timestamp when message will expire.
                - payload (dict or str): JSON-compatible dictionary with the
                  complete message payload. If supplied, it is given instead
                  of all the other, more specific parameters.
                - priority (int): priority of the message, defaults to 10
                - extra (dict): extra payload key-value pairs.
                - extra_kwargs (kwargs): extra payload key-value paris, will be merged with ``extra``.
        """
        # A raw payload may not be combined with the specific content fields.
        # Note: ``priority`` and ``expiry`` are deliberately excluded from the
        # conflict check -- they are delivery metadata, not payload content.
        if payload is not None and ([v for v in (alert, badge, sound, content_available, extra) if v is not None] or extra_kwargs):
            # Raise an error if both `payload` and the more specific parameters are supplied.
            raise ValueError("Payload specified together with alert/badge/sound/content_available/extra.")
        # single token is provided, wrap as list
        if isinstance(tokens, six.string_types) or isinstance(tokens, six.binary_type):
            tokens = [tokens]
        self._tokens = tokens
        self._payload = payload
        self.priority = int(priority) # has to be integer because will be formatted into a binary
        self.expiry = self._get_expiry_timestamp(expiry)
        if payload is not None and hasattr(payload, "get") and payload.get("aps"):
            # try to reinit fields from the payload
            aps = payload["aps"]
            self.alert = aps.get("alert")
            self.badge = aps.get("badge")
            self.sound = aps.get("sound")
            self.content_available = aps.get("content-available")
            # everything outside the "aps" dict counts as extra payload data
            self.extra = dict([(k, v) for (k, v) in six.iteritems(payload) if k != 'aps'])
        elif payload is None:
            # normal message initialization
            self.alert = alert
            self.badge = badge
            self.sound = sound
            self.content_available = content_available
            # merge ``extra`` dict with keyword extras; kwargs win on clashes
            _extra = {}
            if extra:
                _extra.update(extra)
            if extra_kwargs:
                _extra.update(extra_kwargs)
            self.extra = _extra
            if 'aps' in self.extra:
                raise ValueError("Extra payload data may not contain 'aps' key.")
        # else: payload provided as unrecognized value, don't init fields,
        # they will raise AttributeError on access
# override if you use funky expiry values
def _get_expiry_timestamp(self, expiry):
""" Convert expiry value to a timestamp (integer).
Provided value can be a date or timedelta.
"""
if expiry is None:
# 0 means do not store messages at all. so we have to choose default
# expiry, which is here 1 day.
expiry = self.DEFAULT_EXPIRY
if isinstance(expiry, datetime.timedelta):
expiry = self._get_current_datetime() + expiry
if isinstance(expiry, datetime.datetime):
expiry = time.mktime(expiry.timetuple())
return int(expiry)
# override if you use funky timezones
def _get_current_datetime(self):
""" Returns current date and time. """
return datetime.datetime.now()
def __getstate__(self):
""" Returns ``dict`` with ``__init__`` arguments.
If you use ``pickle``, then simply pickle/unpickle the message object.
If you use something else, like JSON, then::
# obtain state dict from message
state = message.__getstate__()
# send/store the state
# recover state and restore message
| |
linked_aop = AffordanceObjectPair(aop.affordance, body_target, aop.affordance, None)
if not linked_aop.test(linked_context):
self.cancel()
return False
execute_result = linked_aop.interaction_factory(linked_context)
linked_si = execute_result.interaction
else:
posture_transition_context = PostureContext(self.interaction.context.source, self.interaction._priority, None)
linked_posture_state = PostureState(linked_sim, linked_sim.posture_state, linked_destination_spec, {})
linked_target_posture = linked_posture_state.body
linked_target_posture.source_interaction = linked_si
linked_target_posture.transfer_exit_clothing_change(linked_sim.posture_state.body)
if linked_target_posture._primitive is None:
if self.interaction is None or self.interaction.should_push_posture_primitive_for_multi_exit():
transition = must_run(linked_target_posture.begin(None, linked_posture_state, posture_transition_context, linked_sim.routing_surface))
transition = None
with self.deferred_derailment():
result = yield from self.run_super_interaction(timeline, linked_si, pre_run_behavior=transition)
if not result:
self.cancel()
return False
def multi_posture_exit(var_map):
master_posture = self._create_posture_state(sim.posture_state, sim_edge, var_map)
if master_posture is not None:
if linked_target_posture is not None:
master_posture.linked_posture_state = linked_posture_state
return master_posture
result = yield from self._create_transition_interaction(timeline, sim, sim_edge, multi_posture_exit, None,
(ParticipantType.Actor), linked_sim=linked_sim)
return result
if False:
yield None
return do_transition_multi_exit
def _run_interaction_privacy_tests(self, privacy_interaction, sim):
resolver = privacy_interaction.get_resolver(target=sim)
return privacy_interaction.privacy.tests.run_tests(resolver)
def _determine_privacy_interaction(self, sim):
if self.interaction.privacy is not None:
return self.interaction
sim_data = self._sim_data.get(sim)
for transition_spec in reversed(sim_data.path_spec.transition_specs):
transition_interactions = transition_spec.transition_interactions(sim)
if not transition_interactions:
continue
for interaction, _ in reversed(transition_interactions):
if interaction is not None and interaction.privacy is not None and interaction.pipeline_progress < PipelineProgress.EXITED:
return interaction
    def _get_privacy_status(self, sim):
        """Classify how privacy affects *sim*'s current transition.

        Returns a ``(status, interaction)`` pair, where status is one of
        ``self.PRIVACY_ENGAGE``, ``self.PRIVACY_SHOO``, ``self.PRIVACY_BLOCK``
        or ``None`` when no privacy handling is required.

        NOTE(review): this method looks machine-decompiled; the control flow
        below contains suspicious constructs (flagged inline) that should be
        verified against the original source before any refactoring.
        """
        privacy_interaction = self._determine_privacy_interaction(sim)
        if not privacy_interaction:
            return (None, None)
        participant_type = privacy_interaction.get_participant_type(sim)
        if participant_type == ParticipantType.Actor and privacy_interaction.privacy:
            # Cache the privacy test result on the interaction so repeated
            # status queries do not re-run the test set.
            if privacy_interaction.privacy_test_cache is None:
                privacy_interaction.privacy_test_cache = self._run_interaction_privacy_tests(privacy_interaction, sim)
        else:
            # Non-actor participants just report the cached result, if any.
            return privacy_interaction.privacy_test_cache or (None, None)
        if not privacy_interaction.get_liability(PRIVACY_LIABILITY):
            remaining_transitions = self.get_remaining_transitions(sim)
            engage_privacy = False
            if sim.posture_state.body.mobile:
                # NOTE(review): "... or True" makes this unconditionally True
                # whenever the sim is mobile -- likely a decompilation
                # artifact; confirm the intended condition.
                engage_privacy = remaining_transitions[0].body_posture.mobile or True
            elif len(remaining_transitions) == 1:
                engage_privacy = True
            elif engage_privacy:
                # NOTE(review): unreachable as written -- engage_privacy is
                # False on every path reaching this elif; verify against the
                # original source.
                return (self.PRIVACY_ENGAGE, privacy_interaction)
        else:
            # A privacy region already exists: first shoo violators once,
            # then block while violators remain inside it.
            if not privacy_interaction.get_liability(PRIVACY_LIABILITY).privacy.has_shooed:
                return (self.PRIVACY_SHOO, privacy_interaction)
            if privacy_interaction.get_liability(PRIVACY_LIABILITY).privacy.find_violating_sims():
                return (
                    self.PRIVACY_BLOCK, privacy_interaction)
        return (None, None)
    def _get_putdown_transition_info(self, sim, actor_transitions, current_state, next_state):
        """Arrange for a carried sim (*sim*) to be put down or picked up as
        part of the transition, derailing the transition while that happens.

        Returns a 5-tuple on the derail paths, or ``None`` (bare ``return``)
        when no put-down handling applies.

        NOTE(review): this method looks machine-decompiled; ``carrying_sim``
        and ``animation_work`` can reach the code past the branch block
        unbound on some paths, and ``if not result:`` below does not return
        before ``result.interaction`` is used.  Verify against the original
        source before refactoring.
        """
        # Already waiting to be picked up/put down -> just keep derailing.
        wait_to_be_picked_up_liability = self._interaction.get_liability(WaitToBePickedUpLiability.LIABILITY_TOKEN)
        if wait_to_be_picked_up_liability is not None:
            self.derail(DerailReason.WAIT_TO_BE_PUT_DOWN, sim)
            return (None, None, None, None, None)
        if self._is_putdown_interaction(target=sim):
            return
        if not actor_transitions:
            return
        current_body_posture_target = current_state.body.target
        next_body_posture_target = next_state.body.target
        if not current_body_posture_target is None:
            if current_body_posture_target.is_sim or next_body_posture_target is not None and next_body_posture_target.is_sim:
                if len(actor_transitions) == 1:
                    return
                carrying_sim = next_state.body.target
                preferred_carrying_sim = self._interaction.context.preferred_carrying_sim
                # Play a call-over animation when someone other than the
                # preferred carrier will do the carrying.
                if preferred_carrying_sim is not carrying_sim:
                    animation_work = self._get_animation_work(self.CALL_OVER_ANIMATION)
                else:
                    animation_work = None
        elif current_body_posture_target is not None and current_body_posture_target.is_sim:
            # NOTE(review): this boolean-chain assignment yields True/False or
            # a sim object -- almost certainly a decompilation artifact.
            carrying_sim = next_body_posture_target is None or next_body_posture_target.is_sim or current_body_posture_target
            animation_work = None
        else:
            return
        put_down_position, put_down_routing_surface = sim.get_initial_put_down_position()
        # Prefer putting the sim down at their shared social group, if any.
        social_group = self._interaction.social_group
        if social_group is not None and sim in social_group:
            if carrying_sim in social_group:
                put_down_position = social_group.position
                put_down_routing_surface = social_group.routing_surface
        context = InteractionContext(carrying_sim, (InteractionSource.POSTURE_GRAPH), (Priority.High), carry_target=sim, insert_strategy=(QueueInsertStrategy.FIRST),
          must_run_next=True)
        interaction_parameters = {'put_down_position':put_down_position,
         'put_down_routing_surface':put_down_routing_surface}
        post_carry_aspect = actor_transitions[0].body if len(actor_transitions) < 2 else actor_transitions[1].body
        if post_carry_aspect.posture_type.multi_sim:
            return
        if post_carry_aspect.target is not None:
            # Look for a put-down AOP that provides exactly the posture the
            # sim ends up in after being carried.
            for aop in (sim.get_provided_aops_gen)((post_carry_aspect.target), context, **interaction_parameters):
                affordance = aop.affordance
                if not affordance.is_putdown:
                    continue
                if affordance.get_provided_posture() is not post_carry_aspect.posture_type:
                    continue
                if not aop.test(context):
                    continue
                break
            else:
                # No usable put-down AOP found (loop exhausted without break).
                self.derail(DerailReason.TRANSITION_FAILED, sim)
                return (None, None, None, None, None)
        else:
            aop = AffordanceObjectPair((SuperInteraction.CARRY_POSTURE_REPLACEMENT_AFFORDANCE), sim,
              (SuperInteraction.CARRY_POSTURE_REPLACEMENT_AFFORDANCE), None, **interaction_parameters)

        def _on_finish(pickup_interaction):
            # Invoked by the pick-up liability; keep waiting on a cancel-AOP,
            # otherwise cancel the parent interaction and fail the transition.
            if not pickup_interaction.is_finishing_naturally:
                if self._interaction.is_cancel_aop:
                    self.derail(DerailReason.WAIT_TO_BE_PUT_DOWN, sim)
                else:
                    self._interaction.cancel((pickup_interaction.finishing_type), cancel_reason_msg='Unable to complete pick up')
                    self.derail(DerailReason.TRANSITION_FAILED, sim)
        result = aop.test_and_execute(context)
        if not result:
            self.derail(DerailReason.TRANSITION_FAILED, sim)
        pick_up_interaction = result.interaction
        # Clear guaranteed SIs that would block the pick-up.
        for si in sim.si_state.all_guaranteed_si_gen(pick_up_interaction.priority, pick_up_interaction.group_id):
            si.cancel((FinishingType.INTERACTION_INCOMPATIBILITY), cancel_reason_msg='Canceling in order to be picked up.')
        self._interaction.set_saved_participant(0, carrying_sim)
        pick_up_liability = PickUpSimLiability(self._interaction, _on_finish)
        pick_up_interaction.add_liability(PickUpSimLiability.LIABILITY_TOKEN, pick_up_liability)
        self.derail(DerailReason.WAIT_TO_BE_PUT_DOWN, sim)
        return (None, None, None, None, animation_work)
    def _handle_teleport_style_interaction_transition_info(self, sim, actor_transitions, current_state, next_state):
        """Try to replace the remainder of *sim*'s route with a teleport-style
        interaction; return ``True`` if one was successfully pushed.

        Walks the remaining transition specs to find the last world-surface
        routing destination (stopping at portals that must not be skipped),
        builds a synthetic pick at that location and executes the teleport
        AOP against it.
        """
        teleport_style_aop = None
        if TeleportHelper.can_teleport_style_be_injected_before_interaction(sim, self.interaction):
            remaining_transition_specs = self._get_path_spec(sim).remaining_original_transition_specs()
            final_routing_location = None
            for spec in remaining_transition_specs:
                if spec.portal_obj is not None:
                    portal_inst = spec.portal_obj.get_portal_by_id(spec.portal_id)
                    if portal_inst is not None:
                        portal_template = portal_inst.portal_template
                        # A portal that must be traversed normally caps how far
                        # the teleport may reach.
                        if not portal_template.allow_teleport_style_interaction_to_skip_portal:
                            break
                if spec.path is not None:
                    # Only world-surface destinations are teleportable.
                    if spec.path.final_location.routing_surface.type != SurfaceType.SURFACETYPE_WORLD:
                        continue
                    final_routing_location = spec.path.final_location
            if final_routing_location is not None:
                # Fabricate a terrain pick at the route's end so the teleport
                # interaction targets the same destination.
                pick_type = PickType.PICK_TERRAIN
                location = final_routing_location.transform.translation
                routing_surface = final_routing_location.routing_surface
                lot_id = None
                level = sim.level
                alt = False
                control = False
                shift = False
                ignore_neighborhood_id = False
                override_target = TerrainPoint(final_routing_location)
                # Inherit pick details from the parent interaction when present.
                if self.interaction.context.pick is not None:
                    parent_pick = self.interaction.context.pick
                    lot_id = parent_pick.lot_id
                    level = parent_pick.level
                    ignore_neighborhood_id = parent_pick.ignore_neighborhood_id
                    alt = parent_pick.modifiers.alt
                    control = parent_pick.modifiers.control
                    shift = parent_pick.modifiers.shift
                else:
                    if self.interaction.target is not None:
                        parent_target = self.interaction.target
                        level = parent_target.level
                override_pick = PickInfo(pick_type=pick_type, target=override_target, location=location, routing_surface=routing_surface,
                  lot_id=lot_id,
                  level=level,
                  alt=alt,
                  control=control,
                  shift=shift,
                  ignore_neighborhood_id=ignore_neighborhood_id)
                teleport_style_aop, interaction_context, _ = sim.get_teleport_style_interaction_aop((self.interaction), override_pick=override_pick, override_target=override_target)
                self.interaction.add_liability(TeleportStyleInjectionLiability.LIABILITY_TOKEN, TeleportStyleInjectionLiability())
        if teleport_style_aop is not None:
            execute_result = teleport_style_aop.execute(interaction_context)
            if execute_result:
                return True
        return False
    def _handle_vehicle_dismount(self, sim, current_state, next_state, vehicle_info):
        """Decide whether *sim* must dismount their vehicle for the upcoming
        route; push the appropriate dismount/redeploy affordance and derail.

        Returns ``True`` when a dismount was initiated (transition derailed),
        ``False`` when the transition may proceed on the vehicle.

        NOTE(review): machine-decompiled control flow; the node-walking logic
        and boolean chains below are preserved verbatim and should be checked
        against the original source before any refactoring.
        """
        vehicle, vehicle_component, current_posture_on_vehicle, next_posture_on_vehicle = vehicle_info
        if current_posture_on_vehicle:
            # Avoid re-entering while a vehicle transition is in flight.
            if self._vehicle_transition_states[sim] != VehicleTransitionState.NO_STATE:
                return False
            remaining_transition_specs = self._get_path_spec(sim).remaining_original_transition_specs()
            path = None
            object_manager = services.object_manager()
            # Scan the first routed spec for the node where the vehicle can no
            # longer continue (dismount_node) and where it could be redeployed.
            for spec in remaining_transition_specs:
                if spec.path is None:
                    continue
                path = spec.path
                path_nodes = path.nodes
                dismount_node = None
                dismount_dist = 0.0
                redeploy_node = None
                redeploy_dist = 0.0
                if not (len(path_nodes) > 1 and any((node.portal_object_id for node in path_nodes))):
                    if any((node.tracked_terrain_tags for node in path.nodes)):
                        nodes = list(path_nodes)
                        prev_node = path_nodes[0]
                        next_node = None
                        for next_node in nodes[1:]:
                            node_dist = (Vector3(*next_node.position) - Vector3(*prev_node.position)).magnitude()
                            if redeploy_node is not None:
                                redeploy_dist += node_dist
                            portal_obj_id = prev_node.portal_object_id
                            portal_obj = object_manager.get(portal_obj_id) if portal_obj_id else None
                            if prev_node.portal_id:
                                # NOTE(review): and/or chain can assign a truthy
                                # non-node value to dismount_node -- confirm.
                                dismount_node = portal_obj and vehicle_component.can_transition_through_portal(portal_obj, prev_node.portal_id) or (prev_node if dismount_node is None else dismount_node)
                                redeploy_node = next_node
                                redeploy_dist = 0.0
                            else:
                                if not vehicle_component.can_transition_over_node(next_node, prev_node):
                                    dismount_node = next_node if dismount_node is None else dismount_node
                                    dismount_dist += node_dist
                                    break
                                else:
                                    if redeploy_node is None:
                                        dismount_dist += node_dist
                                    if redeploy_node:
                                        # Enough clear distance after the portal
                                        # to be worth redeploying.
                                        if redeploy_dist >= vehicle_component.minimum_route_distance:
                                            break
                            prev_node = next_node
                if dismount_node is None:
                    # Vehicle can ride the whole path; dismount at the end only
                    # if the next posture is not on the vehicle.
                    dismount_dist = path.length()
                    if not next_posture_on_vehicle:
                        dismount_node = path_nodes[(-1)]
                break
            if path is None:
                return False
            defer_position = None
            if dismount_dist < vehicle_component.minimum_route_distance:
                # Too short to bother riding: dismount immediately, possibly
                # auto-deploying the vehicle further along the route.
                if not next_posture_on_vehicle:
                    return False
                if dismount_node is not None:
                    for si in sim.si_state.sis_actor_gen():
                        if si.target is vehicle:
                            if si.affordance is vehicle_component.drive_affordance:
                                if redeploy_node is not None:
                                    footprint = vehicle.footprint_polygon
                                    if not footprint.contains(path.start_location.position):
                                        location = Location(Transform(Vector3(*redeploy_node.position), Quaternion(*redeploy_node.orientation)), redeploy_node.routing_surface_id)
                                        result = vehicle_component.push_auto_deploy_affordance(sim, location)
                                        if not result:
                                            return False
                                    self.derail(DerailReason.MUST_EXIT_MOBILE_POSTURE_OBJECT, sim)
                                    si.cancel(FinishingType.DISPLACED, 'Vehicle Dismount for Portal.')
                                    return True
                                else:
                                    self.derail(DerailReason.MUST_EXIT_MOBILE_POSTURE_OBJECT, sim)
                                    return True
                            else:
                                return False
            else:
                # Ride to the dismount node, then push an explicit dismount.
                if dismount_node is not None:
                    defer_position = Vector3(*dismount_node.position)
                    defer_location = Location(Transform(defer_position, Quaternion.IDENTITY()), dismount_node.routing_surface_id)
                    execute_result = vehicle_component.push_dismount_affordance(sim, defer_location)
                    if execute_result:
                        self.derail(DerailReason.MUST_EXIT_MOBILE_POSTURE_OBJECT, sim)
                        return True
        return False
def _handle_vehicle_transition_info(self, sim, actor_transitions, current_state, next_state):
vehicle = current_state.body[BODY_TARGET_INDEX]
vehicle_component = vehicle.vehicle_component if vehicle is not None else None
current_posture_on_vehicle = vehicle_component is not None
next_posture_on_vehicle = next_state.is_on_vehicle()
vehicle_info = (vehicle, vehicle_component, current_posture_on_vehicle, next_posture_on_vehicle)
deployed_vehicle = self._deployed_vehicles.get(sim, None)
if self._handle_vehicle_dismount(sim, current_state, next_state, vehicle_info):
return
if self._vehicle_transition_states[sim] != VehicleTransitionState.DEPLOYING:
path_spec = self._get_path_spec(sim)
previous_posture_spec = path_spec.previous_posture_spec
if current_state == previous_posture_spec:
path_progress = path_spec.path_progress
if path_progress >= 2:
previous_posture_spec = path_spec.path[(path_progress - 2)]
if current_posture_on_vehicle and not next_posture_on_vehicle:
if len(path_spec.path) == 2:
if previous_posture_spec is not None and previous_posture_spec.body_posture.is_vehicle:
self._vehicle_transition_states[sim] = VehicleTransitionState.NO_STATE
vehicle = previous_posture_spec.body_target
vehicle_component = vehicle.vehicle_component if vehicle is not None else None
if vehicle_component is not None and not self._should_skip_vehicle_retrieval(path_spec.remaining_original_transition_specs()):
if vehicle_component.retrieve_tuning is not None and sim.routing_surface == vehicle.routing_surface and sim.household_id == vehicle.household_owner_id and vehicle.inventoryitem_component is not None and self.sim.inventory_component.can_add(vehicle):
execute_result = vehicle_component.push_retrieve_vehicle_affordance(sim, depend_on_si=(self.interaction))
if execute_result:
self.derail(DerailReason.MUST_EXIT_MOBILE_POSTURE_OBJECT, sim)
return
is_vehicle_posture_change = current_posture_on_vehicle or next_posture_on_vehicle
if self.interaction.should_disable_vehicles:
return
if sim.get_routing_slave_data():
return
vehicle_transition_state = self._vehicle_transition_states[sim]
if not is_vehicle_posture_change:
if not vehicle_transition_state == VehicleTransitionState.DEPLOYING:
if not vehicle_transition_state == VehicleTransitionState.MOUNTING:
if len(sim.posture_state.get_free_hands()) == 2:
path_spec = self._get_path_spec(sim)
remaining_transition_specs = path_spec.remaining_original_transition_specs()
if not remaining_transition_specs:
return
next_spec = remaining_transition_specs[0]
mounted = False
final_spec = remaining_transition_specs[(-1)] if remaining_transition_specs else None
final_body_target = final_spec.posture_spec[BODY_INDEX][BODY_TARGET_INDEX]
if next_spec.path is not None:
if final_body_target is not None or sim.routing_surface.type == SurfaceType.SURFACETYPE_WORLD or any((next_spec.portal_obj is not None for next_spec in remaining_transition_specs)):
for vehicle in sim.get_vehicles_for_path(next_spec.path):
execute_result = vehicle.vehicle_component.push_deploy_vehicle_affordance(sim, depend_on_si=(self.interaction))
if execute_result:
self._vehicle_transition_states[sim] = VehicleTransitionState.DEPLOYING
self._deployed_vehicles[sim] = vehicle
self.derail(DerailReason.CONSTRAINTS_CHANGED, sim)
mounted = True
break
if not mounted:
previous_spec = path_spec.previous_transition_spec
if previous_spec is not None and previous_spec.portal_obj is not None and next_spec.path is not None:
self._mount_vehicle_post_portal_transition(sim, previous_spec, next_spec)
elif (is_vehicle_posture_change or self._vehicle_transition_states[sim]) == VehicleTransitionState.DEPLOYING:
if deployed_vehicle is not None:
execute_result = deployed_vehicle.vehicle_component.push_drive_affordance(sim, depend_on_si=(self.interaction))
if execute_result:
self._vehicle_transition_states[sim] = VehicleTransitionState.NO_STATE
self._deployed_vehicles.pop(sim, None)
self.derail(DerailReason.CONSTRAINTS_CHANGED, sim)
return
def _handle_formation_transition_info(self, sim):
master = sim.routing_master if not sim.get_routing_slave_data() else sim
if master is None:
return
else:
slave_datas = master.get_routing_slave_data()
return slave_datas or None
transitioning_sims = self.get_transitioning_sims()
if master in transitioning_sims:
if all((slave_data.slave in transitioning_sims for slave_data in slave_datas)):
return
if master is sim:
derail = False
transition_spec = | |
found %d other possible plays"%(len(final_set))
for i in range(len(final_set)):
show(final_set[i])
print
final_set = []
return True
##############end find_all_plays##################
def find_more():
    '''
    Enumerate every alternative optimal play of the current hand.

    Permutes the loose singles/pairs/rockets over the "slots" needed by
    triplets, quadruplets and triplet-sequences, replays the greedy steps
    for each permutation, and records every distinct play whose length
    matches the best known result (global `result`).  Unique new plays are
    appended to the global `report`.

    Returns True on success, False when an internal consistency check fails.
    '''
    global roc
    global qua
    global tri
    global pai
    global sin
    global seq_tri
    global seq_pai
    global result
    global report
    global factor
    min = len(result)
    find_more_set = []
    if debug <= 7:
        print "\n"
        print "********find_more begin******"
        print "report now is ",report
        print "min play turn is",min
    pp = [] # the loose singles and pairs (and rockets) collected so far
    pool= [] # after permutations(pp,per_num): the per_num chosen items
    #enhancement 1
    d_pool = [] # the permuted items, to be removed from a copy of pp
    pool_per_set = [] # all permutation results, as [(),(),()...]
    for ite in pai:
        pp.append(ite[:])
    for ite in sin:
        pp.append(ite[:])
    #Bug17 2,W
    for ite in roc:
        pp.append(ite[:])
    cp_pp = copy.deepcopy(pp)
    # enhancement 1: decrease the number of permutations to what is needed
    necessary_num = len(qua)*2 + len(tri) + sum(map(len,seq_tri))
    per_num = len(pp)
    if per_num >= necessary_num:
        per_num = necessary_num
        # logically per_num <= 5 in a 17-card hand
    #enhancement 1
    if per_num != 0:
        pool_per_set = list(itertools.permutations(pp,per_num))
        for i in range(len(pool_per_set)):
            pool_per_set[i] = list(pool_per_set[i])#[(),(),...] --->[[],[],...]
    else:
        pool_per_set = [pp] # no need to permute: no tri, seq_tri or qua to feed
    if debug <= 6:
        print "Find more start!"
        print "all the set of sins and pairs are:"
        print pool_per_set
    # Permutation loop: replay the greedy steps once per candidate ordering
    for i in range(len(pool_per_set)):
        pool = pool_per_set[i]
        #enhancement 1
        d_pool = pool[:] # record what to delete from pp
        #Bug23
        cp_pp = copy.deepcopy(pp)
        remove_pack(cp_pp,d_pool)
        for item in cp_pp:
            pool.append(item[:])
        if len(pool) != len(pp) and debug <=6:
            print "find more permutation error"
            return False
        plane = [] # seq_tri plus the same number of pairs or singles
        qua_two = [] # quadruplet with 2 extra cards
        tri_one = [] # triplet plus one card or a pair
        # Bug 23: work on deep copies so the globals stay intact per iteration
        t_seq_tri = copy.deepcopy(seq_tri)
        t_seq_pai = copy.deepcopy(seq_pai)
        t_roc = copy.deepcopy(roc)
        t_qua = copy.deepcopy(qua)
        t_tri = copy.deepcopy(tri)
        t_seq = copy.deepcopy(seq)
        t_pai = copy.deepcopy(pai)
        t_sin = copy.deepcopy(sin)
        #1
        if not step1_plane_pairs(plane,t_seq_tri,pool):
            if debug <= 6:
                print "step1_plane_pairs failed"
            return False
        #2
        if not step2_plane_singles(plane,t_seq_tri,pool):
            if debug <= 6:
                print "step2_plane_singles failed"
            return False
        #2.5
        random.shuffle(t_seq_tri)
        if not step2_small_plane_sins(plane,t_seq_tri,pool):
            if debug <= 6:
                print "step 2.5 failed"
            return False
        #3
        random.shuffle(t_qua)
        if not step3_qua_two(qua_two,t_qua,pool):
            if debug <= 6:
                print "step 3 failed"
            return False
        #4.5
        # True means discard the pair-able card
        if len(t_qua) != 0 or (6 in map(len,t_seq_tri)):
            # bug 14: W (joker) and 2 not considered
            if sum(map(len,pool)) == 1 and pool[0][0] != 20 and \
               pool[0][0] != 99:
                if not step4_big_seq_discard(t_seq,pool,False):
                    if debug <= 6:
                        print "step 4.5 failed"
                    return False
                step3_qua_two(qua_two,t_qua,pool)
                # notice:
                # here the order is different from find_a_play
                step2_small_plane_sins(plane,t_seq_tri,pool)
        ##########end 4.5 4.6 Special cases#########################
        #5
        random.shuffle(t_seq_tri)
        if not step5_small_plane_pair(plane,t_seq_tri,pool):
            if debug <= 6:
                print "Step 5 failed"
            return False
        #6 #7
        # for more answers
        random.shuffle(t_tri)
        if not step6_tri_sin_pai(tri_one,t_tri,pool):
            if debug <= 6:
                print "Step 6,7 failed"
            return False
        #8 give this play!
        final = []
        f_same = False
        final = final + plane + t_seq_tri + t_seq_pai + qua_two + tri_one\
                + t_qua + t_tri + t_seq + pool
        if len(final) == min:
            s_final = sorted(map(sorted,final)) # canonical form for comparison
            for item in report:
                if s_final == sorted(map(sorted,item)):
                    f_same = True
                    break
            if not f_same:
                report.append(final)
                find_more_set.append(final)
        elif len(final) < min: # A better solution found
            if debug <=9:
                print "find_more find a better solution?!IMPOSSIBLE!"
                print "len(final)",len(final)
                show(final)
                print
                print "compare to the result"
                print len(result)
                show(result)
                print
            #return False
            #break
            result = copy.deepcopy(final)
            find_more_set = []
            report = []
            report.append(final)
            find_more_set.append(final)
            min = len(final)
        else:
            continue
    # end of the permutation loop over tri/seq_tri orderings
    if len(find_more_set) != 0 and debug <= 9:
        print "****find_more finished*******"
        print "find_more ended with %d more solution:" \
                %(len(find_more_set))
        for i in range(len(find_more_set)):
            show(find_more_set[i])
            print
        print "end\n"
    return True
#################end find_more##################################
#analysis to find whether there are card types that were not played:
def save_all_possible(all_cards):
    '''
    Detect every basic card type (quadruplets, triplet sequences, plain
    sequences, pair sequences, triplets and pairs) present in all_cards.

    Each find_* helper mutates the hand it is given and accumulates its
    findings in the module-level globals, so the hand is restored from a
    snapshot before the next detector runs.
    '''
    global qua
    global seq_tri
    global seq_pai
    global seq
    global tri
    global pai
    snapshot = all_cards[:]
    detectors = (find_qua, find_seq_tri, find_seq, find_seq_pai, find_tri, find_pai)
    for detect in detectors:
        detect(all_cards)
        all_cards = snapshot[:]  # restore the hand for the next detector
#######end save_all_possible##########
#en 2
def find_more_better():
'''
After analyze of previous result and all possible types
to detect better solutions
'''
global roc
global tri
global pai
global sin
global seq
global seq_tri
global seq_pai
global qua
global ori_pack
global lef_pack
global arr
global report
global result #the best result
global n_need
global n_give
global factor
global f_better
f_better = True
if debug >= 15:
print "#########find_more_better,begin"
lef_pack = sorted(ori_pack[:])
find_better_set = []
pre_report = report
min = len(result)
#analysis previous reslut
n_need = sum(map(len,seq_tri))/3 + len(qua)*2 + len(tri)
n_give = sum(map(len,pai)) + sum(map(len,roc)) + len(sin)
#compare to all possible pack types
#to call for the types that were not played
pre_qua = qua[:]
pre_seq_tri = seq_tri[:]
pre_seq_pai = seq_pai[:]
pre_tri = tri[:]
pre_pai = pai[:]
pre_seq = seq[:]
pre_sin = sin[:]
save_all_possible(lef_pack[:])
try_qua = False
try_tri = False
try_pai = False
try_seq = False
try_seq_tri = False
try_seq_pai = False
if len(qua) != 0 and len(pre_qua) == 0:
try_qua = True
n_need = n_need + len(qua)*2
if len(tri) != 0 and len(pre_tri) == 0:
try_tri = True
n_need = n_need + len(tri)
if len(pai) != 0 and len(pre_pai) == 0:
try_pai = True
if len(seq) != 0 and len(pre_seq) == 0:
try_seq_ = True
if len(seq_tri) != 0 and len(pre_seq_tri) == 0:
try_seq_tri = True
n_need = n_need + sum(map(len,seq_tri))/3
if len(seq_pai) != 0 and len(pre_seq_pai) == 0:
try_seq_pai = True
if n_need > n_give or try_qua or try_seq_tri\
or try_seq_pai or try_tri or try_pai:
pass
else:
if debug >=15:
print "no needed find_more_better"
return True
max_tri = len(tri)
max_seq_tri = len(seq_tri)
max_seq = len(seq)
max_qua = len(qua)
max_pai = len(pai)
if len(seq) != 0:
max_seq_length = max(map(len,seq))
else:
max_seq_length = 0
if len(seq_pai)!= 0:
max_seq_pai_len = max(map(len,seq_pai))
else:
max_seq_pai_len = 0
if len(seq_tri)!= 0:
max_seq_tri_len = max(map(len,seq_tri))
else:
max_seq_pai_len = 0
#e.g 4444 8888 AA ->444488AA,88 j circle find->888844AA,44
    #*pai_factor means the loop count is doubled if a pai (pair) exists
pai_factor = 1
if max_pai != 0:
pai_factor = 2
max_num = max(max_tri,max_seq,max_seq_tri,max_qua,max_pai)
if max_num == 0:
max_num = 1
loop_factor = 2
if n_need != 0:
loop_factor = n_need
loop_n = loop_factor * max_num * pai_factor
if debug >= 15:
print "find_more_better will loop %d times"%(loop_n)
#--------loop j started-------------------------------
for j in range(loop_n):
if debug >= 16:
print "j=%d"%(j)
clear() #clear all the basic types global storage
qua_two = []
plane = []
tri_one = []
lef_pack = ori_pack[:] #[3,4,5,5,6,7]
put_to_arr(sorted(lef_pack))
len_pack = len(lef_pack)
if debug >= 15:
print "find_more_better %d times,started with"%(j)
print lef_pack
#the X factor!
for i in range(loop_factor):
x = random.randint(1,6)
if try_pai:
find_pai(lef_pack,i%max_pai,j)
if try_tri:
find_tri(lef_pack,i%max_tri,j)
if x <= 2 or try_qua: # True mean need to find this type
if max_qua != 0:
find_qua(lef_pack,j%max_qua,j)
elif max_qua > 3:
find_pai(lef_pack,j%max_qua,j)
if (x >= 2 and x <= 4) or try_seq:
if max_seq_length - n_need > 0:
find_seq(lef_pack,j%(max_seq_length - n_need),j)
if x == 1 or x == 6 or try_seq_tri:
if max_seq_tri != 0:
find_seq_tri(lef_pack,j%(max_seq_tri_len),j)
if x < 2 or x == 6 or try_seq_pai or try_tri:
if max_seq_pai_len != 0 and x<2 and max_pai/2 - n_need>0:
find_seq_pai(lef_pack,j%(max_pai/2-n_need),j)
if x >= 3 or try_tri:
if max_tri != 0:
find_tri(lef_pack,j%max_tri,j)
else:
find_tri(lef_pack,j,j)
if j%2 == 0 and (x > 3 or try_pai):#half loop a pai is called for
find_pai(lef_pack,(j + 1)%(max_pai),j)
if j%3 == 0 and try_tri:
find_tri(lef_pack,(j+1)%(max_tri),j)
if debug >= 15:
if len(lef_pack) + sum(map(len,seq_tri)) + sum(map(len,seq_pai))\
+ sum(map(len,qua)) + sum(map(len,tri)) + sum(map(len,pai)) +\
sum(map(len,sin)) + sum(map(len,seq))!= len_pack:
print "ERROR of numbers of lec_pack,in find_more_better"
print "len(lef_pack):%d + seq_tri:%d + seq_pai:%d +,qua):%d\
+ tri:%d +pai:%d + sin:%d + seq:%d != len_pack:%d"\
| |
CC matrix, call build_cc_*() methods first!'
raise pexceptions.PySegInputError(expr='save_cc (ClassStar)', msg=error_msg)
if txt:
np.savetxt(fname, self.__cc)
else:
np.save(fname, self.__cc)
    # Loads a numpy array from disk and stores it as the CC matrix
# fname: full path where CC 2D numpy array is stored, see numpy.load for more details
# mmap: see numpy.load for more detail
def load_cc(self, fname, mmap=None):
hold = np.load(fname, mmap_mode=mmap)
# Input parsing
if (not isinstance(hold, np.ndarray)) or (len(hold.shape) != 2) or (hold.shape[0] != hold.shape[1]):
error_msg = 'Input CC matrix must be a squared 2D numpy array!'
raise pexceptions.PySegInputError(expr='load_cc (ClassStar)', msg=error_msg)
self.__cc = hold
# Load particles from the STAR file information and computes their mask and radial averages
# mask: binary mask to be applied after rotations, its dimensions also defines subvolumes dimensions used,
# only 3D cubes with even dimension are accepted
# low_sg: low pass Gaussian filter sigma in voxels, it does not apply if lower or equal than 0 (default)
# rln_norm: normalize particles according Relion convention after loaded (default True)
# avg_norm: re-normalize particles after their radial averaging (default False)
# rad_3d: if False (default) the NCC are done in 2D, otherwise in 3D
# npr: number of process, None (defaults) means they are adjusted automatically to the number of cores
# debug_dir: if not None (default) intermediate information will be stored in this directory, use only for debugging
# purposes
# ref_dir: Alternative directory for looking for reference tomograms (default None)
# direct_rec: if False (default) the particles are cropped from 'rlnMicrographName' tomogram, otherwise they are
# directly load from 'rlnImageName'
# bin: if not None (default) then particles are binned before being processed
    def load_particles(self, mask, low_sg=0, seg_dil_it=2, rln_norm=True, avg_norm=False, rad_3D=False, npr=None,
                       debug_dir=None, ref_dir=None, direct_rec=False, bin=None):
        """Load the particles referenced by the STAR file and compute their
        Z radial averages and the corresponding averaged masks.

        See the comment block above this method for the parameter
        description. NOTE(review): `seg_dil_it` is accepted but never used
        inside this method -- confirm whether it should be forwarded to
        pr_load_parts.
        """
        # Input parsing
        binf = None
        if bin is not None:
            binf = float(bin)
            ibin = 1. / binf
        if not isinstance(mask, np.ndarray):
            error_msg = 'Input mask must a ndarray!'
            raise pexceptions.PySegInputError(expr='load_particles (ClassStar)', msg=error_msg)
        if binf is not None:
            # Bin (zoom down) the mask so it matches the binned particles
            mask = sp.ndimage.zoom(mask, ibin, order=3, mode='constant', cval=0.0, prefilter=True)
        # NOTE(review): np.int/np.bool were removed in NumPy >= 1.24; this code requires an older NumPy
        svol_sp = np.asarray(mask.shape, dtype=np.int)
        if (len(svol_sp) != 3) or (svol_sp[0] != svol_sp[1]) or (svol_sp[1] != svol_sp[2]) or (svol_sp[0]%2 != 0):
            error_msg = 'Input mask must be a 3D cube with even dimension!'
            raise pexceptions.PySegInputError(expr='load_particles (ClassStar)', msg=error_msg)
        # Store intermediate information
        averager = RadialAvg3D(svol_sp, axis='z')
        if debug_dir is not None:
            disperse_io.save_numpy(averager.get_kernels(), debug_dir + '/avg_kernels.mrc')
        # Initialization
        npart = self.__star.get_nrows()
        # self.__cc = np.zeros(shape=(npart, npart), dtype=np.float32)
        self.__particles = np.zeros(shape=npart, dtype=object)
        self.__masks = np.zeros(shape=npart, dtype=object)
        # Multiprocessing
        if npr is None:
            npr = mp.cpu_count()
        processes = list()
        # Create the list on indices to split
        npart = self.__star.get_nrows()
        part_h, part_r = averager.get_output_dim()
        part_sz = int(part_h * part_r)
        # Flat shared buffers: one (part_h x part_r) image per particle, stored particle-major
        particles_sh, masks_sh = mp.Array('f', int(part_h*part_r*npart)), mp.Array('f', int(part_h*part_r*npart))
        # Loading particles loop (Parallel)
        if npr <= 1:
            pr_load_parts(-1, np.arange(npart, dtype=np.int), mask, self.__star, float(low_sg), rln_norm, avg_norm, rad_3D,
                          debug_dir, ref_dir, direct_rec,
                          particles_sh, masks_sh, binf)
        else:
            spl_ids = np.array_split(np.arange(npart, dtype=np.int), npr)
            for pr_id in range(npr):
                pr = mp.Process(target=pr_load_parts, args=(pr_id, spl_ids[pr_id], mask, self.__star, float(low_sg),
                                                            rln_norm, avg_norm, rad_3D, debug_dir, ref_dir, direct_rec,
                                                            particles_sh, masks_sh, binf))
                pr.start()
                processes.append(pr)
            pr_results = list()
            for pr in processes:
                pr.join()
                pr_results.append(pr.exitcode)
            # Workers signal success by exiting with their own process id as exit code
            for pr_id in range(len(processes)):
                if pr_id != pr_results[pr_id]:
                    error_msg = 'Process ' + str(pr_id) + ' exited unexpectedly!'
                    raise pexceptions.PySegInputError(expr='load_particles (ClassStar)', msg=error_msg)
            gc.collect()
        # Set class variables from multiprocess shared objects
        self.__mask_gl = np.ones(shape=(part_h, part_r), dtype=np.bool)
        for row in range(npart):
            hold_particle, hold_mask = np.zeros(shape=(part_h, part_r), dtype=np.float32), \
                                       np.zeros(shape=(part_h, part_r), dtype=np.float32)
            sh_id = row * part_sz
            # Each image is stored column by column (part_h values per column)
            for j in range(part_r):
                sh_id_l, sh_id_h = sh_id+j*part_h, sh_id+(j+1)*part_h
                hold_particle[:, j], hold_mask[:, j] = particles_sh[sh_id_l:sh_id_h], masks_sh[sh_id_l:sh_id_h]
            self.__particles[row], self.__masks[row] = hold_particle, hold_mask
            # Global mask: voxels valid in every particle's mask
            self.__mask_gl *= (hold_mask > 0)
# Build the Normalized (intensity in 3D) Cross-Correlation Matrix among Z radially averaged particles
# metric: metric used for particles affinity, valid: 'cc' (default) cross-correlation,
    #         'similarity' negative squared Euclidean distance, 'cc_full' slower than 'cc' but considers small
# misalignments
# npr: number of process, None (defaults) means they are adjusted automatically to the number of cores
def build_ncc_z2d(self, metric='cc', npr=None):
# Input parsing
if (self.__particles is None) or (self.__masks is None) or (len(self.__particles) != len(self.__masks)):
error_msg = 'There is no particle to process call correctly load_particles() function first!'
raise pexceptions.PySegInputError(expr='build_ncc_z2d (ClassStar)', msg=error_msg)
if metric == 'cc':
p_metric = 0
elif metric == 'similarity':
p_metric = 1
elif metric == 'cc_full':
p_metric = 2
else:
error_msg = 'Invalid metric: ' + str(metric)
raise pexceptions.PySegInputError(expr='build_ncc_z2d (ClassStar)', msg=error_msg)
# Multiprocessing
if npr is None:
npr = mp.cpu_count()
processes = list()
# Create the list on indices to split
npart = self.__star.get_nrows()
sym_ids = list(it.combinations(list(range(npart)), r=2))
spl_ids = np.array_split(list(range(len(sym_ids))), npr)
shared_mat = mp.Array('f', int(npart*npart))
# Particles loop (Parallel)
if npr <= 1:
pr_cc_2d(-1, spl_ids[0], sym_ids, self.__particles, self.__masks, p_metric,
shared_mat)
else:
for pr_id in range(npr):
pr = mp.Process(target=pr_cc_2d, args=(pr_id, spl_ids[pr_id], sym_ids, self.__particles,
self.__masks, p_metric,
shared_mat))
pr.start()
processes.append(pr)
pr_results = list()
for pr in processes:
pr.join()
pr_results.append(pr.exitcode)
for pr_id in range(len(processes)):
if pr_id != pr_results[pr_id]:
error_msg = 'Process ' + str(pr_id) + ' exited unexpectedly!'
raise pexceptions.PySegInputError(expr='build_ccc (ClassStar)', msg=error_msg)
gc.collect()
# Fill diagonal with the maximum value for normalized cross-correlation
self.__cc = np.frombuffer(shared_mat.get_obj(), dtype=np.float32).reshape(npart, npart)
if p_metric == 0:
np.fill_diagonal(self.__cc, np.finfo(np.float32).max)
elif p_metric == 1:
np.fill_diagonal(self.__cc, 0)
# Compute the image momentes for every particle
# mode: 'spatial' spatial moments (default), 'central' central moments and 'normalized' for central normalized ones,
# 'raw' moments are just the normalized flattered gray values of the masked particles
# npr: number of process, None (defaults) means they are adjusted automatically to the number of cores
def build_moments(self, mode='spatial', npr=None):
# Input parsing
if self.__mask_gl is None:
error_msg = 'There is no particles load call load_particles() method first!'
raise pexceptions.PySegInputError(expr='build_moments (ClassStar)', msg=error_msg)
if mode == 'spatial':
mode_p, nfeat = 0, 10
elif mode == 'central':
mode_p, nfeat = 1, 7
elif mode == 'normalized':
mode_p, nfeat = 2, 7
elif mode == 'raw':
mode_p, nfeat = 3, np.sum(self.__mask_gl)
else:
error_msg = 'Invalid option for moments mode: ' + str(mode)
raise pexceptions.PySegInputError(expr='build_moments (ClassStar)', msg=error_msg)
# Multiprocessing
if npr is None:
npr = mp.cpu_count()
processes = list()
# Create the list on indices to split
npart = self.__star.get_nrows()
sym_ids = list(np.arange(npart))
spl_ids = np.array_split(list(range(len(sym_ids))), npr)
shared_mat = mp.Array('f', int(nfeat*npart))
# Particles loop (Parallel)
if npr <= 1:
pr_mome_2d(-1, spl_ids[0], mode_p, nfeat, self.__particles, self.__mask_gl,
shared_mat)
else:
for pr_id in range(npr):
pr = mp.Process(target=pr_mome_2d, args=(pr_id, spl_ids[pr_id], mode_p, nfeat,
self.__particles, self.__mask_gl,
shared_mat))
pr.start()
processes.append(pr)
pr_results = list()
for pr in processes:
pr.join()
pr_results.append(pr.exitcode)
for pr_id in range(len(processes)):
if pr_id != pr_results[pr_id]:
error_msg = 'Process ' + str(pr_id) + ' exited unexpectedly!'
raise pexceptions.PySegInputError(expr='build_moments (ClassStar)', msg=error_msg)
gc.collect()
# Set class variable for moments
self.__momes = np.ones(shape=npart, dtype=object)
for i in range(npart):
self.__momes[i] = np.asarray(shared_mat[i:i+nfeat], dtype=np.float)
# self.__momes = np.frombuffer(shared_mat.get_obj(), dtype=np.float32).reshape(nfeat, npart)
def build_vectors(self):
'''
Reshape each ZAV images (nxm) to an array (vector 1xnm)
:return: None, vectors are stored in a internal variable
'''
# Input parsing
if (self.__particles is None) or (len(self.__particles) <= 0):
error_msg = 'There is no particles load call load_particles() method first!'
raise pexceptions.PySegInputError(expr='build_vectors (ClassStar)', msg=error_msg)
# Set class variable for moments
img_n, img_m = self.__particles[0].shape
npart, nfeat = len(self.__particles), img_n * img_m
self.__vectors = np.ones(shape=(npart,nfeat), dtype=np.float32)
for i in range(npart):
self.__vectors[i, :] = self.__particles[i].reshape(1, nfeat)
def save_vectors(self, fname, txt=False):
"""
Saves vectors in file as numpy (2D)array
:param fname: full path where vectors 2D numpy array will be stored, see numpy.save for more details
:param txt: if False (default) the is saved as binary data, otherwise as human readable text
:return
"""
# Input parsing
if self.__vectors is None:
error_msg = 'No CC matrix, call build_vectors() methods first!'
raise pexceptions.PySegInputError(expr='save_cc (ClassStar)', msg=error_msg)
if txt:
np.savetxt(fname, self.__vectors)
else:
np.save(fname, self.__vectors)
# Reduces moments dimensionality (see SciPy sklearn for detailed information)
# n_comp: number of components (moments) after the reductions, default 3, if 'mle' the Minka's MLE is used to
# guess the dimension
# method: valid, 'pca' | |
#
#
"""
Created on Mon Jun 29 17:30:58 2020 author: <NAME>; building on code from <NAME>/<NAME>
Improvements by <NAME> June 2021 (clean-up and allowing verious rivers to be analysed in one Python program)
See: https://github.com/Flood-Excess-Volume
"""
# Imports
import matplotlib.pyplot as plt
import pandas as pd
from pandas import read_excel
import bisect
import numpy as np
from array import *
from pandas import datetime
import calendar
# Definitions/functions used
def scale(x):
    # Min-max normalisation: map x linearly onto [0, 1] (element-wise).
    lo, hi = min(x), max(x)
    return (x - lo) / (hi - lo)
def unscaletime(x):
    # Inverse of scale() for the module-level `time` series:
    # map a value in [0, 1] back onto the original time axis.
    t0, t1 = min(time), max(time)
    return (t1 - t0) * x + t0
def Q(x): # Discharge Q given the water level h = x iff rating curve is given; here for EA-type rating curves
    # Globals: a, b, c are the per-segment coefficients of Q(h) = c_j*(h - a_j)**b_j;
    # lower_limits/upper_limits bound each segment; w is presumably the list/range of
    # segment indices so that w[-1] is the last index -- TODO confirm.
    # NOTE(review): if x <= lower_limits[0], neither branch below advances z, so the
    # loop never terminates; callers appear to pass levels above the lowest limit.
    z = 0
    while z < w[-1]:
        if x > lower_limits[z] and x <= upper_limits[z]:
            y = (c[z] * ((x - a[z]) ** b[z]))
            break
        elif x > upper_limits[z]:
            z = z + 1
    # while-else: runs only when the loop ends WITHOUT break (x beyond the
    # second-to-last segment, or a single-segment curve): use the last segment.
    else:
        y = (c[w[-1]] * ((x - a[w[-1]]) ** b[w[-1]]))
    return (y)
#
# Data import: river height data saved into a csv file
#
# The first column must have the heading 'Time', with time values converted into days (with the digits beyond the
# decimal point representing what the hours and seconds elapsed are in terms of a fraction of a day,
# and the second column must have the heading 'Height'. See also:
# - https://github.com/Flood-Excess-Volume (Python: River Don)
# - https://github.com/Rivers-Project-2018/How-to-do-FEV-Analysis/blob/master/README.md)
#
# * Input river data
# * Chosen threshold height: ht
# * overall error
#
# nriver = 4 # 2 # choice of river
# nratingc = 1 # 1 # when there is a rating curve: value is 1; when flow data are given value is 0
# nriverflag = 0 # flags whether adjustment of array is needed; 0 means no; >0 means yes plus amount
# nlength = length of array used when nriverflag = 1
ncheck = 1 # Superfluous test/check figures ; set ncheck=0 to remove
(nriver,nratingc,nriverflag,nlength) = (10,1,0,0)
(nriver,nratingc,nriverflag,nlength) = (13,1,10,1000)
(nriver,nratingc,nriverflag,nlength) = (13,1,370,100)
(nriver,nratingc,nriverflag,nlength) = (13,1,375,30)
(nriver,nratingc,nriverflag,nlength) = (10,0,0,0)
# (nriver,nratingc,nriverflag,nlength) = (13,1,0,0)
#
if nriver == 1: # River Don Rotherham/Tesco 24-10-2019 to 13-11-2019 ; (nriver,nractingc,nriverflag)=(1 0 0 0)
Data = pd.read_csv('DonTesco201911.csv') # Suboptimal: not the source file from EA but editted file; needs to be source file
# Full data file: RotherhamTesco_F0600_15minFlow_241019_131119.csv and RotherhamTesco_F0600_15minStage_241019_131119.csv
ht = 1.9 # Threshold
error = 0.08 # upper bounds chosen
stitle = 'River Don at Rotherham/Tesco 2019'
#
elif nriver == 2: # River Aire test case 26/27-12-2015 ; (nriver,nractingc,nriverflag)=(2 1 0)
Data = pd.read_csv('Aire15.csv')
# Full data files: "Armley F1707 Stage 15min May 15 to Mar 16.csv" "Armley F1707 Flow 15min May 15 to Mar 16.csv"
# 01/05/2015 00:00:00 to 30/03/2016 23:45:00
ht = 3.9
# Rating curve coeffecients, listed starting from lowest range of heights up until heighest range.
# Note that Q(h) = c_j*(h-a_j)**b_j
a = [0.156, 0.028, 0.153]
b = [1.115, 1.462, 1.502]
c = [30.96, 27.884, 30.127]
# Upper and lower limits of ranges of river heights given for rating curve.
lower_limits = [0.2, 0.685, 1.917]
upper_limits = [0.685, 1.917, 4.17]
error = [0.0542, 0.0344, 0.0528]
error = 0.055 # upper bounds chosen
stitle = 'River Aire at Leeds/Armley 2015'
elif nriver == 3: # River Don Rotherham 2007 data set too large; needs to start later in current file ; (nriver,nractingc,nriverflag)=(3 0 1200)
Data = pd.read_csv('RotherhamDon2007.csv') # Suboptimal: not a source file from EA but an editted file; needs to besource file
# Full data source file: Rotherham_L0601_15minStage_241019_131119.csv 24/10/2019 00:00:00 to 13/11/2019 23:45:00
ht = 1.9
error = 0.08 # example
nriverflag = 1200
stitle = 'River Don at Rotherham 2007'
print('WARNING nriver=3: slicing done')
elif nriver == 4: # River Don 2007 Sheffield Hadfields data set too large ; (nriver,nractingc,nriverflag)=(4 1 0)
Data = pd.read_csv('SheffieldHadfields2007.csv') # Suboptimal: not a source file from EA but an editted file; needs to be the source file
# Full data source file: SheffieldHadfields_F0605_15minStage_010107_140820.csv and SheffieldHadfields_F0605_15minFlow_010107_140820.csv
# 01/01/2007 00:00:00 to 14/08/2020 23:45:00
ht = 2.9 # Note that Q(h) = c_j*(h-a_j)**b_j
# different from River Aire case in excel file parameters.xlsx with parameters (a<->b coefficients interchanged)
b = [1.3803, 1.2967, 1.1066]
a = [0.3077, 0.34, -0.5767]
c = [77.2829, 79.5656, 41.3367]
# Upper and lower limits of ranges of river heights given for rating curve.
lower_limits = [0.39, 0.927, 1.436]
upper_limits = [0.927, 1.426, 3.58]
error = 0.0799 # overall upper bounds
stitle = 'River Don at Sheffield/Hadfields 2007'
print('WARNING nriver=4: FEV_mac, FEV_min different; sort out role of QT_min, QT_max?')
elif nriver == 5: # River Don 2019 Sheffield Hadfields data set too large ; (nriver,nractingc,nriverflag)=(5 1 0)
Data = pd.read_csv('SheffieldHadfields2019.csv') # Suboptimal: not a source file from EA but editted file; needs to be source file
# Full data source file: SheffieldHadfields_F0605_15minStage_010107_140820.csv and SheffieldHadfields_F0605_15minFlow_010107_140820.csv
# 01/01/2007 00:00:00 to 14/08/2020 23:45:00
ht = 2.9 # Note that Q(h) = c_j*(h-a_j)**b_j
# different from River Aire case in excel file parameters.xlsx with parameters (a<->b coefficients interchanged)
b = [1.3803, 1.2967, 1.1066]
a = [0.3077, 0.34, -0.5767]
c = [77.2829, 79.5656, 41.3367]
# Upper and lower limits of ranges of river heights given for rating curve.
lower_limits = [0.39, 0.927, 1.436]
upper_limits = [0.927, 1.426, 3.58]
error = 0.0799 # overal upper bounds
stitle = 'River Don at Sheffield/Hadfields 2019'
print('WARNING nriver=5: FEV_mac, FEV_min different; sort out role of QT_min, QT_max?')
elif nriver == 10: # River Ciliwung updated by Onno 01-017-2021 ; (nriver,nractingc,nriverflag)=(10 1 0 0) Nico Septianus' case
# Full data source file: error see Septianus' thesis
# Data = pd.read_excel(xlsx_file,sheetname='ciliwungdata.xlsx')
# Data = pd.read_excel(r'ciliwungdata.xlsx', engine='openpyxl') # Works
# Data = pd.read_excel('ciliwungdata.xlsx')
# Data = pd.read_csv('ciliwungdata.csv') # Works
Data = pd.read_csv('ciliwungdatashunyu.csv') # Works
ht = 2.8
error = 0.05
stitle = 'River Ciliwung flood 2020 at Depok floodgate, Djakarta'
# Different from River Ciliwung case in excel file parameters.xlsx with parameters (a<->b coefficients interchanged?)
# Note that Q(h) = c_j*(h-a_j)**b_j
c = [11.403]
a = [-0.2]
b = [1.715]
# y = (c[w[-1]] * ((x - a[w[-1]]) ** b[w[-1]]))
Qt = c[0]*(ht-a[0])**(b[0]) # Q(ht) should work TODO
# Upper and lower limits of ranges of river heights given for rating curve.
lower_limits = [0.0]
upper_limits = [10]
print(' nriver=10: Ciliwung river, cut-off not precise')
elif nriver == 11: # New river ; (nriver,nractingc,nriverflag)=(11 0 0)
# Full data source file:
# Data = pd.read_csv('TBD.csv')
# ht = TBD
# error = TBD
stitle = 'River TBD at TBD'
print(' nriver=11: working.')
elif nriver == 12: # River Ouse flood 2015; (nriver,nractingc,nriverflag)=(12 0 0 0) Antonia Feilden's 2018 project case 2015 York, 2000? York?
# Full data source file:
Data = pd.read_csv('skelton 2015.csv')
ht = 6.17
error = 0.08
stitle = 'River Ouse flood 2015 at Skelton in York'
print(' nriver=13: working but standard case does not apply given the hysteresis.')
elif nriver == 13: # River Tamar in Devon; (nriver,nratingc,nriverflag,nlength) = (13,1,370,100) Onno's Matlab case 2018
# Full data source file: now hourly data
Data = pd.read_csv('river-tamar-gulworthy-gunnislake.csv')
ht = 2.95
error = 0.08
lower_limits = [0.1890, 0.3650]
upper_limits = [0.3650, 3.9840]
c = [30.4515824141758, 31.4420090976431]
b = [3.89481846477192, 1.99812525109993]
a = [-0.237667493077846, -0.00174326407201127]
stitle = 'River Tamar flood 2013 at Gunnislake'
print(' nriver=13: not yet working TBD.')
#
# Read in time and height data from "Data" file and make special plots
#
if nriver == 10: # Ciliwung river case is special
time = Data['Day']
# Flow = Data['Flowrate'] # Fails to read in; why?
height = Data['Riverheight']
elif nriver == 12: # Ouse river case is special
time = Data['Day']
height = Data['Stage']
Flow = Data['Flow'] # Specific for case where flow data given as well instead of rating curve; reprogram
elif nriver == 13: # Tamar river case is special
datum = Data['date']
tijd = pd.to_datetime(datum)
yr = tijd.dt.year
time = tijd.dt.dayofyear
# ts = pd.Timestamp(tijd)
for jj in range(0,len(datum)):
ts = pd.Timestamp(datum[jj])
aa = np.ceil(ts.to_julian_date())
aa = aa.astype(int)
time[jj] = aa
# aa = np.ceil(ts.to_julian_date('24-12-2013'))
# aa = aa.astype(int)
time = time-time[0]
time = time-nriverflag-2
height = Data['avg_level']
heightmin = Data['min_level']
heightmax = | |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import matplotlib.patches as patches
from matplotlib.ticker import MultipleLocator, IndexLocator, LinearLocator
from floris.utils.visualization import property as ppt
default_fluent_dir = "../data/fluent"  # root directory of exported Fluent results
baseline_data_dir = "../data/baselines"  # experimental reference profile data
# Column headers used to locate fields in the exported data files.
# NOTE(review): the leading space in the coord/vel labels appears to match
# the exporter's header format -- confirm before changing.
data_labels = {'coord': {'x': " x-coordinate",
                         'y': " y-coordinate",
                         'z': " z-coordinate"},
               'vel': {'x': " x-velocity",
                       'y': " y-velocity",
                       'z': " z-velocity"},
               'tke': {'x': "turb-kinetic-energy"}}
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# FLUENT_RESULTS_PLOT #
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
def vertical_velocity_profile_plot(fname, D=0.15, H=0.125, vel=2.2):
    """Overlay vertical streamwise-velocity profiles at several x/D stations
    on a single axis: RNS results (red line) vs. the WP_2011 Fig. 4
    experimental baselines (open circles).

    :param fname: name handed to ``profile_load`` -- expected format defined
        by that helper (TODO confirm)
    :param D: rotor diameter used for normalization
    :param H: hub height (same units as ``D``)
    :param vel: inflow speed used to normalize velocities
    """
    diameter, hub_height, inflow = D, H, vel
    # Downstream measurement stations, in rotor diameters (x/D).
    distance_list = [-1, 2, 3, 5, 7, 10, 14, 20]
    assert isinstance(fname, str)
    x_coord, z_coord, x_vel = profile_load(fname, 'z', 'vel')
    bx_coord, bz_coord, bx_vel = baseline_profile_load(
        [f'WP_2011/Fig_4/{i}d.txt' for i in distance_list])
    # Baseline stations must line up with the simulated ones.
    assert np.all(bx_coord == (x_coord / diameter).astype(np.int32)), 'Data dismatching !'
    # print(x_coord, bx_coord)
    fig, ax = plt.subplots(figsize=(24, 5), dpi=120)
    ax.set_xlabel('x/D', ppt.font20t)
    ax.set_xlim([bx_coord[0] - 2., bx_coord[-1] + 1.5])
    ax.set_xticks(bx_coord)
    # ax.xaxis.set_major_locator(MultipleLocator(2.))
    ax.set_ylabel('z/d', ppt.font20t)
    ax.set_ylim([0, 2.5])
    # ax.set_yticks([])
    ax.yaxis.set_major_locator(MultipleLocator(0.5))
    scaled = 1.2  # horizontal stretch applied to each normalized profile
    for i in range(len(x_coord)):
        # Scale the profile, then center it at its x/D station.
        x = scale_func(x_vel[i] / inflow, 0., scaled)
        x = x - x.mean() + (x_coord[i] / diameter)
        y = z_coord[i] / diameter
        ax.plot(x, y, c='r', lw=2., ls='-', label='RNS')
        bx = scale_func(bx_vel[i] / inflow, 0., scaled)
        bx = bx - bx.mean() + (bx_coord[i])
        by = bz_coord[i]
        ax.plot(bx, by, c='w', lw=0., label='Exp',
                markersize=6, marker='o', markeredgecolor='k',
                markeredgewidth=1.)
    # Dashed lines marking the rotor's bottom and top tip heights.
    ax.axhline((hub_height - diameter / 2) / diameter, color='k', alpha=0.5,
               linestyle='--', linewidth=1.)
    ax.axhline((hub_height + diameter / 2) / diameter, color='k', alpha=0.5,
               linestyle='--', linewidth=1.)
    ax.tick_params(labelsize=15, colors='k', direction='in',
                   top=True, bottom=True, left=True, right=True)
    tick_labs = ax.get_xticklabels() + ax.get_yticklabels()
    [tick_lab.set_fontname('Times New Roman') for tick_lab in tick_labs]
    # Labels repeat per station; keep only one RNS/Exp pair in the legend.
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles[:2], labels[:2], loc="upper left", prop=ppt.font15,
              edgecolor='None', frameon=False, labelspacing=0.4,
              bbox_transform=ax.transAxes)
    turbine_plot(ax, diameter, hub_height, direction='v')
    # ax.set_aspect("equal")
    # plt.savefig(f"../outputs/v_profile.png", format='png', dpi=300, bbox_inches='tight')
    plt.show()
def vertical_velocity_profile_show(fname, D=0.15, H=0.125, vel=2.2):
    """Plot vertical velocity profiles in a 2x4 panel grid (one panel per
    x/d station): RNS (red), experiment (circles) and, in wake panels, the
    inflow profile (dotted). Saves the figure to ``../outputs/vp.png``.

    :param fname: name handed to ``profile_load``
    :param D: rotor diameter used for normalization
    :param H: hub height (same units as ``D``)
    :param vel: inflow speed, used for the x-axis limits
    """
    diameter, hub_height, inflow = D, H, vel
    # Downstream measurement stations, in rotor diameters (x/d).
    distance_list = [-1, 2, 3, 5, 7, 10, 14, 20]
    assert isinstance(fname, str)
    x_coord, z_coord, x_vel = profile_load(fname, 'z', 'vel')
    bx_coord, bz_coord, bx_vel = baseline_profile_load(
        [f'WP_2011/Fig_4/{i}d.txt' for i in distance_list])
    assert np.all(bx_coord == (x_coord / diameter).astype(np.int32)), 'Data dismatching !'
    # print(x_coord, bx_coord)
    fig, ax = plt.subplots(2, 4, sharey=True, figsize=(15, 10), dpi=120)
    assert len(ax.flatten()) == len(bx_coord)
    for i, axi in enumerate(ax.flatten()):
        if i in [0, 4]:
            axi.set_ylabel('z/d', ppt.font20t)
            axi.set_ylim([0, 2.5])
            axi.yaxis.set_major_locator(MultipleLocator(0.5))
            axi.text(4.5, -0.3, 'Wind speed (m/s)', va='top', ha='left',
                     fontdict=ppt.font18, )
        rns_vel = x_vel[i]
        # rns_vel = (rns_vel - 2.2) * 0.5 + 2.2 if i in [1, 2, 3] else rns_vel
        # NOTE(review): stations 1-3 are blended toward the inflow profile
        # by a fixed factor 0.6 before plotting -- this alters the displayed
        # simulation results; confirm it is an intended correction.
        rns_vel = (rns_vel - x_vel[0]) * 0.6 + x_vel[0] if i in [1, 2, 3] else rns_vel
        # rns_vel = rns_vel * np.vectorize(vel_modification)(z_coord[i] / diameter)
        axi.plot(rns_vel, z_coord[i] / diameter,
                 c='r', lw=2., ls='-', label='RNS')
        axi.plot(bx_vel[i], bz_coord[i], c="w", lw=0., label='Exp',
                 markersize=6, marker="o", markeredgecolor='k',
                 markeredgewidth=1.)
        if i != 0:
            # Dotted inflow (station 0) profile for reference in wake panels.
            axi.plot(x_vel[0], z_coord[0] / diameter,
                     c='k', lw=1.5, ls=':', label='Inflow')
        axi.set_xlim([inflow * 0.4, inflow * 1.4])
        axi.xaxis.set_major_locator(MultipleLocator(0.4))
        # Dashed lines marking the rotor's bottom and top tip heights.
        axi.axhline((hub_height - diameter / 2) / diameter, color='k', alpha=0.5,
                    linestyle='--', linewidth=1.)
        axi.axhline((hub_height + diameter / 2) / diameter, color='k', alpha=0.5,
                    linestyle='--', linewidth=1.)
        axi.text(0.1, 0.9, f'x/d = {bx_coord[i]}', va='top', ha='left',
                 fontdict=ppt.font18t, transform=axi.transAxes, )
        axi.tick_params(labelsize=15, colors='k', direction='in',
                        top=True, bottom=True, left=True, right=True)
        # Hide duplicated tick lines on shared interior axes.
        if i not in [0, 3, 4, 7]:
            plt.setp(axi.get_yticklines(), visible=False)
        elif i in [0, 4]:
            axi.tick_params(right=False)
        elif i in [3, 7]:
            axi.tick_params(left=False)
        tick_labs = axi.get_xticklabels() + axi.get_yticklabels()
        [tick_lab.set_fontname('Times New Roman') for tick_lab in tick_labs]
    # One combined legend above the second panel.
    ax1 = ax.flatten()[1]
    handles, labels = ax1.get_legend_handles_labels()
    ax1.legend(handles, labels, loc="upper left", prop=ppt.font15, columnspacing=0.5,
               edgecolor='None', frameon=False, labelspacing=0.4, bbox_to_anchor=(0.3, 1.15),
               bbox_transform=ax1.transAxes, ncol=3, handletextpad=0.5)
    plt.subplots_adjust(wspace=0., hspace=0.25)
    plt.savefig(f"../outputs/vp.png", format='png', dpi=300, bbox_inches='tight')
    # plt.show()
def horizontal_velocity_profile_plot(fname, D=0.15, H=0.125, vel=2.2):
    """Overlay horizontal (y-direction) streamwise-velocity profiles at the
    downstream stations on a single axis. Experimental baselines are
    currently disabled (commented out).

    :param fname: name handed to ``profile_load``
    :param D: rotor diameter used for normalization
    :param H: hub height, forwarded to ``turbine_plot``
    :param vel: inflow speed used to normalize velocities
    """
    diameter, hub_height, inflow = D, H, vel
    # Downstream measurement stations, in rotor diameters (x/D).
    distance_list = [-1, 2, 3, 5, 7, 10, 14, 20]
    assert isinstance(fname, str)
    x_coord, y_coord, x_vel = profile_load(fname, 'y', 'vel')
    # bx_coord, bz_coord, bx_vel = baseline_profile_load(
    #     [f'WP_2011/Fig_7/{i}d.txt' for i in distance_list])
    # assert np.all(bx_coord == (x_coord / diameter).astype(np.int32)), 'Data dismatching !'
    # print(x_coord, bx_coord)
    fig, ax = plt.subplots(figsize=(18, 5), dpi=120)
    # Normalize coordinates by the rotor diameter.
    nx_coord = x_coord / diameter
    ny_coord = y_coord / diameter
    ax.set_xlabel('x/D', ppt.font20t)
    ax.set_xlim([-1., nx_coord[-1] + 1.5])
    ax.set_xticks(nx_coord)
    # ax.xaxis.set_major_locator(MultipleLocator(2.))
    ax.set_ylabel('y/D', ppt.font20t)
    ax.set_ylim([-1., 1.])
    # ax.set_yticks([])
    ax.yaxis.set_major_locator(MultipleLocator(0.5))
    scaled = 1.2  # horizontal stretch applied to each normalized profile
    for i in range(len(x_coord)):
        # Scale the profile, then anchor its maximum at the x/D station.
        # (The vertical plots center on the mean instead -- see above.)
        x = scale_func(x_vel[i] / inflow, 0., scaled)
        x = x - x.max() + (nx_coord[i])
        y = ny_coord[i]
        ax.plot(x, y, c='r', lw=2., ls='-', label='RNS')
        # bx = scale_func(bx_vel[i] / inflow, 0., scaled)
        # bx = bx - bx.mean() + (bx_coord[i])
        # by = bz_coord[i]
        # ax.plot(bx, by, c='w', lw=0., label='Exp',
        #         markersize=6, marker='o', markeredgecolor='k',
        #         markeredgewidth=1.)
    # Dashed lines at the rotor edges (y/D = +-0.5).
    ax.axhline(-0.5, color='k', alpha=0.5, linestyle='--', linewidth=1.)
    ax.axhline(0.5, color='k', alpha=0.5, linestyle='--', linewidth=1.)
    ax.tick_params(labelsize=15, colors='k', direction='in',
                   top=True, bottom=True, left=True, right=True)
    tick_labs = ax.get_xticklabels() + ax.get_yticklabels()
    [tick_lab.set_fontname('Times New Roman') for tick_lab in tick_labs]
    # Labels repeat per station; keep only the first in the legend.
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles[:1], labels[:1], loc="upper left", prop=ppt.font15,
              edgecolor='None', frameon=False, labelspacing=0.4,
              bbox_transform=ax.transAxes)
    turbine_plot(ax, diameter, hub_height, direction='h')
    # ax.set_aspect("equal")
    # plt.savefig(f"../outputs/h_profile.png", format='png', dpi=300, bbox_inches='tight')
    plt.show()
def horizontal_velocity_profile_show(fname, D=0.15, H=0.125, vel=2.2):
    """Placeholder: panel-grid view of horizontal velocity profiles.

    TODO: not implemented; intended counterpart of
    ``vertical_velocity_profile_show``.
    """
    pass
def vertical_turbulence_profile_plot(fname, D=0.15, H=0.125, vel=2.2):
    """Overlay vertical turbulence-intensity profiles at several x/D
    stations: RNS (TKE converted via ``tke_to_intensity``) vs. WP_2011
    Fig. 7 baselines.

    NOTE(review): the baseline values (``bx_vel``) are plotted without any
    conversion -- confirm the Fig. 7 files already contain intensities.

    :param fname: name handed to ``profile_load``
    :param D: rotor diameter used for normalization
    :param H: hub height (same units as ``D``)
    :param vel: inflow speed used in the TKE-to-intensity conversion
    """
    diameter, hub_height, inflow = D, H, vel
    # Downstream measurement stations, in rotor diameters (x/D).
    distance_list = [-1, 2, 3, 5, 7, 10, 14, 20]
    assert isinstance(fname, str)
    x_coord, z_coord, x_tke = profile_load(fname, 'z', 'tke')
    # 0.549: streamwise fluctuation ratio used in the conversion -- source
    # of this constant not visible here; TODO document its origin.
    x_turb = tke_to_intensity(x_tke, vel, 0.549)
    bx_coord, bz_coord, bx_vel = baseline_profile_load(
        [f'WP_2011/Fig_7/{i}d.txt' for i in distance_list])
    assert np.all(bx_coord == (x_coord / diameter).astype(np.int32)), 'Data dismatching !'
    # print(x_coord, bx_coord)
    fig, ax = plt.subplots(figsize=(24, 5), dpi=120)
    ax.set_xlabel('x/D', ppt.font20t)
    ax.set_xlim([bx_coord[0] - 2., bx_coord[-1] + 1.5])
    ax.set_xticks(bx_coord)
    # ax.xaxis.set_major_locator(MultipleLocator(2.))
    ax.set_ylabel('z/d', ppt.font20t)
    ax.set_ylim([0, 2.5])
    # ax.set_yticks([])
    ax.yaxis.set_major_locator(MultipleLocator(0.5))
    scaled = 1.2  # horizontal stretch applied to each profile
    for i in range(len(x_coord)):
        # Scale the profile, then center it at its x/D station.
        x = scale_func(x_turb[i], 0., scaled)
        x = x - x.mean() + (x_coord[i] / diameter)
        y = z_coord[i] / diameter
        ax.plot(x, y, c='r', lw=2., ls='-', label='RNS')
        bx = scale_func(bx_vel[i], 0., scaled)
        bx = bx - bx.mean() + (bx_coord[i])
        by = bz_coord[i]
        ax.plot(bx, by, c='w', lw=0., label='Exp',
                markersize=6, marker='o', markeredgecolor='k',
                markeredgewidth=1.)
    # Dashed lines marking the rotor's bottom and top tip heights.
    ax.axhline((hub_height - diameter / 2) / diameter, color='k', alpha=0.5,
               linestyle='--', linewidth=1.)
    ax.axhline((hub_height + diameter / 2) / diameter, color='k', alpha=0.5,
               linestyle='--', linewidth=1.)
    ax.tick_params(labelsize=15, colors='k', direction='in',
                   top=True, bottom=True, left=True, right=True)
    tick_labs = ax.get_xticklabels() + ax.get_yticklabels()
    [tick_lab.set_fontname('Times New Roman') for tick_lab in tick_labs]
    # Labels repeat per station; keep only one RNS/Exp pair in the legend.
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles[:2], labels[:2], loc="upper left", prop=ppt.font15,
              edgecolor='None', frameon=False, labelspacing=0.4,
              bbox_transform=ax.transAxes)
    turbine_plot(ax, diameter, hub_height, direction='v')
    # ax.set_aspect("equal")
    # plt.savefig(f"../outputs/v_profile.png", format='png', dpi=300, bbox_inches='tight')
    plt.show()
def vertical_turbulence_profile_show(fname, D=0.15, H=0.125, vel=2.2):
    """Plot vertical turbulence-intensity profiles in a 2x4 panel grid:
    RNS (red), experiment (circles) and inflow (dotted). Saves the figure
    to ``../outputs/vpt.png``.

    NOTE(review): every station's RNS profile is damped toward the inflow
    profile by a height-dependent gaussian factor before plotting -- this
    alters the displayed results; confirm it is an intended correction.

    :param fname: name handed to ``profile_load``
    :param D: rotor diameter used for normalization
    :param H: hub height (same units as ``D``)
    :param vel: inflow speed used in the TKE-to-intensity conversion
    """
    diameter, hub_height, inflow = D, H, vel
    # Downstream measurement stations, in rotor diameters (x/d).
    distance_list = [-1, 2, 3, 5, 7, 10, 14, 20]
    assert isinstance(fname, str)
    x_coord, z_coord, x_tke = profile_load(fname, 'z', 'tke')
    # 0.549: streamwise fluctuation ratio -- TODO document its origin.
    x_turb = tke_to_intensity(x_tke, vel, 0.549)
    bx_coord, bz_coord, bx_turb = baseline_profile_load(
        [f'WP_2011/Fig_7/{i}d.txt' for i in distance_list])
    assert np.all(bx_coord == (x_coord / diameter).astype(np.int32)), 'Data dismatching !'
    # print(x_coord, bx_coord)
    fig, ax = plt.subplots(2, 4, sharey=True, figsize=(15, 10), dpi=120)
    assert len(ax.flatten()) == len(bx_coord)
    for i, axi in enumerate(ax.flatten()):
        if i in [0, 4]:
            axi.set_ylabel('z/d', ppt.font20t)
            axi.set_ylim([0, 2.5])
            axi.yaxis.set_major_locator(MultipleLocator(0.5))
            axi.text(0.22, -0.25, 'Turbulence intensity (%)',
                     va='top', ha='left', fontdict=ppt.font18, )
        rns_turb = x_turb[i]
        # Gaussian weight centered at z/d = a with width parameter b.
        a, b = 0.4, 2.
        # a, b = 0.4, 3.
        # gauss_func = lambda x: 1 / np.sqrt(2 * np.pi) / b * np.exp(- (x - a)**2 / 2 * b**2)
        gauss_func = lambda x: 1 / b * np.exp(- (x - a)**2 / 2 * b**2)
        # gauss_func = lambda x: 1 / b * np.exp(- (x - a)**2)
        gauss_factor = gauss_func(z_coord[i] / diameter)
        # NOTE(review): leftover debug output -- consider removing.
        print(gauss_factor)
        rns_turb = (rns_turb - x_turb[0]) * 0.4 * (1 - gauss_factor) + x_turb[0]
        axi.plot(rns_turb, z_coord[i] / diameter,
                 c='r', lw=2., ls='-', label='RNS')
        axi.plot(bx_turb[i], bz_coord[i], c="w", lw=0., label='Exp',
                 markersize=6, marker="o", markeredgecolor='k',
                 markeredgewidth=1.)
        if i != 0:
            # Dotted inflow (station 0) profile for reference in wake panels.
            axi.plot(x_turb[0], z_coord[0] / diameter,
                     c='k', lw=1.5, ls=':', label='Inflow')
        axi.set_xlim([0.1 * 0.3, 0.1 * 1.5])
        axi.xaxis.set_major_locator(MultipleLocator(0.04))
        # Dashed lines marking the rotor's bottom and top tip heights.
        axi.axhline((hub_height - diameter / 2) / diameter, color='k', alpha=0.5,
                    linestyle='--', linewidth=1.)
        axi.axhline((hub_height + diameter / 2) / diameter, color='k', alpha=0.5,
                    linestyle='--', linewidth=1.)
        axi.text(0.65, 0.9, f'x/d = {bx_coord[i]}', va='top', ha='left',
                 fontdict=ppt.font18t, transform=axi.transAxes, )
        axi.tick_params(labelsize=15, colors='k', direction='in',
                        top=True, bottom=True, left=True, right=True)
        # Hide duplicated tick lines on shared interior axes.
        if i not in [0, 3, 4, 7]:
            plt.setp(axi.get_yticklines(), visible=False)
        elif i in [0, 4]:
            axi.tick_params(right=False)
        elif i in [3, 7]:
            axi.tick_params(left=False)
        tick_labs = axi.get_xticklabels() + axi.get_yticklabels()
        [tick_lab.set_fontname('Times New Roman') for tick_lab in tick_labs]
    # One combined legend above the second panel.
    ax1 = ax.flatten()[1]
    handles, labels = ax1.get_legend_handles_labels()
    ax1.legend(handles, labels, loc="upper left", prop=ppt.font15, columnspacing=0.5,
               edgecolor='None', frameon=False, labelspacing=0.4, bbox_to_anchor=(0.3, 1.15),
               bbox_transform=ax1.transAxes, ncol=3, handletextpad=0.5)
    plt.subplots_adjust(wspace=0., hspace=0.25)
    plt.savefig(f"../outputs/vpt.png", format='png', dpi=300, bbox_inches='tight')
    plt.show()
def tke_to_intensity(tke, inflow, u_turb_ratio):
    """Convert turbulent kinetic energy to turbulence intensity.

    Computes ``sqrt(2 * tke * u_turb_ratio) / inflow``: the fluctuation
    magnitude implied by the TKE (weighted by ``u_turb_ratio``),
    normalized by the inflow speed.
    """
    fluctuation = np.sqrt(2.0 * tke * u_turb_ratio)
    return fluctuation / inflow
def profile_load(filename, axis='z', type='vel'):
x1_coord_col | |
(['pd'], self.PhantomDamage),
(['dtest'], self.DamageTest),
(['route', 'ルート'], self.Route),
(['allroute', '全ルート'], self.AllRoute),
(['clanattack'], self.AllClanAttack),
(['clanreport'], self.AllClanReport),
(['active', 'アクティブ'], self.ActiveMember),
(['servermessage'], self.ServerMessage),
(['serverleave'], self.ServerLeave),
(['zeroserverleave'], self.ZeroServerLeave),
(['inputerror'], self.InputError),
(['gcmd'], self.GuildCommand),
]
    def GetMember(self, author) -> ClanMember:
        """Return the ClanMember record for a Discord author, creating and
        registering a new one on first sight.

        Name and mention are refreshed on every call, so display-name
        changes are picked up automatically.
        """
        member = self.members.get(author.id)
        if (member is None):
            member = ClanMember()
            self.members[author.id] = member
        member.name = self.DelimiterErase(author.display_name)
        member.mention = author.mention
        return member
def IsInput(self, channel_id):
if self.inputchannel is None: return False
return self.inputchannel.id == channel_id
def FindMember(self, name) -> Optional[ClanMember]:
for member in self.members.values():
if (member.name == name):
return member
return None
def DeleteMember(self, name) -> Optional[ClanMember]:
for id, member in self.members.items():
if (member.name == name):
del self.members[id]
return member
return None
    def FullReset(self):
        """Reset the whole clan-battle state: daily member state plus boss
        progress, lap tracking, logs and planned routes."""
        self.Reset()
        self.bosscount = 0
        self.beforesortie = 0  # overrides the snapshot Reset() just took
        self.lap = {0 : 0.0}
        self.defeatlist.clear()
        self.attacklist.clear()
        self.RouteReset()
    def Reset(self):
        """Daily reset: snapshot the current sortie total, clear the stamp
        bookkeeping and reset each member's per-day state."""
        self.beforesortie = self.TotalSortie()
        self.lastmessage = None
        self.stampcheck = {}
        for member in self.members.values():
            member.Reset()
def RouteReset(self):
for member in self.members.values():
member.route.clear()
def AddStamp(self, messageid):
if (messageid in self.stampcheck):
self.stampcheck['messageid'] += 1
else:
self.stampcheck['messageid'] = 1
return self.stampcheck['messageid']
def RemoveStamp(self, messageid):
if (messageid in self.stampcheck):
self.stampcheck['messageid'] -= 1
else:
self.stampcheck['messageid'] = 0
return self.stampcheck['messageid']
async def AddReaction(self, message, overkill):
reactemojis = self.emojis if not overkill else self.emojisoverkill
for emoji in reactemojis:
await message.add_reaction(emoji)
    async def RemoveReaction(self, message, overkill : bool, me):
        """Remove this bot's reaction emojis from the message.

        NOTE(review): on NotFound/Forbidden the loop *breaks*, leaving any
        remaining emojis in place -- confirm ``break`` (rather than
        ``continue``) is intended.
        """
        reactemojis = self.emojis if not overkill else self.emojisoverkill
        for emoji in reactemojis:
            try:
                await message.remove_reaction(emoji, me)
            except (discord.errors.NotFound, discord.errors.Forbidden):
                break
    async def RemoveReactionNotCancel(self, message, overkill : bool, me):
        """Remove this bot's reaction emojis from the message, keeping the
        cancel mark (U+274C) so the user can still cancel.

        NOTE(review): as in RemoveReaction, NotFound/Forbidden breaks out
        of the loop instead of skipping the failing emoji -- confirm.
        """
        reactemojis = self.emojis if not overkill else self.emojisoverkill
        for emoji in reactemojis:
            if emoji != u"\u274C":
                try:
                    await message.remove_reaction(emoji, me)
                except (discord.errors.NotFound, discord.errors.Forbidden):
                    break
    async def SetNotice(self, member : ClanMember, message : discord.Message, bossstr : str):
        """Record a notice target on the member and acknowledge it by
        reacting with the matching number mark (index 0 when unset)."""
        member.SetNotice(bossstr)
        if (member.notice is None):
            mark = self.numbermarks[0]
        else:
            mark = self.numbermarks[member.notice]
        await message.add_reaction(mark)
    async def Reserve(self, member : ClanMember, message : discord.Message, bossstr : str):
        """Placeholder for a boss-reservation feature. TODO: not implemented."""
        pass
    async def MemberRefresh(self):
        """Synchronize the registry with the guild's current non-bot
        members and report additions/removals to the input channel.

        Auto-adjustment is skipped for guilds with 40 or more human
        members. NOTE(review): when nothing changed, ``mes`` stays empty
        and ``send('')`` may be rejected by the API -- confirm.
        """
        if self.guild is None: return
        mes = ''
        mlist = []
        deletemember = []
        # Only auto-sync small guilds; larger ones must be managed manually.
        if len([m for m in self.guild.members if not m.bot]) < 40:
            for member in self.guild.members:
                if not member.bot:
                    mlist.append(member.id)
                    if self.members.get(member.id) is None:
                        self.GetMember(member)
                        mes += member.name + "を追加しました\n"
            for id, member in self.members.items():
                if (id not in mlist):
                    deletemember.append(id)
                    mes += member.name + "を削除しました\n"
        else :
            mes += '人数が多すぎるので、自動調整は行なえません'
        # Deletion is deferred to avoid mutating the dict while iterating.
        for id in deletemember:
            del self.members[id]
        self.SetInputChannel()
        if self.inputchannel is not None:
            await self.inputchannel.send(mes)
def CheckOptionNone(self, opt):
if 0 < len(opt):
raise ValueError
return True
def CheckInputChannel(self, message):
if self.inputchannel is None or message.channel.name != inputchannel:
return True
return False
def CheckNotAdministrator(self, message):
if message.author.guild_permissions.administrator:
return False
return True
def CheckNotMasterAdministrator(self, clan, message):
if clan.Admin:
return False
if message.author.guild_permissions.administrator:
return False
return True
def AttackNum(self):
return len([m for m in self.members.values() if m.attack])
    async def Attack(self, message, member : ClanMember, opt):
        """Register an attack declaration from ``member``.

        Rejects messages outside the input channel, re-posts the reaction
        controls on the new message and, once two or more members have
        declared, triggers the automatic remaining-HP display if enabled.
        Returns True when the declaration was registered.
        """
        self.CheckOptionNone(opt)
        if self.CheckInputChannel(message):
            await message.channel.send('%s のチャンネルで発言してください' % inputchannel)
            return False
        # Remember any previous declaration so its reactions can be cleared.
        tmpattack = member.attackmessage if member.attack else None
        member.Attack(self)
        if 2 <= self.AttackNum():
            if self.damagecontrol.IsAutoExecutive():
                try:
                    enemyhp = BossHpData[self.BossLevel() - 1][self.BossIndex()][0]
                    self.damagecontrol.RemainHp(self.BossIndex(), enemyhp)
                    await self.damagecontrol.TryDisplay()
                except IndexError:
                    # Boss table may not cover the current lap/boss; skip display.
                    pass
        if (member.taskkill != 0):
            await message.add_reaction(self.taskkillmark)
        member.attackmessage = message
        await self.AddReaction(message, member.IsOverkill())
        # Clear the controls from the superseded declaration (keep cancel).
        if tmpattack is not None:
            await self.RemoveReactionNotCancel(tmpattack, member.IsOverkill(), message.guild.me)
        return True
    async def TaskKill(self, message, member : ClanMember, opt):
        """Flag the member's current attack as a task-kill (records the
        message id) and acknowledge with the task-kill mark."""
        member.taskkill = message.id
        await message.add_reaction(self.taskkillmark)
        return True
    async def PrevBoss(self, message, member : ClanMember, opt):
        """Step the current boss back by one."""
        await self.ChangeBoss(message.channel, -1)
        return True
    async def NextBoss(self, message, member : ClanMember, opt):
        """Advance the current boss by one."""
        await self.ChangeBoss(message.channel, 1)
        return True
    async def SetBoss(self, message, member : ClanMember, opt):
        """Jump to an absolute position given as "<lap> <boss>" in opt.

        ``bosscount`` is the zero-based linear index over laps of
        BOSSNUMBER bosses. NOTE(review): when opt has fewer than two
        fields the command silently does nothing -- confirm intended.
        """
        try:
            sp = opt.split(' ')
            if 2 <= len(sp):
                lap = int(sp[0])
                boss = int(sp[1])
                if 1 <= lap and 1 <= boss and boss <= BOSSNUMBER:
                    self.bosscount = (lap - 1) * BOSSNUMBER + boss - 1
                    await self.ChangeBoss(message.channel, 0)
                else:
                    raise ValueError
        except ValueError:
            await message.channel.send('数値エラー')
        return True
    async def Memo(self, message, member : ClanMember, opt):
        """Store opt as the member's memo text."""
        member.SetMemo(opt)
        return True
    async def Notice(self, message, member : ClanMember, opt):
        """Set a boss notice for the member (delegates to SetNotice)."""
        await self.SetNotice(member, message, opt)
        return False
    async def Refresh(self, message, member : ClanMember, opt):
        """Re-synchronize the member registry with the guild."""
        await self.MemberRefresh()
        return True
    async def MemberList(self, message, member : ClanMember, opt):
        """Debug helper: list all guild member names (or the raw count
        when the list is empty)."""
        if 0 < len(message.guild.members):
            await message.channel.send('\n'.join([m.name for m in message.guild.members]))
        else:
            await message.channel.send('len(message.guild.members):%d' % len(message.guild.members))
        return False
    async def ChannelList(self, message, member : ClanMember, opt):
        """Debug helper: list guild channels and whether each name matches
        the configured module-level ``inputchannel``."""
        mes = ''
        mes += 'len %d\n' % (len(message.guild.channels))
        for m in message.guild.channels:
            mes += '%s/%s\n' % (m.name, m.name == inputchannel)
        await message.channel.send(mes)
        return False
def DelimiterErase(self, name : str):
if self.namedelimiter is None or self.namedelimiter == "":
return name
npos = name.find(self.namedelimiter)
if npos < 0:
return name
return name[0:npos]
    async def NameDelimiter(self, message, member : ClanMember, opt):
        """Set (or clear, when opt is empty) the name delimiter and
        re-derive every registered member's stored name."""
        self.namedelimiter = None if opt == '' else opt
        for m in self.guild.members:
            if m.id in self.members:
                self.members[m.id].name = self.DelimiterErase(m.display_name)
        if self.namedelimiter is None:
            mes = 'デリミタをデフォルトに戻しました'
        else:
            mes = 'デリミタを%sに設定しました' % self.namedelimiter
        await message.channel.send(mes)
        return True
    async def MemberInitialize(self, message, member : ClanMember, opt):
        """Admin only: wipe the entire member registry."""
        if not message.author.guild_permissions.administrator: return False
        self.members.clear()
        await message.channel.send('メンバーを全て削除しました')
        return True
    async def SetMemberRole(self, message, member : ClanMember, opt):
        """Rebuild the member registry from the guild role whose name
        contains opt.

        Fails (with a message) when zero or multiple roles match, or the
        matching role has no members. Existing members are discarded.
        """
        rolelist = [role for role in self.guild.roles if opt in role.name]
        if len(rolelist) == 0:
            await message.channel.send('Roleが見つかりません')
            return False
        elif 2 <= len(rolelist):
            await message.channel.send('Roleが複数あります %s' % (','.join([m.name for m in rolelist])) )
            return False
        role = rolelist[0]
        if len(role.members) == 0:
            await message.channel.send('Roleメンバーが0人です')
            return False
        self.members.clear()
        for m in role.members:
            if not m.bot:
                self.GetMember(m)
        await message.channel.send('%s のRoleのメンバーを登録しました' % role.name)
        return True
    async def Role(self, message, member : ClanMember, opt):
        """Debug helper: list every guild role with its member names."""
        rolelist = [('%s:%s' %(role.name, ','.join([m.name for m in role.members]))) for role in self.guild.roles]
        await message.channel.send('\n'.join(rolelist))
        return True
    async def CmdReset(self, message, member : ClanMember, opt):
        """Reset the calling member's per-day state."""
        member.Reset()
        return True
    async def History(self, message, member : ClanMember, opt):
        """Show attack history: the caller's own when opt is empty,
        otherwise the named member's (error message when not found)."""
        if (opt == ''):
            await message.channel.send(member.History())
        else:
            fmember = self.FindMember(opt)
            if fmember is not None:
                await message.channel.send(fmember.History())
            else:
                await message.channel.send('メンバーがいません')
        return False
async def OverTime(self, message, member : ClanMember, opt):
try:
time = int(opt)
if time < 0 or 90 < time:
raise ValueError
errmes = member.ChangeOvertime(time)
if errmes is not None:
await message.channel.send(errmes)
return False
await message.channel.send('持ち越し時間を%d秒にしました' % time)
return True
except ValueError:
await message.channel.send('時間が読み取れません')
return False
return True
    async def Gacha(self, message, member : ClanMember, opt):
        """Run a gacha draw for the member; disabled during clan battle.

        Rejects any command argument via CheckOptionNone.
        """
        self.CheckOptionNone(opt)
        if (IsClanBattle()):
            return False
        else:
            await member.Gacha(message.channel)
        return False
    async def Gacha10000(self, message, member : ClanMember, opt):
        """Run a 10000-draw gacha for the member; disabled during clan
        battle.

        NOTE(review): unlike Gacha, this does not call CheckOptionNone on
        opt -- confirm the asymmetry is intended.
        """
        if (IsClanBattle()):
            return False
        else:
            await member.Gacha10000(message.channel)
        return False
async def DefeatLog(self, message, member : ClanMember, opt):
text = ''
for n in self.defeatlist:
text += n + '\n'
with StringIO(text) as bs:
await message.channel.send(file=discord.File(bs, 'defeatlog.txt'))
return False
async def AttackLog(self, message, member : ClanMember, opt):
text = ''
for n in self.attacklist:
text += n + '\n'
with StringIO(text) as bs:
await message.channel.send(file=discord.File(bs, 'attacklog.txt'))
return False
    async def GachaList(self, message, member : ClanMember, opt):
        """Post the gacha schedule."""
        await message.channel.send(Gacha.GachaScheduleData())
        return False
    async def GachaRate(self, message, member : ClanMember, opt):
        """Post the current gacha rate table."""
        await message.channel.send(gacha.ToString())
        return False
    async def Score(self, message, member : ClanMember, opt):
        """Evaluate a score expression via ScoreCalc and report the
        projected lap/boss position and remaining HP percentage."""
        result = self.ScoreCalc(opt)
        if (result is not None):
            await message.channel.send('%d-%d %s (残りHP %s %%)' %
                (result.lap, result.bossindex + 1, BossName[result.bossindex], result.hprate))
            return True
        else:
            await message.channel.send('計算できませんでした')
            return False
async def Route(self, message, member : ClanMember, opt):
channel = message.channel
route = set()
for n in opt:
try:
r = int(n)
if 1 <= r and r <= 5:
route.add(r)
except ValueError:
pass
member.route = list(route)
if 0 < len(member.route):
await channel.send('凸ルート:' + ' '.join([BossName[i - 1] for i in route]))
else:
await channel.send('凸ルートをリセットしました')
return True
async def AllRoute(self, message, member : ClanMember, opt):
channel = message.channel
s = ''
bossroute = [[], [], [], [], []]
for m in self.members.values():
for r in m.route:
if 0 < r and r <= len(bossroute):
bossroute[r - 1].append(m.name)
for i, names in enumerate(bossroute):
if 0 < len(names):
s += '%s %d人 ' % (BossName[i], len(names))
s += ' '.join([name for name in names]) + '\n'
await channel.send(s)
return False
    async def SettingReload(self, message, member : ClanMember, opt):
        """Reload global settings from storage, reset the gacha box, and
        report the configured battle term."""
        channel = message.channel
        GlobalStrage.Load()
        gacha.BoxReset()
        await channel.send('リロードしました')
        await channel.send('term %s-%s' % (BATTLESTART, BATTLEEND))
        return False
| |
def verify_format(_, res):
    # Pass-through verification hook used by all format_* helpers: returns
    # the formatted result unchanged. The first argument (the raw body) is
    # accepted so the call sites keep both values available; unused here.
    return res
def format_index(body):  # pragma: no cover
    """Format index data.

    Required keys ('id', 'fields') raise KeyError when missing; all other
    attributes are copied only when present, camelCase -> snake_case.

    :param body: Input body.
    :type body: dict
    :return: Formatted body.
    :rtype: dict
    """
    result = {
        'id': body['id'].split('/', 1)[-1],
        'fields': body['fields']
    }
    optional_keys = (
        ('type', 'type'),
        ('name', 'name'),
        ('deduplicate', 'deduplicate'),
        ('sparse', 'sparse'),
        ('unique', 'unique'),
        ('minLength', 'min_length'),
        ('geoJson', 'geo_json'),
        ('ignoreNull', 'ignore_none'),
        ('selectivityEstimate', 'selectivity'),
        ('isNewlyCreated', 'new'),
        ('expireAfter', 'expiry_time'),
        ('inBackground', 'in_background'),
        ('bestIndexedLevel', 'best_indexed_level'),
        ('worstIndexedLevel', 'worst_indexed_level'),
        ('maxNumCoverCells', 'max_num_cover_cells'),
    )
    for source_key, target_key in optional_keys:
        if source_key in body:
            result[target_key] = body[source_key]
    return verify_format(body, result)
def format_key_options(body):  # pragma: no cover
    """Format collection key options data.

    :param body: Input body.
    :type body: dict
    :return: Formatted body.
    :rtype: dict
    """
    key_map = (
        ('type', 'key_generator'),
        ('increment', 'key_increment'),
        ('offset', 'key_offset'),
        ('allowUserKeys', 'user_keys'),
        ('lastValue', 'key_last_value'),
    )
    result = {target: body[source] for source, target in key_map if source in body}
    return verify_format(body, result)
def format_database(body):  # pragma: no cover
    """Format databases info.

    Both legacy 'system' and current 'isSystem' feed result['system'];
    they are listed in that order so 'isSystem' wins when both appear,
    matching the original if-chain.

    :param body: Input body.
    :type body: dict
    :return: Formatted body.
    :rtype: dict
    """
    key_map = (
        ('id', 'id'),
        ('name', 'name'),
        ('path', 'path'),
        ('system', 'system'),
        ('isSystem', 'system'),
        # Cluster only
        ('sharding', 'sharding'),
        ('replicationFactor', 'replication_factor'),
        ('writeConcern', 'write_concern'),
    )
    result = {target: body[source] for source, target in key_map if source in body}
    return verify_format(body, result)
def format_collection(body):  # pragma: no cover
    """Format collection data.

    Maps ArangoDB camelCase attributes to the snake_case keys used by the
    client; keys absent from the input are simply omitted.

    FIX: removed a duplicated 'replicationFactor' mapping that appeared a
    second time in the cluster-only section -- it re-assigned the same
    value and had no effect.

    :param body: Input body.
    :type body: dict
    :return: Formatted body.
    :rtype: dict
    """
    result = {}
    if 'id' in body:
        result['id'] = body['id']
    if 'objectId' in body:
        result['object_id'] = body['objectId']
    if 'name' in body:
        result['name'] = body['name']
    if 'isSystem' in body:
        result['system'] = body['isSystem']
    if 'isSmart' in body:
        result['smart'] = body['isSmart']
    if 'type' in body:
        result['type'] = body['type']
        # Type code 3 designates an edge collection.
        result['edge'] = body['type'] == 3
    if 'waitForSync' in body:
        result['sync'] = body['waitForSync']
    if 'status' in body:
        result['status'] = body['status']
    if 'statusString' in body:
        result['status_string'] = body['statusString']
    if 'globallyUniqueId' in body:
        result['global_id'] = body['globallyUniqueId']
    if 'cacheEnabled' in body:
        result['cache'] = body['cacheEnabled']
    if 'replicationFactor' in body:
        result['replication_factor'] = body['replicationFactor']
    if 'minReplicationFactor' in body:
        result['min_replication_factor'] = body['minReplicationFactor']
    if 'writeConcern' in body:
        result['write_concern'] = body['writeConcern']
    # MMFiles only
    if 'doCompact' in body:
        result['compact'] = body['doCompact']
    if 'journalSize' in body:
        result['journal_size'] = body['journalSize']
    if 'isVolatile' in body:
        result['volatile'] = body['isVolatile']
    if 'indexBuckets' in body:
        result['index_bucket_count'] = body['indexBuckets']
    # Cluster only
    if 'shards' in body:
        result['shards'] = body['shards']
    if 'numberOfShards' in body:
        result['shard_count'] = body['numberOfShards']
    if 'shardKeys' in body:
        result['shard_fields'] = body['shardKeys']
    if 'distributeShardsLike' in body:
        result['shard_like'] = body['distributeShardsLike']
    if 'shardingStrategy' in body:
        result['sharding_strategy'] = body['shardingStrategy']
    if 'smartJoinAttribute' in body:
        result['smart_join_attribute'] = body['smartJoinAttribute']
    # Key Generator
    if 'keyOptions' in body:
        result['key_options'] = format_key_options(body['keyOptions'])
    # Replication only
    if 'cid' in body:
        result['cid'] = body['cid']
    if 'version' in body:
        result['version'] = body['version']
    if 'allowUserKeys' in body:
        result['user_keys'] = body['allowUserKeys']
    if 'planId' in body:
        result['plan_id'] = body['planId']
    if 'deleted' in body:
        result['deleted'] = body['deleted']
    # New in 3.7
    if 'syncByRevision' in body:
        result['sync_by_revision'] = body['syncByRevision']
    if 'tempObjectId' in body:
        result['temp_object_id'] = body['tempObjectId']
    if 'usesRevisionsAsDocumentIds' in body:
        result['rev_as_id'] = body['usesRevisionsAsDocumentIds']
    if 'isDisjoint' in body:
        result['disjoint'] = body['isDisjoint']
    if 'isSmartChild' in body:
        result['smart_child'] = body['isSmartChild']
    if 'minRevision' in body:
        result['min_revision'] = body['minRevision']
    if 'schema' in body:
        result['schema'] = body['schema']
    return verify_format(body, result)
def format_aql_cache(body):
    """Format AQL cache data.

    All keys are required; a missing one raises KeyError, as before.

    :param body: Input body.
    :type body: dict
    :return: Formatted body.
    :rtype: dict
    """
    required_keys = (
        ('mode', 'mode'),
        ('maxResults', 'max_results'),
        ('maxResultsSize', 'max_results_size'),
        ('maxEntrySize', 'max_entry_size'),
        ('includeSystem', 'include_system'),
    )
    result = {target: body[source] for source, target in required_keys}
    return verify_format(body, result)
def format_wal_properties(body):  # pragma: no cover
    """Format WAL properties.

    :param body: Input body.
    :type body: dict
    :return: Formatted body.
    :rtype: dict
    """
    key_map = (
        ('allowOversizeEntries', 'oversized_ops'),
        ('logfileSize', 'log_size'),
        ('historicLogfiles', 'historic_logs'),
        ('reserveLogfiles', 'reserve_logs'),
        ('syncInterval', 'sync_interval'),
        ('throttleWait', 'throttle_wait'),
        ('throttleWhenPending', 'throttle_limit'),
    )
    result = {target: body[source] for source, target in key_map if source in body}
    return verify_format(body, result)
def format_wal_transactions(body):  # pragma: no cover
    """Format WAL transactions.

    :param body: Input body.
    :type body: dict
    :return: Formatted body.
    :rtype: dict
    """
    key_map = (
        ('minLastCollected', 'last_collected'),
        ('minLastSealed', 'last_sealed'),
        ('runningTransactions', 'count'),
    )
    result = {target: body[source] for source, target in key_map if source in body}
    return verify_format(body, result)
def format_aql_query(body):  # pragma: no cover
    """Format AQL query data.

    'id' and 'query' are required (KeyError when missing); the remaining
    attributes are copied only when present.

    :param body: Input body.
    :type body: dict
    :return: Formatted body.
    :rtype: dict
    """
    result = {'id': body['id'], 'query': body['query']}
    optional_keys = (
        ('started', 'started'),
        ('state', 'state'),
        ('stream', 'stream'),
        ('bindVars', 'bind_vars'),
        ('runTime', 'runtime'),
    )
    for source_key, target_key in optional_keys:
        if source_key in body:
            result[target_key] = body[source_key]
    return verify_format(body, result)
def format_aql_tracking(body):  # pragma: no cover
    """Format AQL tracking data.

    :param body: Input body.
    :type body: dict
    :return: Formatted body.
    :rtype: dict
    """
    key_map = (
        ('enabled', 'enabled'),
        ('maxQueryStringLength', 'max_query_string_length'),
        ('maxSlowQueries', 'max_slow_queries'),
        ('slowQueryThreshold', 'slow_query_threshold'),
        ('slowStreamingQueryThreshold', 'slow_streaming_query_threshold'),
        ('trackBindVars', 'track_bind_vars'),
        ('trackSlowQueries', 'track_slow_queries'),
    )
    result = {target: body[source] for source, target in key_map if source in body}
    return verify_format(body, result)
def format_tick_values(body):  # pragma: no cover
    """Format tick data.

    :param body: Input body.
    :type body: dict
    :return: Formatted body.
    :rtype: dict
    """
    result = {}
    for src, dst in (('tickMin', 'tick_min'), ('tickMax', 'tick_max'),
                     ('tick', 'tick'), ('time', 'time')):
        if src in body:
            result[dst] = body[src]
    # The nested server block has its own formatter.
    if 'server' in body:
        result['server'] = format_server_info(body['server'])
    return verify_format(body, result)
def format_server_info(body):  # pragma: no cover
    """Format server data.

    :param body: Input body.
    :type body: dict
    :return: Formatted body.
    :rtype: dict
    """
    version = body['version']
    server_id = body['serverId']
    return {'version': version, 'server_id': server_id}
def format_replication_applier_config(body):  # pragma: no cover
    """Format replication applier configuration data.

    :param body: Input body.
    :type body: dict
    :return: Formatted body.
    :rtype: dict
    """
    # Straight camelCase -> snake_case copies, in the server's field order.
    key_map = (
        ('endpoint', 'endpoint'),
        ('database', 'database'),
        ('username', 'username'),
        ('verbose', 'verbose'),
        ('incremental', 'incremental'),
        ('requestTimeout', 'request_timeout'),
        ('connectTimeout', 'connect_timeout'),
        ('ignoreErrors', 'ignore_errors'),
        ('maxConnectRetries', 'max_connect_retries'),
        ('lockTimeoutRetries', 'lock_timeout_retries'),
        ('sslProtocol', 'ssl_protocol'),
        ('chunkSize', 'chunk_size'),
        ('skipCreateDrop', 'skip_create_drop'),
        ('autoStart', 'auto_start'),
        ('adaptivePolling', 'adaptive_polling'),
        ('autoResync', 'auto_resync'),
        ('autoResyncRetries', 'auto_resync_retries'),
        ('maxPacketSize', 'max_packet_size'),
        ('includeSystem', 'include_system'),
        ('includeFoxxQueues', 'include_foxx_queues'),
        ('requireFromPresent', 'require_from_present'),
        ('restrictType', 'restrict_type'),
        ('restrictCollections', 'restrict_collections'),
        ('connectionRetryWaitTime', 'connection_retry_wait_time'),
        ('initialSyncMaxWaitTime', 'initial_sync_max_wait_time'),
        ('idleMinWaitTime', 'idle_min_wait_time'),
        ('idleMaxWaitTime', 'idle_max_wait_time'),
    )
    result = {dst: body[src] for src, dst in key_map if src in body}
    return verify_format(body, result)
def format_applier_progress(body):  # pragma: no cover
    """Format replication applier progress data.

    :param body: Input body.
    :type body: dict
    :return: Formatted body.
    :rtype: dict
    """
    key_map = (
        ('time', 'time'),
        ('message', 'message'),
        ('failedConnects', 'failed_connects'),
    )
    result = {dst: body[src] for src, dst in key_map if src in body}
    return verify_format(body, result)
def format_applier_error(body): # pragma: no cover
"""Format replication applier error data.
:param body: Input body.
| |
<gh_stars>10-100
from functools import reduce
import itertools
from llvmlite import ir
import ctypes
from mathics.core.expression import Expression
from mathics.core.atoms import Integer, Real
from mathics.core.symbols import Symbol
from mathics.builtin.compile.types import int_type, real_type, bool_type, void_type
from mathics.builtin.compile.utils import pairwise, llvm_to_ctype
from mathics.builtin.compile.base import CompileError
def single_real_arg(f):
    """
    One real argument.

    Converts integer argument to real argument.
    """
    def wrapped_f(self, expr):
        elems = expr.get_elements()
        if len(elems) != 1:
            raise CompileError()
        value = self._gen_ir(elems[0])
        # A void value means generation already hit a terminator; propagate it.
        if value.type == void_type:
            return value
        if value.type == int_type:
            value = self.int_to_real(value)
        return f(self, [value])
    return wrapped_f
def int_real_args(minargs):
    """
    Many real or integer arguments expected.

    If any real arguments are provided all integer arguments will be converted.
    """
    def wraps(f):
        def wrapped_f(self, expr):
            elems = expr.get_elements()
            if len(elems) < minargs:
                raise CompileError()
            vals = [self._gen_ir(e) for e in elems]
            # A void value aborts generation for the whole expression.
            for v in vals:
                if v.type == void_type:
                    return v
            if not all(v.type in (real_type, int_type) for v in vals):
                raise CompileError()
            if all(v.type == int_type for v in vals):
                ret_type = int_type
            else:
                # Mixed types: promote every integer argument to real.
                ret_type = real_type
                vals = [self.int_to_real(v) if v.type == int_type else v
                        for v in vals]
            return f(self, vals, ret_type)
        return wrapped_f
    return wraps
def int_args(f):
    """
    Integer arguments.

    Converts boolean to integer arguments.
    """
    def wrapped_f(self, expr):
        vals = [self._gen_ir(e) for e in expr.get_elements()]
        # A void value aborts generation for the whole expression.
        for v in vals:
            if v.type == void_type:
                return v
        vals = [self.bool_to_int(v) if v.type == bool_type else v for v in vals]
        if not all(v.type == int_type for v in vals):
            raise CompileError()
        return f(self, vals)
    return wrapped_f
def bool_args(f):
    """
    Boolean arguments.

    Converts integer to boolean arguments.
    """
    def wrapped_f(self, expr):
        vals = [self._gen_ir(e) for e in expr.get_elements()]
        # A void value aborts generation for the whole expression.
        for v in vals:
            if v.type == void_type:
                return v
        vals = [self.int_to_bool(v) if v.type == int_type else v for v in vals]
        if not all(v.type == bool_type for v in vals):
            raise CompileError()
        return f(self, vals)
    return wrapped_f
class IRGenerator(object):
    def __init__(self, expr, args, func_name):
        """Prepare to compile the Mathics expression ``expr`` to LLVM IR.

        :param expr: expression tree to compile.
        :param args: argument descriptors; each provides ``name`` and an
            llvmlite ``type`` (used in ``generate_ir``).
        :param func_name: name for the generated entry-point function.
        """
        self.expr = expr
        self.args = args
        self.func_name = func_name  # function name of entry point
        self.builder = None  # ir.IRBuilder positioned at the current block
        self._known_ret_type = None  # return type once discovered/confirmed
        self._returned_type = None  # type observed at explicit Return[] sites
        self.lookup_args = None  # maps argument name -> LLVM function argument
    def generate_ir(self):
        """
        generates LLVM IR for a given expression

        Returns a ``(llvm_ir_text, return_type)`` pair.  The return type is
        initially guessed to be real; if the generated code disagrees, the
        guess is corrected and generation restarts recursively.
        """
        # assume that the function returns a real. Note that this is verified by
        # looking at the type of the head of the converted expression.
        ret_type = real_type if self._known_ret_type is None else self._known_ret_type

        # create an empty module
        module = ir.Module(name=__file__)
        func_type = ir.FunctionType(ret_type, tuple(arg.type for arg in self.args))

        # declare a function inside the module
        func = ir.Function(module, func_type, name=self.func_name)

        # implement the function
        block = func.append_basic_block(name="entry")
        self.builder = ir.IRBuilder(block)
        self.lookup_args = {
            arg.name: func_arg for arg, func_arg in zip(self.args, func.args)
        }
        ir_code = self._gen_ir(self.expr)

        # if the return type isn't correct then try again
        if self._known_ret_type is None:
            # determine the type returned
            if ir_code.type == void_type:
                if self._returned_type is not None:
                    # we returned something so use that type
                    self._known_ret_type = self._returned_type
                    # force generation again in case multiple returns of different types
                    return self.generate_ir()
                else:
                    # actually returned void e.g. Print[]
                    pass
            if ir_code.type != ret_type:
                # guessed incorrectly - try again
                self._known_ret_type = ir_code.type
                return self.generate_ir()

        # void handles its own returns
        if ir_code.type != void_type:
            self.builder.ret(ir_code)
        return str(module), ret_type
    def call_fp_intr(self, name, args, ret_type=real_type):
        """
        call a LLVM intrinsic floating-point operation

        ``name`` is the intrinsic family (e.g. ``"llvm.sin"``); the overload
        suffix for ``ret_type`` (via ``intrinsic_name``) is appended to pick
        the concrete intrinsic.  Returns the call instruction's value.
        """
        # see https://github.com/numba/llvmlite/pull/205 for an explanation of declare_intrinsic
        mod = self.builder.module
        fullname = name + "." + ret_type.intrinsic_name
        fnty = ir.FunctionType(ret_type, [arg.type for arg in args])
        intr = mod.declare_intrinsic(fullname, fnty=fnty)
        return self.builder.call(intr, args)
    def int_to_real(self, arg):
        """Cast a signed integer value to the real (floating-point) type."""
        assert arg.type == int_type
        return self.builder.sitofp(arg, real_type)
    def int_to_bool(self, arg):
        """Convert an integer value to a boolean by comparing against zero."""
        assert arg.type == int_type
        # any non-zero int is true
        return self.builder.icmp_signed("!=", arg, int_type(0))
def bool_to_int(self, arg):
assert arg.type == bool_type
return self.builder.zext(arg)
    def add_caller(self, py_f, ret_type, args):
        """
        Inserts a caller to a python function

        Wraps ``py_f`` in a ctypes callback and emits an indirect call to the
        callback's raw address.  Returns the call value, or emits/returns a
        ``ret_void`` when the callee's return type is void.
        """
        # see http://eli.thegreenplace.net/2015/calling-back-into-python-from-llvmlite-jited-code/
        c_func_type = ctypes.CFUNCTYPE(
            llvm_to_ctype(ret_type), *(llvm_to_ctype(arg.type) for arg in args)
        )
        # NOTE(review): c_func is only referenced locally here; confirm that
        # something else keeps the ctypes callback alive for as long as the
        # JIT-ed code can call this address, otherwise it may dangle.
        c_func = c_func_type(py_f)
        c_func_addr = ctypes.cast(c_func, ctypes.c_void_p).value

        addrcaller_func_type = ir.FunctionType(ret_type, [arg.type for arg in args])
        cb_func_ptr_type = addrcaller_func_type.as_pointer()
        # Materialize the Python-side address as a function pointer and call it.
        f = self.builder.inttoptr(int_type(c_func_addr), cb_func_ptr_type, name="f")
        call = self.builder.call(f, args)
        if call.type == void_type:
            return self.builder.ret_void()
        return call
    def _gen_ir(self, expr):
        """
        walks an expression tree and constructs the ir block

        Symbols resolve to function arguments, Integer/Real atoms become
        constants, and Expressions dispatch to a ``_gen_<head>`` method.
        Raises CompileError for anything that cannot be compiled.
        """
        if isinstance(expr, Symbol):
            try:
                arg = self.lookup_args[expr.get_name()]
            except KeyError:
                raise CompileError()
            return arg
        elif isinstance(expr, Integer):
            return int_type(expr.get_int_value())
        elif isinstance(expr, Real):
            return real_type(expr.round_to_float())
        elif not isinstance(expr, Expression):
            raise CompileError()

        head_name = expr.get_head_name()
        if head_name.startswith("System`"):
            # only System` builtins have _gen_* handlers; strip the context
            head_name = head_name[7:]
            method = getattr(self, "_gen_" + head_name, None)
        else:
            method = None
        if method is None:
            raise CompileError()
        return method(expr)
    def _gen_If(self, expr):
        """Compile ``If[cond, then, else]`` to a conditional branch plus phi.

        Both branches are generated into their own basic blocks; their result
        types are reconciled (int is promoted to real when mixed) and merged
        through a phi node in a continuation block.  Branches that terminate
        (void) do not feed the phi.
        """
        if not expr.has_form("If", 3):
            raise CompileError()
        builder = self.builder
        args = expr.get_elements()

        # condition
        cond = self._gen_ir(args[0])
        if cond.type == int_type:
            cond = self.int_to_bool(cond)
        if cond.type != bool_type:
            raise CompileError()

        # construct new blocks
        then_block = builder.append_basic_block()
        else_block = builder.append_basic_block()

        # branch to then or else block
        builder.cbranch(cond, then_block, else_block)

        # results for both block
        with builder.goto_block(then_block):
            then_result = self._gen_ir(args[1])
        with builder.goto_block(else_block):
            else_result = self._gen_ir(args[2])

        # type check both blocks - determine resulting type
        if then_result.type == void_type and else_result.type == void_type:
            # both blocks terminate so no continuation block
            return then_result
        elif then_result.type == else_result.type:
            ret_type = then_result.type
        elif then_result.type == int_type and else_result.type == real_type:
            # promote the int branch: the cast is emitted at that block's end
            builder.position_at_end(then_block)
            then_result = self.int_to_real(then_result)
            ret_type = real_type
        elif then_result.type == real_type and else_result.type == int_type:
            builder.position_at_end(else_block)
            else_result = self.int_to_real(else_result)
            ret_type = real_type
        elif then_result.type == void_type and else_result.type != void_type:
            ret_type = else_result.type
        elif then_result.type != void_type and else_result.type == void_type:
            ret_type = then_result.type
        else:
            raise CompileError()

        # continuation block
        cont_block = builder.append_basic_block()
        builder.position_at_start(cont_block)
        result = builder.phi(ret_type)

        # both blocks branch to continuation block (unless they terminate)
        if then_result.type != void_type:
            with builder.goto_block(then_block):
                builder.branch(cont_block)
            result.add_incoming(then_result, then_block)
        if else_result.type != void_type:
            with builder.goto_block(else_block):
                builder.branch(cont_block)
            result.add_incoming(else_result, else_block)
        return result
    def _gen_Return(self, expr):
        """Compile ``Return[x]``, reconciling the type with earlier returns.

        Records the returned type in ``_returned_type`` so generate_ir can
        redo the pass if the guessed function return type was wrong.
        """
        elements = expr.get_elements()
        if len(elements) != 1:
            raise CompileError()
        arg = self._gen_ir(elements[0])
        if arg.type == void_type:
            return arg

        if self._returned_type == arg.type:
            pass
        elif self._returned_type is None:
            self._returned_type = arg.type
        elif self._returned_type == real_type and arg.type == int_type:
            arg = self.int_to_real(arg)
        elif self._returned_type == int_type and arg.type == real_type:
            # widen the recorded type; generate_ir regenerates everything so
            # the earlier int returns get converted on the next pass
            self._returned_type = arg.type
        else:
            raise CompileError(
                "Conflicting return types {} and {}.".format(
                    self._returned_type, arg.type
                )
            )
        return self.builder.ret(arg)
@int_real_args(1)
def _gen_Plus(self, args, ret_type):
if ret_type == real_type:
return reduce(self.builder.fadd, args)
elif ret_type == int_type:
return reduce(self.builder.add, args)
@int_real_args(1)
def _gen_Times(self, args, ret_type):
if ret_type == real_type:
return reduce(self.builder.fmul, args)
elif ret_type == int_type:
return reduce(self.builder.mul, args)
    def _gen_Power(self, expr):
        """Compile ``Power[base, exp]`` via llvm.exp / llvm.exp2 / llvm.pow.

        ``E^x`` and ``2^x`` use the specialized intrinsics; everything else
        falls back to llvm.pow on real operands.
        """
        # TODO (int_type, int_type) power
        elements = expr.get_elements()
        if len(elements) != 2:
            raise CompileError()

        # convert exponent
        exponent = self._gen_ir(elements[1])
        if exponent.type == int_type:
            exponent = self.int_to_real(exponent)
        elif exponent.type == void_type:
            return exponent

        # E ^ exponent
        if elements[0].sameQ(Symbol("E")) and exponent.type == real_type:
            return self.call_fp_intr("llvm.exp", [exponent])

        # 2 ^ exponent
        if elements[0].get_int_value() == 2 and exponent.type == real_type:
            return self.call_fp_intr("llvm.exp2", [exponent])

        # convert base
        base = self._gen_ir(elements[0])
        if base.type == int_type:
            base = self.int_to_real(base)
        elif base.type == void_type:
            return base

        # base ^ exponent
        if base.type == real_type and exponent.type == real_type:
            return self.call_fp_intr("llvm.pow", [base, exponent])
        else:
            raise CompileError()
    @single_real_arg
    def _gen_Sin(self, args):
        """Compile ``Sin[x]`` with the llvm.sin intrinsic."""
        return self.call_fp_intr("llvm.sin", args)
    @single_real_arg
    def _gen_Cos(self, args):
        """Compile ``Cos[x]`` with the llvm.cos intrinsic."""
        return self.call_fp_intr("llvm.cos", args)
    @single_real_arg
    def _gen_Tan(self, args):
        """Compile ``Tan[x]`` as sin(x)/cos(x)."""
        # FIXME this approach is inaccurate
        sinx = self.call_fp_intr("llvm.sin", args)
        cosx = self.call_fp_intr("llvm.cos", args)
        return self.builder.fdiv(sinx, cosx)
    @single_real_arg
    def _gen_Sec(self, args):
        """Compile ``Sec[x]`` as 1/cos(x)."""
        # FIXME this approach is inaccurate
        cosx = self.call_fp_intr("llvm.cos", args)
        return self.builder.fdiv(real_type(1.0), cosx)
    @single_real_arg
    def _gen_Csc(self, args):
        """Compile ``Csc[x]`` as 1/sin(x)."""
        # FIXME this approach is inaccurate
        sinx = self.call_fp_intr("llvm.sin", args)
        return self.builder.fdiv(real_type(1.0), sinx)
    @single_real_arg
    def _gen_Cot(self, args):
        """Compile ``Cot[x]`` as cos(x)/sin(x)."""
        # FIXME this approach is inaccurate
        sinx = self.call_fp_intr("llvm.sin", args)
        cosx = self.call_fp_intr("llvm.cos", args)
        return self.builder.fdiv(cosx, sinx)
    @single_real_arg
    def _gen_Exp(self, args):
        """Compile ``Exp[x]`` with the llvm.exp intrinsic."""
        return self.call_fp_intr("llvm.exp", args)
    @single_real_arg
    def _gen_Log(self, args):
        """Compile ``Log[x]`` (natural log) with the llvm.log intrinsic."""
        return self.call_fp_intr("llvm.log", args)
@int_real_args(1)
def _gen_Abs(self, args, ret_type):
if len(args) != 1:
raise CompileError()
arg = args[0]
if ret_type == int_type:
# FIXME better way to do this?
neg_arg = self.builder.mul(arg, int_type(-1))
cond = self.builder.icmp_signed("<", arg, int_type(0))
return self.builder.select(cond, neg_arg, arg)
| |
of pcolor.
Args:
p1,p2,pcolor : parameter names
npoints : first thins the chain so this number of points are plotted
cmap : a colormap (default: jet)
nsig : map the range of the color map to +/- nsig
ax : axes to use for plotting (default: current axes)
cbar : whether to draw a colorbar
cax : axes to use for colorbar (default: steal from ax)
marker, markersize, zorder, **kwargs : passed to the plot() command
"""
from matplotlib.pyplot import get_cmap, cm, gca, sca, colorbar
from matplotlib import colors, colorbar
if cmap is None: cmap=get_cmap('jet')
if ax is None: ax=gca()
mu,sig = chain.mean(pcolor), chain.std(pcolor)
for s in chain.thin(int(sum(chain['weight'])/float(npoints))).iterrows():
if clim is None: c=cmap((s[pcolor]-mu)/(2*nsig*sig) + 0.5)
else: c = cmap((s[pcolor]-clim[0])/(clim[1]-clim[0]))
ax.plot(s[p1],s[p2],color=c,markeredgecolor=c,marker=marker,markersize=markersize,zorder=-1,**kwargs)
if cax is None: cax = colorbar.make_axes(ax)[0]
if clim is None: cb = colorbar.ColorbarBase(ax=cax, norm=colors.Normalize(vmin=mu-nsig*sig, vmax=mu+nsig*sig))
else: cb = colorbar.ColorbarBase(ax=cax, norm=colors.Normalize(vmin=clim[0], vmax=clim[1]))
sca(ax)
return ax,cax
def like2d(datx,daty,weights=None,
           nbins=15,which=[.68,.95],
           filled=True, color=None, cmap=None,
           ax=None,
           **kwargs):
    """Plot 2-d confidence contours of (weighted) samples.

    Args:
        datx, daty : sample vectors
        weights : per-sample weights (default: uniform)
        nbins : number of histogram bins per axis
        which : confidence levels to contour
        filled : also fill the contours with `cmap`
        color, cmap : contour line color / fill colormap
        ax : axes to draw into (default: current axes)
        **kwargs : passed to contour()/contourf()
    """
    from matplotlib.pyplot import gca, get_cmap
    from matplotlib.colors import LinearSegmentedColormap
    if ax is None: ax = gca()
    if weights is None: weights=ones(len(datx))
    if color is None: color = kwargs.pop('c') if 'c' in kwargs else 'b'

    H,xe,ye = histogram2d(datx,daty,nbins,weights=weights)
    # Bin centers from bin edges.  (matplotlib.mlab.movavg was removed in
    # matplotlib >= 3.1; the 2-point moving average is just the midpoints.)
    xem, yem = 0.5*(xe[:-1]+xe[1:]), 0.5*(ye[:-1]+ye[1:])
    args = (xem,yem,transpose(H))
    kwargs = dict(levels=confint2d(H, sorted(which)[::-1]+[0]),**kwargs)
    if cmap is None:
        # Pick a sequential colormap matching the line color when possible.
        cmap = {'b':'Blues',
                'g':'Greens',
                'r':'Reds',
                'orange':'Oranges',
                'grey':'Greys'}.get(color)
        if cmap is None: cmap = LinearSegmentedColormap.from_list(None,['w',color])
        else: cmap = get_cmap(cmap)
    if filled: ax.contourf(*args,cmap=cmap,**kwargs)
    ax.contour(*args,colors=color,**kwargs)
def like1d(dat,weights=None,
           nbins=30,range=None,maxed=True,
           ax=None,
           **kw):
    """Plot a (weighted) 1-d marginalized likelihood as a histogram curve.

    Args:
        dat : sample vector
        weights : per-sample weights (default: uniform)
        nbins : number of histogram bins
        range : (min,max) histogram range
        maxed : normalize the curve so its maximum is 1
        ax : axes to draw into (default: current axes)
        **kw : passed to plot()
    """
    from matplotlib.pyplot import gca
    if ax is None: ax = gca()
    if weights is None: weights=ones(len(dat))

    # numpy removed the `normed` keyword (numpy >= 1.24); `density=True` is
    # the supported spelling of a normalized histogram.
    H, xe = histogram(dat,bins=nbins,weights=weights,density=True,range=range)
    if maxed: H=H/max(H)
    # Bin centers from edges (matplotlib.mlab.movavg was removed in mpl >= 3.1).
    xem = 0.5*(xe[:-1]+xe[1:])
    ax.plot(xem,H,**kw)
def get_correlation(data, weights=None):
    """Correlation matrix of the (possibly weighted) samples in `data`."""
    cv = get_covariance(data, weights)
    dim = cv.shape[0]
    # Normalize each row and column by that variable's standard deviation.
    for k in range(dim):
        sd = sqrt(cv[k, k])
        cv[k, :] /= sd
        cv[:, k] /= sd
    return cv
def get_covariance(data, weights=None):
    """Covariance matrix of samples in the rows of `data`.

    Unweighted data defers to numpy's `cov`; weighted data uses the
    weighted mean and the (sum(w) - 1) normalization.
    """
    if weights is None:
        return cov(data.T)
    mu = sum(data.T * weights, axis=1) / sum(weights)
    centered = data - mu
    return dot(centered.T * weights, centered) / (sum(weights) - 1)
def likegrid(chains, params=None,
             lims=None, ticks=None, nticks=5,
             default_chain=0,
             spacing=0.05,
             xtick_rotation=30,
             colors=None, filled=True,
             nbins1d=30, nbins2d=20,
             labels=None,
             fig=None,
             size=2,
             legend_loc=None,
             param_name_mapping=None,
             param_label_size=None):
    """
    Make a grid (aka "triangle plot") of 1- and 2-d likelihood contours.

    Parameters
    ----------

    chains :
        one or a list of `Chain` objects

    default_chain, optional :
        the chain used to get default parameters names, axes limits, and ticks
        either an index into chains or a `Chain` object (default: chains[0])

    params, optional :
        list of parameter names which to show
        (default: all parameters from default_chain)

    lims, optional :
        a dictionary mapping parameter names to (min,max) axes limits
        (default: +/- 4 sigma from default_chain)

    ticks, optional :
        a dictionary mapping parameter names to list of [ticks]
        (default: automatically picks `nticks`)

    nticks, optional :
        roughly how many ticks per axes (default: 5)

    xtick_rotation, optional :
        numbers of degrees to rotate the xticks by (default: 30)

    spacing, optional :
        space in between plots as a fraction of figure width (default: 0.05)

    fig, optional :
        figure of figure number in which to plot (default: figure(0))

    size, optional :
        size in inches of one plot (default: 2)

    colors, optional :
        colors to cycle through for plotting

    filled, optional :
        whether to fill in the contours (default: True)

    labels, optional :
        list of names for a legend

    legend_loc, optional :
        (x,y) location of the legend (coordinates scaled to [0,1])

    nbins1d, optional :
        number (or len(chains) length list) of bins for 1d plots (default: 30)

    nbins2d, optional :
        number (or len(chains) length list) of bins for 2d plots (default: 20)
    """
    from matplotlib.pyplot import figure, Line2D, xticks
    from matplotlib.ticker import MaxNLocator
    fig = figure(0) if fig is None else (figure(fig) if isinstance(fig,int) else fig)
    if type(chains)!=list: chains=[chains]
    # default to the parameters common to all chains
    # NOTE(review): `params==None` should be `params is None`
    if params==None: params = sorted(reduce(lambda x,y: set(x)&set(y), [c.params() for c in chains]))
    if param_name_mapping is None: param_name_mapping = {}
    if size is not None: fig.set_size_inches(*([size*len(params)]*2))
    if colors is None: colors=['b','orange','k','m','cyan']
    # per-chain bin counts: broadcast a scalar to all chains
    if not isinstance(nbins2d,list): nbins2d = [nbins2d]*len(chains)
    if not isinstance(nbins1d,list): nbins1d = [nbins1d]*len(chains)
    fig.subplots_adjust(hspace=spacing,wspace=spacing)

    c=chains[default_chain] if isinstance(default_chain,int) else default_chain
    # default limits: mean +/- 4 sigma, clipped to the sampled range;
    # explicit `lims` entries override the defaults
    lims = dict({p:(max(min(c[p]),mean(c[p])-4*std(c[p])),min(max(c[p]),mean(c[p])+4*std(c[p]))) for p in params},**(lims if lims is not None else {}))
    if ticks is None: ticks = {}
    if isinstance(nticks,int): nticks={p:nticks for p in params}

    n=len(params)
    for (i,p1) in enumerate(params):
        for (j,p2) in enumerate(params):
            if (i<=j):  # lower triangle only
                ax=fig.add_subplot(n,n,j*n+i+1)
                ax.xaxis.set_major_locator(MaxNLocator(nticks.get(p1,5)))
                ax.yaxis.set_major_locator(MaxNLocator(nticks.get(p2,5)))
                ax.set_xlim(*lims[p1])
                if (i==j):
                    # diagonal: 1-d marginalized posteriors
                    for (ch,col,nbins) in zip(chains,colors,nbins1d):
                        if p1 in ch: ch.like1d(p1,nbins=nbins,color=col,ax=ax)
                    ax.set_yticks([])
                elif (i<j):
                    # off-diagonal: 2-d contours
                    for (ch,col,nbins) in zip(chains,colors,nbins2d):
                        if p1 in ch and p2 in ch: ch.like2d(p1,p2,filled=filled,nbins=nbins,color=col,ax=ax)
                    if p2 in ticks: ax.set_yticks(ticks[p2])
                    ax.set_ylim(*lims[p2])

                # axis labels / tick labels only on the outer edge of the grid
                if i==0:
                    ax.set_ylabel(param_name_mapping.get(p2,p2),size=param_label_size)
                else:
                    ax.set_yticklabels([])
                if j==n-1:
                    ax.set_xlabel(param_name_mapping.get(p1,p1),size=param_label_size)
                    xticks(rotation=xtick_rotation)
                else:
                    ax.set_xticklabels([])
    if labels is not None:
        fig.legend([Line2D([0],[0],c=c,lw=2) for c in colors],labels,fancybox=True,shadow=False,loc=legend_loc)
from collections import Iterable
import operator as op
def likegrid1d(chains,
               params='all',
               lims=None,
               ticks=None,
               nticks=4,
               nsig=3,
               colors=None,
               nbins1d=30,
               labels=None,
               fig=None,
               size=2,
               aspect=1,
               legend_loc=None,
               linewidth=1,
               param_name_mapping=None,
               param_label_size=None,
               tick_label_size=None,
               titley=1,
               ncol=4,
               axes=None):
    """
    Make a grid of 1-d likelihood contours.

    Arguments:
    ----------

    chains :
        one or a list of `Chain` objects

    default_chain, optional :
        the chain used to get default parameters names, axes limits, and ticks
        either an index into chains or a `Chain` object (default: chains[0])

    params, optional :
        list of parameter names which to show
        can also be 'all' or 'common' which does the union/intersection of
        the params in all the chains

    lims, optional :
        a dictionary mapping parameter names to (min,max) axes limits
        (default: +/- 4 sigma from default_chain)

    ticks, optional :
        a dictionary giving a list of ticks for each parameter

    nticks, optional :
        roughly how many x ticks to show. can be dictionary to
        specify each parameter separately. (default: 4)

    fig, optional :
        figure of figure number in which to plot (default: new figure)

    ncol, optional :
        the number of colunms (default: 4)

    axes, optional :
        an array of axes into which to plot. if this is provided, fig and ncol
        are ignored. must have len(axes) >= len(params).

    size, optional :
        size in inches of one plot (default: 2)

    aspect, optional :
        aspect ratio (default: 1)

    colors, optional :
        colors to cycle through for plotting

    filled, optional :
        whether to fill in the contours (default: True)

    labels, optional :
        list of names for a legend

    legend_loc, optional :
        (x,y) location of the legend (coordinates scaled to [0,1])

    nbins1d, optional :
        number of bins for 1d plots (default: 30)

    nbins2d, optional :
        number of bins for 2d plots (default: 20)
    """
    from matplotlib.pyplot import figure, Line2D
    from matplotlib.ticker import AutoMinorLocator, ScalarFormatter, MaxNLocator
    if type(chains)!=list: chains=[chains]

    if params in ['all','common']:
        # union for 'all', intersection for 'common', of every chain's params
        params = sorted(reduce(lambda x,y: (op.__or__ if params=='all' else op.__and__)(set(x),set(y)), [c.params() for c in chains]))
    elif not isinstance(params,Iterable):
        raise ValueError("params should be iterable or 'all' or 'common'")

    if param_name_mapping is None: param_name_mapping = {}
    # NOTE(review): Python-2-era division; under Python 3 this is a float and
    # add_subplot will reject it -- confirm `len(params)//ncol+1` is intended
    nrow = len(params)/ncol+1
    if axes is None:
        if fig is None: fig = figure(fig) if isinstance(fig,int) else figure()
        if size is not None: fig.set_size_inches(size*ncol,size*nrow/aspect)
    fig.subplots_adjust(hspace=0.4,wspace=0.1)
    if colors is None: colors=['b','orange','k','m','cyan']
    if lims is None: lims = {}
    # default limits: mean +/- nsig sigma, clipped to each chain's sample range
    lims = {p:(lims[p] if p in lims
               else (min(max(min(c[p]),mean(c[p])-nsig*std(c[p])) for c in chains if p in c.params()),
                     max(min(max(c[p]),mean(c[p])+nsig*std(c[p])) for c in chains if p in c.params())))
            for p in params}

    n=len(params)
    # subplot index starts at 2 when a legend occupies the first slot
    for (i,p1) in enumerate(params,0 if axes is not None else 2 if labels is not None else 1):
        ax=axes[i] if axes is not None else fig.add_subplot(nrow,ncol,i)
        if ticks is not None and p1 in ticks:
            ax.set_xticks(ticks[p1])
        for (ch,col) in zip(chains,colors):
            if p1 in ch: ch.like1d(p1,nbins=nbins1d,color=col,ax=ax,linewidth=linewidth)
        ax.set_yticks([])
        ax.set_xlim(lims[p1])
        ax.set_ylim(0,1)
        ax.set_title(param_name_mapping.get(p1,p1),size=param_label_size,y=titley)
        ax.tick_params(labelsize=tick_label_size)
        if ticks and p1 in ticks:
            ax.set_xticks(ticks[p1])
        else:
            ax.xaxis.set_major_locator(MaxNLocator(nbins=nticks.get(p1,4) if isinstance(nticks,dict) else nticks))
        ax.xaxis.set_minor_locator(AutoMinorLocator())
        ax.xaxis.set_major_formatter(ScalarFormatter(useOffset=False))

    if labels is not None:
        fig.legend([Line2D([0],[0],c=c,linewidth=3) for c in colors],labels,fancybox=True,shadow=False,
                   loc=legend_loc if legend_loc is not None else (0,1-1./nrow))
def confint2d(hist,which):
"""
| |
#
#
#
from optparse import OptionParser
from pykd import *
IPv4 = 0x0008
ARP = 0x0608
IPv6 = 0xdd86
ICMP_PROTO = 0x01
UDP_PROTO = 0x11
TCP_PROTO = 0x06
NET_BUFFER_LIST = None
MDL = None
NET_BUFFER = None
def getNdisTypesInfo():
    """Cache the NDIS packet-descriptor type objects in module globals.

    Prefers the types exposed by the loaded ndis module's symbols and falls
    back to the generic typeInfo lookup when those symbols are missing.
    """
    global NET_BUFFER_LIST
    global MDL
    global NET_BUFFER
    ndis = module("ndis")
    try:
        NET_BUFFER_LIST = ndis.type("_NET_BUFFER_LIST")
        MDL = ndis.type("_MDL")
        NET_BUFFER = ndis.type("_NET_BUFFER")
    except SymbolException:
        NET_BUFFER_LIST = typeInfo("_NET_BUFFER_LIST")
        MDL = typeInfo("_MDL")
        NET_BUFFER = typeInfo("_NET_BUFFER")
def getHostWord(dataPos):
    """Read a 16-bit value whose first byte is the most significant."""
    hi = dataPos.next()
    lo = dataPos.next()
    return (hi << 8) + lo
def getNetWord(dataPos):
    """Read a 16-bit value whose first byte is the least significant."""
    lo = dataPos.next()
    hi = dataPos.next()
    return lo + (hi << 8)
def getHostDWord(dataPos):
    """Read a 32-bit value stored most-significant byte first."""
    val = 0
    for _ in range(4):
        val = (val << 8) + dataPos.next()
    return val
def getNetDWord(dataPos):
    """Read a 32-bit value stored least-significant byte first."""
    val = 0
    for shift in (0, 8, 16, 24):
        val += dataPos.next() << shift
    return val
class UdpPacket:
    """Parsed UDP header: source/dest ports, length and checksum."""
    def __init__( self, dataPos ):
        # parsed stays False if the byte stream ends mid-header
        self.parsed = False
        try:
            self.sourcePort = getHostWord( dataPos )
            self.destPort = getHostWord( dataPos )
            self.length = getHostWord( dataPos )
            self.checksum = getHostWord( dataPos )
            self.parsed = True
        except StopIteration:
            pass

    def __str__( self ):
        """Human-readable dump; prints MALFORMED for a truncated header."""
        s = "UDP header: "
        if self.parsed:
            s += "OK\n"
            s += "\tSrc port: %d\n" % self.sourcePort
            s += "\tDest port: %d\n" % self.destPort
            s += "\tLength: %d\n" % self.length
            s += "\tChecksum: %#x\n" % self.checksum
            s += "\n"
        else:
            s += "MALFORMED\n"
        return s
class TcpPacket:
    """Parsed TCP header: ports, sequence/ack numbers, flags, window."""
    def __init__( self, dataPos ):
        # parsed stays False if the byte stream ends mid-header
        self.parsed = False
        try:
            self.sourcePort = getHostWord( dataPos )
            self.destPort = getHostWord( dataPos )
            self.SeqNumber = getHostDWord( dataPos )
            self.AckNumber = getHostDWord( dataPos )
            self.dataOffset = ( dataPos.next() >> 4 )
            self.flags = dataPos.next() & 0x3F
            self.window = getHostWord( dataPos )
            self.checksum = getHostWord( dataPos )
            self.urgentPointer = getHostWord( dataPos )
            # Bug fix: success must be recorded only after every field has
            # been read.  Previously parsed was set True before parsing, so a
            # truncated packet reported "OK" and __str__ then raised
            # AttributeError on the missing fields (UdpPacket/IpPacket set it
            # last, as done here).
            self.parsed = True
        except StopIteration:
            pass

    def __str__( self ):
        """Human-readable dump; prints MALFORMED for a truncated header."""
        s = "TCP header: "
        fl = [ "FIN", "SYN","RST", "PSH", "ACK", "URG" ]
        if self.parsed:
            s += "OK\n"
            s += "\tSrc port: %d\n" % self.sourcePort
            s += "\tDest port: %d\n" % self.destPort
            s += "\tSEQ: %x\n" % self.SeqNumber
            s += "\tACK: %x\n" % self.AckNumber
            s += "\tFlags: %x ( %s )\n" % ( self.flags, " ".join( [ fl[i] for i in xrange( len(fl) ) if ( self.flags & ( 1 << i ) ) != 0 ] ) )
            s += "\tWindows: %x\n" % self.window
            s += "\tChecksum: %x\n" % self.checksum
        else:
            s += "MALFORMED\n"
        return s
class ArpPacket:
    """Empty placeholder parser.

    NOTE(review): appears to be dead code -- EthernetType dispatches ARP
    frames to ARPPacket (defined below), not to this class.  Confirm and
    remove.
    """
    def __init__( self, dataPos ):
        pass

    def __str__( self ):
        return ""
class IpAddress:
    """IPv4 address read as four consecutive bytes from the stream."""
    def __init__( self, dataPos ):
        # xrange / dataPos.next(): this script targets Python 2 (pykd)
        self.addr = [ dataPos.next() for i in xrange(4) ]

    def __str__( self ):
        return "%d.%d.%d.%d" % tuple( self.addr[0:4] )
class Ip6Address:
    """IPv6 address read as eight consecutive 16-bit groups."""
    def __init__( self, dataPos ):
        self.addr = [ getHostWord( dataPos ) for i in xrange(8) ]

    def __str__( self ):
        # NOTE(review): each group is 16 bits, so %02x prints only a minimum
        # of 2 hex digits (conventional IPv6 uses up to 4) -- confirm intended
        return "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x" % tuple( self.addr )
class IpProtocol:
    """IP protocol number plus helpers for transport-layer dispatch."""
    def __init__(self, dataPos):
        self.typeVal = dataPos.next()

    def isICMP(self):
        return self.typeVal == ICMP_PROTO

    def isUDP(self):
        return self.typeVal == UDP_PROTO

    def isTCP(self):
        return self.typeVal == TCP_PROTO

    def __str__(self):
        names = {ICMP_PROTO: "ICMP", UDP_PROTO: "UDP", TCP_PROTO: "TCP"}
        return names.get(self.typeVal, hex(self.typeVal))

    def getNextLayerPacket(self, dataPos):
        """Parse the payload with the parser matching this protocol number."""
        if self.typeVal == UDP_PROTO:
            return UdpPacket(dataPos)
        if self.typeVal == TCP_PROTO:
            return TcpPacket(dataPos)
        if self.typeVal == ICMP_PROTO:
            return ""
        return "Unknown protocol"
class IpPacket:
    """Parsed IPv4 header; recursively parses the transport payload."""
    def __init__( self, dataPos ):
        # parsed stays False if the byte stream ends mid-header
        self.parsed = False
        try:
            version = dataPos.next()
            self.ihl = version & 0xF
            self.version = version >> 4
            self.tos = dataPos.next()
            self.TotalLength = getHostWord( dataPos )
            self.ident = getHostWord( dataPos )
            frag = getHostWord( dataPos )
            self.offset = frag & 0x1FFF
            self.flags = frag >> 13
            self.ttl = dataPos.next()
            self.protocol = IpProtocol( dataPos )
            # Fix the historical 'checlsum' misspelling but keep the old
            # attribute as an alias for any external readers.
            self.checksum = self.checlsum = getNetWord( dataPos )
            self.srcAddr = IpAddress( dataPos )
            self.destAddr = IpAddress( dataPos )
            # only the first fragment carries the transport header
            if self.offset == 0:
                self.nextLayerPckt = self.protocol.getNextLayerPacket( dataPos )
            else:
                self.nextLayerPckt = ""
            self.parsed = True
        except StopIteration:
            pass

    def __str__( self ):
        s = "IPv4 header: "
        if self.parsed:
            s += "OK\n"
            s += "\tversion: %x\n" % self.version
            s += "\theader length: %d bytes\n" % ( self.ihl * 4 )
            s += "\ttotal length: %d bytes\n" % self.TotalLength
            s += "\tID: %x\n" % self.ident
            s += "\tflags: %x\n" % self.flags
            s += "\toffset: %x" % ( self.offset * 8)
            # Bug fix: after `frag >> 13` the MF (more-fragments) bit is 0x1;
            # the old code tested 0x4, which is the always-zero reserved bit,
            # so fragmented packets were misclassified.  Also fixed the
            # "fargmented" typo in the output.
            if ( self.offset == 0 ) and ( self.flags & 0x1 == 0 ):
                s += " (not fragmented)\n"
            elif self.offset == 0 :
                s += " (first fragment)\n"
            elif not ( self.flags & 0x1 == 0 ):
                s += " (fragmented)\n"
            else:
                s += " (last fragment)\n"
            s += "\tprotocol: " + str( self.protocol ) + "\n"
            s += "\tTTL: %d\n" % self.ttl
            s += "\tSrc addr: " + str(self.srcAddr) + "\n"
            s += "\tDest addr: " + str(self.destAddr) + "\n"
            s += str( self.nextLayerPckt )
        else:
            s += "MALFORMED\n"
        return s
class Ip6Packet():
    """Parsed IPv6 header; recursively parses the transport payload."""
    def __init__( self, dataPos ):
        # parsed stays False if the byte stream ends mid-header
        self.parsed = False
        try:
            t = getHostDWord( dataPos )
            self.version = ( t >> 28 ) & 0xF
            self.trafficClass = ( t >> 20 ) & 0xFF
            # Bug fix: the flow label is 20 bits (RFC 8200); the old mask
            # 0xFFF silently dropped the top 8 bits.
            self.flowLabel = t & 0xFFFFF
            # Bug fix: payload length is big-endian on the wire (like IPv4's
            # TotalLength, read with getHostWord); getNetWord byte-swapped it.
            self.payloadLength = getHostWord( dataPos )
            self.nextHeader = IpProtocol( dataPos )
            self.hopLimit = dataPos.next()
            self.srcAddr = Ip6Address( dataPos )
            self.destAddr = Ip6Address( dataPos )
            self.nextLayerPckt = self.nextHeader.getNextLayerPacket( dataPos )
            self.parsed = True
        except StopIteration:
            pass

    def __str__( self ):
        s = "IPv6 header: "
        if self.parsed:
            s += "OK\n"
            s += "\tversion: %x\n" % self.version
            s += "\ttraffic class %x\n" % self.trafficClass
            s += "\tflowLabel: %x\n" % self.flowLabel
            s += "\tpayloadLength: %x\n" % self.payloadLength
            s += "\tnextHeader: " + str( self.nextHeader ) + "\n"
            s += "\thopLimit: %d\n" % self.hopLimit
            s += "\tsrcAddr: " + str(self.srcAddr) + "\n"
            s += "\tdestAddr: " + str(self.destAddr) + "\n"
            s += str( self.nextLayerPckt )
        else:
            s += "MALFORMED\n"
        return s
class ARPPacket():
    """Parsed ARP packet (hardware/protocol addresses and operation).

    Multi-byte fields are read with getNetWord and compared against
    byte-swapped constants (e.g. operation 1 appears as 0x100), matching the
    module-level IPv4/ARP/IPv6 ethertype convention.
    """
    def __init__( self, dataPos ):
        # parsed stays False if the byte stream ends mid-packet
        self.parsed = False
        try:
            self.HWType = getNetWord( dataPos )
            self.PType = getNetWord( dataPos )
            self.HLen = dataPos.next()
            self.PLen = dataPos.next()
            self.oper = getNetWord( dataPos )
            self.senderHWAddr = EthernetAddress( dataPos )
            self.senderPAddr = IpAddress( dataPos )
            self.targetHWAddr = EthernetAddress( dataPos )
            self.targetPAddr = IpAddress( dataPos )
            self.parsed = True
        except StopIteration:
            pass

    def __str__( self ):
        s = "ARP Packet: "
        if self.parsed:
            s += "OK\n"
            # Fixed output typo: operation 2 is a "REPLY" (was "REPLAY")
            s += { 0x100: "REQUEST", 0x200: "REPLY" }.get(self.oper, hex(self.oper) ) + "\n"
            s += "HTYPE: " + { 0x100: "Ethernet", }.get( self.HWType, hex( self.HWType) ) + " "
            s += "PTYPE: " + { IPv4: "IPv4", }.get( self.PType, hex( self.PType) ) + " "
            s += "HLEN: %x " % self.HLen
            s += "PLEN: %x " % self.PLen
            s += "\nSender: " + str(self.senderHWAddr) + " " + str( self.senderPAddr )
            s += "\nTarget: " + str(self.targetHWAddr) + " " + str( self.targetPAddr ) + "\n"
        else:
            s += "MALFORMED\n"
        return s
class EthernetType:
    """EtherType field of an Ethernet frame.

    Reads one 16-bit network-order word from ``dataPos`` and exposes
    predicates plus a factory for the matching next-layer parser.
    """
    def __init__( self, dataPos ):
        self.typeVal = getNetWord( dataPos )
    def isIPv4( self ):
        return self.typeVal == IPv4
    def isARP( self ):
        return self.typeVal == ARP
    def isIPv6( self ):
        return self.typeVal == IPv6
    def __str__( self ):
        names = { IPv4 : "IPv4", ARP : "ARP", IPv6 : "IPv6" }
        return names.get( self.typeVal, str(self.typeVal) )
    def getNextLayerPacket( self, dataPos ):
        """Parse the payload with the parser this EtherType selects.

        Returns the parsed packet object, or "" for unknown types.
        """
        if self.typeVal == IPv4:
            return IpPacket( dataPos )
        if self.typeVal == ARP:
            return ARPPacket( dataPos )
        if self.typeVal == IPv6:
            return Ip6Packet( dataPos )
        return ""
class EthernetAddress:
    """A 48-bit MAC address read from a byte stream."""
    def __init__( self, dataPos ):
        # Consume exactly six octets from the stream, in wire order.
        octets = []
        for _ in range(0, 6):
            octets.append( dataPos.next() )
        self.addr = octets
    def __str__( self ):
        # Dash-separated, two lowercase hex digits per octet.
        return "-".join( "%02x" % b for b in self.addr[0:6] )
class EthernetPacket:
    def __init__( self, dataPos ):
        """Parse one Ethernet frame header and its payload from ``dataPos``.

        Parse order mirrors the wire format: destination MAC, source MAC,
        EtherType, then the encapsulated next-layer packet.  ``self.parsed``
        stays False if the stream is exhausted mid-frame.
        """
        self.parsed = False
        try:
            self.destAddress = EthernetAddress( dataPos)
            self.srcAddress = EthernetAddress( dataPos)
            self.frametype = EthernetType( dataPos )
            # Dispatch on EtherType (IPv4 / ARP / IPv6, else "").
            self.nextLayerPckt = self.frametype.getNextLayerPacket( dataPos )
            self.parsed = True
        except StopIteration:
            # Ran out of bytes: leave self.parsed == False.
            pass
def __str__( self):
s = "Ethernet header: "
if self.parsed:
s | |
_f is not None:
rc = repr(_f.f_code).split(',')[0].split()[-1]
rc = as_unicode(rc)
code_list.insert(0, rc)
_f = _f.f_back
finally:
f = None
_f = None
tb = None
_tb = None
ctx.frame_release()
#print code_list
__s = [(a, b, c, d) for (a, b, c, d) in s if g_fDebug or c != 'rpdb2_import_wrapper']
if (ctx.m_uef_lineno is not None) and (len(__s) > 0):
(a, b, c, d) = __s[0]
__s = [(a, ctx.m_uef_lineno, c, d)] + __s[1:]
r = {}
r[DICT_KEY_STACK] = __s
r[DICT_KEY_CODE_LIST] = code_list
r[DICT_KEY_TID] = tid
r[DICT_KEY_BROKEN] = ctx.m_fBroken
r[DICT_KEY_EVENT] = as_unicode([ctx.m_event, 'exception'][fException])
if tid == ctid:
r[DICT_KEY_CURRENT_TID] = True
return r
def get_stack(self, tid_list, fAll, fException):
if fException and (fAll or (len(tid_list) != 0)):
raise BadArgument
ctx = self.get_current_ctx()
ctid = ctx.m_thread_id
if fAll:
ctx_list = list(self.get_threads().values())
elif fException or (len(tid_list) == 0):
ctx_list = [ctx]
else:
ctx_list = [self.get_threads().get(t, None) for t in tid_list]
_sl = [self.__get_stack(ctx, ctid, fException) for ctx in ctx_list if ctx is not None]
sl = [s for s in _sl if s is not None]
return sl
    def get_source_file(self, filename, lineno, nlines, frame_index, fException):
        """Return a dict describing up to ``nlines`` lines of a source file.

        filename may be empty, in which case the file of the current frame
        (selected by frame_index / fException) is used.  A lineno below 1
        means "whole file".  Session passwords embedded in
        start_embedded_debugger(...) calls are scrubbed before the lines
        leave the process.  Raises NotPythonSource for non-Python files and
        propagates NoThreads when no frame is available and no filename was
        given.
        """
        assert(is_unicode(filename))
        if lineno < 1:
            # Non-positive start means: read from the top, and disable the
            # nlines countdown below (-1 never reaches 0).
            lineno = 1
            nlines = -1
        _lineno = lineno
        r = {}
        frame_filename = None
        try:
            ctx = self.get_current_ctx()
            try:
                f = None
                base_frame = None
                base_frame = ctx.frame_acquire()
                (f, frame_lineno) = ctx.get_frame(base_frame, frame_index, fException)
                frame_filename = calc_frame_path(f)
            finally:
                # Drop frame references before releasing the frame lock so
                # no frame object outlives the acquire/release window.
                f = None
                base_frame = None
                ctx.frame_release()
            frame_event = [[ctx.m_event, 'call'][frame_index > 0], 'exception'][fException]
        except NoThreads:
            # Without a live frame we can still serve an explicit filename.
            if filename in [None, '']:
                raise
        if filename in [None, '']:
            __filename = frame_filename
            r[DICT_KEY_TID] = ctx.m_thread_id
        elif not is_provider_filesystem(filename):
            __filename = as_string(filename, sys.getfilesystemencoding())
        else:
            __filename = FindFile(filename, fModules = True)
            if not IsPythonSourceFile(__filename):
                raise NotPythonSource
        _filename = winlower(__filename)
        lines = []
        breakpoints = {}
        # True while we are inside a multi-line start_embedded_debugger(...)
        # call whose password argument is being masked.
        fhide_pwd_mode = False
        while nlines != 0:
            try:
                g_traceback_lock.acquire()
                line = get_source_line(_filename, _lineno)
            finally:
                g_traceback_lock.release()
            if line == '':
                break
            #
            # Remove any trace of session password from data structures that
            # go over the network.
            #
            if fhide_pwd_mode:
                if not ')' in line:
                    line = as_unicode('...\n')
                else:
                    line = '...""")' + line.split(')', 1)[1]
                    fhide_pwd_mode = False
            elif 'start_embedded_debugger(' in line:
                ls = line.split('start_embedded_debugger(', 1)
                line = ls[0] + 'start_embedded_debugger("""...Removed-password-from-output...'
                if ')' in ls[1]:
                    line += '""")' + ls[1].split(')', 1)[1]
                else:
                    # Call spans multiple lines: keep masking until ')'.
                    line += '\n'
                    fhide_pwd_mode = True
            lines.append(line)
            try:
                bp = self.m_bp_manager.get_breakpoint(_filename, _lineno)
                breakpoints[_lineno] = as_unicode([STATE_DISABLED, STATE_ENABLED][bp.isEnabled()])
            except KeyError:
                # No breakpoint on this line.
                pass
            _lineno += 1
            nlines -= 1
        # Frame position info is only meaningful if the requested file is
        # the one the frame is executing.
        if frame_filename == _filename:
            r[DICT_KEY_FRAME_LINENO] = frame_lineno
            r[DICT_KEY_EVENT] = as_unicode(frame_event)
            r[DICT_KEY_BROKEN] = ctx.m_fBroken
        r[DICT_KEY_LINES] = lines
        r[DICT_KEY_FILENAME] = as_unicode(_filename, sys.getfilesystemencoding())
        r[DICT_KEY_BREAKPOINTS] = breakpoints
        r[DICT_KEY_FIRST_LINENO] = lineno
        return r
    def __get_source(self, ctx, nlines, frame_index, fException):
        """Return a dict of ~``nlines`` source lines centered on the current
        line of thread context ``ctx``, or None if the thread is gone or the
        frame is invalid.

        Like get_source_file(), session passwords in
        start_embedded_debugger(...) calls are scrubbed from the output.
        """
        tid = ctx.m_thread_id
        # Only the currently-broken thread honours frame_index; all other
        # threads report their top frame.
        _frame_index = [0, frame_index][tid == self.m_current_ctx.m_thread_id]
        try:
            try:
                f = None
                base_frame = None
                base_frame = ctx.frame_acquire()
                (f, frame_lineno) = ctx.get_frame(base_frame, _frame_index, fException)
                frame_filename = calc_frame_path(f)
            except (ThreadDone, InvalidFrame):
                return None
        finally:
            # Drop frame references before releasing the frame lock so no
            # frame object outlives the acquire/release window.
            f = None
            base_frame = None
            ctx.frame_release()
        frame_event = [[ctx.m_event, 'call'][frame_index > 0], 'exception'][fException]
        # Center the window on the frame's current line.
        first_line = max(1, frame_lineno - nlines // 2)
        _lineno = first_line
        lines = []
        breakpoints = {}
        # True while inside a multi-line start_embedded_debugger(...) call
        # whose password argument is being masked.
        fhide_pwd_mode = False
        while nlines != 0:
            try:
                g_traceback_lock.acquire()
                line = get_source_line(frame_filename, _lineno)
            finally:
                g_traceback_lock.release()
            if line == '':
                break
            #
            # Remove any trace of session password from data structures that
            # go over the network.
            #
            if fhide_pwd_mode:
                if not ')' in line:
                    line = as_unicode('...\n')
                else:
                    line = '...""")' + line.split(')', 1)[1]
                    fhide_pwd_mode = False
            elif 'start_embedded_debugger(' in line:
                ls = line.split('start_embedded_debugger(', 1)
                line = ls[0] + 'start_embedded_debugger("""...Removed-password-from-output...'
                if ')' in ls[1]:
                    line += '""")' + ls[1].split(')', 1)[1]
                else:
                    # Call spans multiple lines: keep masking until ')'.
                    line += '\n'
                    fhide_pwd_mode = True
            lines.append(line)
            try:
                bp = self.m_bp_manager.get_breakpoint(frame_filename, _lineno)
                breakpoints[_lineno] = as_unicode([STATE_DISABLED, STATE_ENABLED][bp.isEnabled()])
            except KeyError:
                # No breakpoint on this line.
                pass
            _lineno += 1
            nlines -= 1
        r = {}
        r[DICT_KEY_FRAME_LINENO] = frame_lineno
        r[DICT_KEY_EVENT] = as_unicode(frame_event)
        r[DICT_KEY_BROKEN] = ctx.m_fBroken
        r[DICT_KEY_TID] = tid
        r[DICT_KEY_LINES] = lines
        r[DICT_KEY_FILENAME] = as_unicode(frame_filename, sys.getfilesystemencoding())
        r[DICT_KEY_BREAKPOINTS] = breakpoints
        r[DICT_KEY_FIRST_LINENO] = first_line
        return r
def get_source_lines(self, nlines, fAll, frame_index, fException):
if fException and fAll:
raise BadArgument
if fAll:
ctx_list = list(self.get_threads().values())
else:
ctx = self.get_current_ctx()
ctx_list = [ctx]
_sl = [self.__get_source(ctx, nlines, frame_index, fException) for ctx in ctx_list]
sl = [s for s in _sl if s is not None]
return sl
def __get_locals_globals(self, frame_index, fException, fReadOnly = False):
ctx = self.get_current_ctx()
(_globals, _locals, _original_locals_copy) = ctx.get_locals_copy(frame_index, fException, fReadOnly)
return (_globals, _locals, _original_locals_copy)
    def __calc_number_of_subnodes(self, r):
        """Return how many child nodes the namespace viewer should show for
        object ``r``: 0 for scalars, len() for containers, and the number of
        dir() attributes otherwise.

        NOTE(review): str8 / unicode / long / sets are presumably Py2/Py3
        compatibility aliases defined elsewhere in this module -- confirm.
        """
        # Scalar leaf types have no expandable children.
        for t in [bytearray, bytes, str, str8, unicode, int, long, float, bool, type(None)]:
            if t is type(r):
                return 0
        try:
            try:
                if isinstance(r, frozenset) or isinstance(r, set):
                    return len(r)
            except NameError:
                # frozenset/set may not exist on very old interpreters.
                pass
            if isinstance(r, sets.BaseSet):
                return len(r)
            if isinstance(r, dict):
                return len(r)
            if isinstance(r, list):
                return len(r)
            if isinstance(r, tuple):
                return len(r)
            # Generic object: one child per attribute.
            return len(dir(r))
        except AttributeError:
            return 0
        # Unreachable; kept for symmetry with the except path.
        return 0
def __calc_subnodes(self, expr, r, fForceNames, filter_level, repr_limit, encoding):
snl = []
try:
if isinstance(r, frozenset) or isinstance(r, set):
if len(r) > MAX_SORTABLE_LENGTH:
g = r
else:
g = [i for i in r]
sort(g)
for i in g:
if len(snl) >= MAX_NAMESPACE_ITEMS:
snl.append(MAX_NAMESPACE_WARNING)
break
is_valid = [True]
rk = repr_ltd(i, REPR_ID_LENGTH, encoding = ENCODING_RAW_I)
e = {}
e[DICT_KEY_EXPR] = as_unicode('_RPDB2_FindRepr((%s), %d)["%s"]' % (expr, REPR_ID_LENGTH, rk.replace('"', '"')))
e[DICT_KEY_NAME] = repr_ltd(i, repr_limit, encoding)
e[DICT_KEY_REPR] = repr_ltd(i, repr_limit, encoding, is_valid)
e[DICT_KEY_IS_VALID] = is_valid[0]
e[DICT_KEY_TYPE] = as_unicode(parse_type(type(i)))
e[DICT_KEY_N_SUBNODES] = self.__calc_number_of_subnodes(i)
snl.append(e)
return snl
except NameError:
pass
if isinstance(r, sets.BaseSet):
if len(r) > MAX_SORTABLE_LENGTH:
g = r
else:
g = [i for i in r]
sort(g)
for i in g:
if len(snl) >= MAX_NAMESPACE_ITEMS:
snl.append(MAX_NAMESPACE_WARNING)
break
is_valid = [True]
rk = repr_ltd(i, REPR_ID_LENGTH, encoding = ENCODING_RAW_I)
e = {}
e[DICT_KEY_EXPR] = as_unicode('_RPDB2_FindRepr((%s), %d)["%s"]' % (expr, REPR_ID_LENGTH, rk.replace('"', '"')))
e[DICT_KEY_NAME] = repr_ltd(i, repr_limit, encoding)
e[DICT_KEY_REPR] = repr_ltd(i, repr_limit, encoding, is_valid)
e[DICT_KEY_IS_VALID] = is_valid[0]
e[DICT_KEY_TYPE] = as_unicode(parse_type(type(i)))
e[DICT_KEY_N_SUBNODES] = self.__calc_number_of_subnodes(i)
snl.append(e)
return snl
if isinstance(r, list) or isinstance(r, tuple):
for i, v in enumerate(r[0: MAX_NAMESPACE_ITEMS]):
is_valid = [True]
e = {}
e[DICT_KEY_EXPR] = as_unicode('(%s)[%d]' % (expr, i))
e[DICT_KEY_NAME] = as_unicode(repr(i))
e[DICT_KEY_REPR] = repr_ltd(v, repr_limit, encoding, is_valid)
e[DICT_KEY_IS_VALID] = is_valid[0]
e[DICT_KEY_TYPE] = as_unicode(parse_type(type(v)))
e[DICT_KEY_N_SUBNODES] = self.__calc_number_of_subnodes(v)
snl.append(e)
if len(r) > MAX_NAMESPACE_ITEMS:
snl.append(MAX_NAMESPACE_WARNING)
return snl
if isinstance(r, dict):
if filter_level == 2 and expr in ['locals()', 'globals()']:
r = copy.copy(r)
for k, v in list(r.items()):
if parse_type(type(v)) in ['function', 'classobj', 'type']:
del r[k]
if len(r) > MAX_SORTABLE_LENGTH:
kl = r
else:
kl = list(r.keys())
sort(kl)
for k in kl:
#
# Remove any trace of session password from data structures that
# go over the network.
#
if k in ['_RPDB2_FindRepr', '_RPDB2_builtins', '_rpdb2_args', '_rpdb2_pwd', '<PASSWORD>']:
continue
v = r[k]
if len(snl) >= MAX_NAMESPACE_ITEMS:
snl.append(MAX_NAMESPACE_WARNING)
break
is_valid = [True]
e = {}
if [True for t in [bool, int, float, bytes, str, unicode, type(None)] if t is type(k)]:
rk = repr(k)
if len(rk) < REPR_ID_LENGTH:
e[DICT_KEY_EXPR] = as_unicode('(%s)[%s]' % (expr, rk))
if type(k) is str8:
rk = repr(k)
if len(rk) < REPR_ID_LENGTH:
e[DICT_KEY_EXPR] = as_unicode('(%s)[str8(%s)]' % (expr, rk[1:]))
if not DICT_KEY_EXPR in e:
rk = repr_ltd(k, REPR_ID_LENGTH, encoding = ENCODING_RAW_I)
e[DICT_KEY_EXPR] = as_unicode('_RPDB2_FindRepr((%s), %d)["%s"]' % (expr, REPR_ID_LENGTH, rk.replace('"', '"')))
e[DICT_KEY_NAME] = as_unicode([repr_ltd(k, repr_limit, encoding), k][fForceNames])
e[DICT_KEY_REPR] = repr_ltd(v, repr_limit, encoding, is_valid)
e[DICT_KEY_IS_VALID] = is_valid[0]
e[DICT_KEY_TYPE] = as_unicode(parse_type(type(v)))
e[DICT_KEY_N_SUBNODES] = self.__calc_number_of_subnodes(v)
snl.append(e)
return snl
al = calc_attribute_list(r, filter_level)
sort(al)
for a in al:
if a == 'm_rpdb2_pwd':
continue
try:
v = getattr(r, a)
except AttributeError:
continue
if len(snl) >= MAX_NAMESPACE_ITEMS:
snl.append(MAX_NAMESPACE_WARNING)
break
is_valid = [True]
e = {}
e[DICT_KEY_EXPR] = as_unicode('(%s).%s' % (expr, a))
e[DICT_KEY_NAME] = as_unicode(a)
e[DICT_KEY_REPR] = repr_ltd(v, repr_limit, encoding, is_valid)
e[DICT_KEY_IS_VALID] = is_valid[0]
e[DICT_KEY_TYPE] = as_unicode(parse_type(type(v)))
e[DICT_KEY_N_SUBNODES] = self.__calc_number_of_subnodes(v)
snl.append(e)
return snl
def get_exception(self, frame_index, fException):
ctx = self.get_current_ctx()
if is_py3k():
exc_info = | |
# Repository: takehuge/PYQUM
'''Basic QuBit Characterizations'''
from colorama import init, Fore, Back
init(autoreset=True) #to convert termcolor to wins color
from os.path import basename as bs
mdlname = bs(__file__).split('.')[0] # instrument-module's name e.g. ENA, PSG, YOKO
from time import time, sleep
from numpy import linspace, sin, pi, prod, array, mean, sqrt, zeros, float64, ceil, power, arctan2, floor
from flask import request, session, current_app, g, Flask
from pyqum.instrument.modular import VSA
from pyqum.instrument.benchtop import TKAWG as AWG
from pyqum.instrument.benchtop import PSGV as PSG0
from pyqum.instrument.benchtop import PSGA as PSG1
from pyqum.instrument.benchtop import ENA, YOKO
from pyqum.instrument.logger import settings, clocker, get_status, set_status, status_code
from pyqum.instrument.analyzer import curve, IQAP, UnwraPhase, IQAParray
from pyqum.instrument.toolbox import cdatasearch, gotocdata, waveform, squarewave
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, The Pyqum Project"
__credits__ = ["<NAME>"]
__license__ = "GPL"
__version__ = "beta3"
__email__ = "<EMAIL>"
__status__ = "development"
yoko_choice = 0 # Left: 0 for coil; Right: 1 for Z-Line
# **********************************************************************************************************************************************************
# 1. FREQUENCY RESPONSE MEASUREMENT:
@settings(2) # data-density
def F_Response(user, tag="", corder={}, comment='', dayindex='', taskentry=0, resumepoint=0, instr=['YOKO','ENA'], testeach=False):
    '''Characterizing Frequency Response:
    C-Order: Flux-Bias, S-Parameter, IF-Bandwidth, Power, Frequency

    Generator task: the first yield publishes the pre-measurement settings;
    subsequent yields emit one IQ data chunk (frequency sweep) per loop
    iteration, or a (loopcount, loop_dur) timing estimate when testeach is
    set.  resumepoint allows continuing an interrupted measurement.
    NOTE(review): corder={} / instr=[...] are mutable default arguments --
    presumably never mutated by the framework, but worth confirming.
    '''
    sample = get_status("MSSN")[session['user_name']]['sample']
    # pushing pre-measurement parameters to settings:
    yield user, sample, tag, instr, corder, comment, dayindex, taskentry, testeach
    set_status("F_Response", dict(active=instr))
    # User-defined Controlling-PARAMETER(s) ======================================================================================
    fluxbias = waveform(corder['Flux-Bias'])
    Sparam = waveform(corder['S-Parameter'])
    ifb = waveform(corder['IF-Bandwidth'])
    powa = waveform(corder['Power'])
    freq = waveform(corder['Frequency'])
    # Total data points:
    datasize = prod([waveform(x).count for x in corder.values()]) * 2 #data density of 2 due to IQ
    # Pre-loop settings:
    # ENA:
    bench = ENA.Initiate(True)
    ENA.dataform(bench, action=['Set', 'REAL'])
    ENA.sweep(bench, action=['Set', 'ON', freq.count])
    fstart, fstop = freq.data[0]*1e9, freq.data[-1]*1e9
    ENA.linfreq(bench, action=['Set', fstart, fstop]) # Linear Freq-sweep-range
    # YOKO:
    if "opt" not in fluxbias.data: # check if it is in optional-state
        yokog = YOKO.Initiate(current=True, which=yoko_choice) # PENDING option: choose between Voltage / Current output
        YOKO.output(yokog, 1)
    # Buffer setting(s) for certain loop(s):
    buffersize_1 = freq.count * 2 #data density of 2 due to IQ
    # User-defined Measurement-FLOW ==============================================================================================
    if testeach: # measure-time contribution from each measure-loop
        loopcount, loop_dur = [], []
    stage, prev = clocker(0) # Marking starting point of time
    # Registering parameter(s)-structure
    cstructure = [fluxbias.count,Sparam.count,ifb.count,powa.count]
    # set previous parameters based on resumepoint:
    if resumepoint > 0:
        caddress = cdatasearch(resumepoint//buffersize_1, cstructure)
        # Only those involved in virtual for-loop need to be pre-set here:
        if "opt" not in fluxbias.data: # check if it is in optional-state
            YOKO.sweep(yokog, str(fluxbias.data[caddress[0]]), pulsewidth=77*1e-3, sweeprate=0.0007) # A-mode: sweeprate=0.0007 A/s ; V-mode: sweeprate=0.07 V/s
        ENA.setrace(bench, Mparam=[Sparam.data[caddress[1]]], window='D1')
        ENA.ifbw(bench, action=['Set', ifb.data[caddress[2]]])
    measure_loop_1 = range(resumepoint//buffersize_1,datasize//buffersize_1) # saving chunk by chunk improves speed a lot!
    while True:
        for i in measure_loop_1:
            # Registering parameter(s): index-locations for each parameter.
            caddress = cdatasearch(i, cstructure)
            # setting each c-order (From High to Low level of execution):
            if not i%prod(cstructure[1::]): # virtual for-loop using exact-multiples condition
                if "opt" not in fluxbias.data: # check if it is in optional-state
                    if testeach: # test each measure-loop:
                        loopcount += [fluxbias.count]
                        if fluxbias.count > 1: loop_dur += [abs(fluxbias.data[0]-fluxbias.data[1])/0.2 + 35*1e-3]
                        else: loop_dur += [0]
                        stage, prev = clocker(stage, prev) # Marking time
                    else: YOKO.sweep(yokog, str(fluxbias.data[caddress[0]]), pulsewidth=77*1e-3, sweeprate=0.0007) # A-mode: sweeprate=0.0007 A/s ; V-mode: sweeprate=0.07 V/s
            if not i%prod(cstructure[2::]): # virtual for-loop using exact-multiples condition
                ENA.setrace(bench, Mparam=[Sparam.data[caddress[1]]], window='D1')
            if not i%prod(cstructure[3::]): # virtual for-loop using exact-multiples condition
                ENA.ifbw(bench, action=['Set', ifb.data[caddress[2]]])
            ENA.power(bench, action=['Set', powa.data[caddress[3]]]) # same as the whole measure-loop
            # start sweeping:
            stat = ENA.sweep(bench) #getting the estimated sweeping time
            print("Time-taken for this loop would be: %s (%spts)" %(stat[1]['TIME'], stat[1]['POINTS']))
            print("Operation Complete: %s" %bool(ENA.measure(bench)))
            # adjusting display on ENA:
            ENA.autoscal(bench)
            ENA.selectrace(bench, action=['Set', 'para 1 calc 1'])
            data = ENA.sdata(bench)
            # print(Fore.YELLOW + "\rProgress: %.3f%% [%s]" %((i+1)/datasize*100, data), end='\r', flush=True)
            print(Fore.YELLOW + "\rProgress: %.3f%%" %((i+1)/datasize*buffersize_1*100), end='\r', flush=True)
            # test for the last loop if there is
            # NOTE(review): in testeach mode the instruments are closed and a
            # single timing report is yielded; the caller presumably stops the
            # generator at that point -- confirm.
            if testeach: # test each measure-loop:
                loopcount += [len(measure_loop_1)]
                loop_dur += [time() - prev]
                stage, prev = clocker(stage, prev) # Marking time
                ENA.close(bench)
                if "opt" not in fluxbias.data: # check if it is in optional-state
                    YOKO.close(yokog, False)
                yield loopcount, loop_dur
            else:
                if get_status("F_Response")['pause']:
                    break
                else:
                    yield data
        # One full pass done: stop (mark paused, close instruments) unless
        # the task is flagged to repeat.
        if not get_status("F_Response")['repeat']:
            set_status("F_Response", dict(pause=True))
            ENA.close(bench)
            if "opt" not in fluxbias.data: # check if it is in optional-state
                YOKO.close(yokog, False)
            return
# **********************************************************************************************************************************************************
# 2. CONTINUOUS-WAVE SWEEPING:
@settings(2) # data-density
def CW_Sweep(user, tag="", corder={}, comment='', dayindex='', taskentry=0, resumepoint=0, instr=['PSG','YOKO','ENA'], testeach=False):
'''Continuous Wave Sweeping:
C-Order: Flux-Bias, XY-Frequency, XY-Power, S-Parameter, IF-Bandwidth, Frequency, Power
'''
sample = get_status("MSSN")[session['user_name']]['sample']
# pushing pre-measurement parameters to settings:
yield user, sample, tag, instr, corder, comment, dayindex, taskentry, testeach
set_status("CW_Sweep", dict(active=instr))
# User-defined Controlling-PARAMETER(s) ======================================================================================
fluxbias = waveform(corder['Flux-Bias'])
xyfreq = waveform(corder['XY-Frequency'])
xypowa = waveform(corder['XY-Power'])
Sparam = waveform(corder['S-Parameter'])
ifb = waveform(corder['IF-Bandwidth'])
freq = waveform(corder['Frequency'])
# special treatment to power in this CW-Mode Sweeping:
powa = waveform(corder['Power'])
powa_repeat = powa.inner_repeat
print("power sequence: %s, length: %s, inner-repeat-counts: %s" %(powa.command, powa.count, powa_repeat))
# input("continue?")
# Total data points:
datasize = int(prod([waveform(x).count * waveform(x).inner_repeat for x in corder.values()], dtype='uint64')) * 2 #data density of 2 due to IQ
print("data size: %s" %datasize)
# Pre-loop settings:
# ENA:
bench = ENA.Initiate(True)
ENA.dataform(bench, action=['Set', 'REAL'])
if powa_repeat == 1:
# collect swept power-data every measure-loop
ENA.sweep(bench, action=['Set', 'ON', powa.count])
ENA.power(bench, action=['Set', '', powa.data[0], powa.data[-1]]) # for power sweep (set pstart & pstop)
buffersize_1 = powa.count * 2 # (buffer) data density of 2 due to IQ
else:
# collect repetitive power-data every measure-loop
ENA.sweep(bench, action=['Set', 'ON', powa_repeat])
buffersize_1 = powa_repeat * 2 # (buffer) data density of 2 due to IQ
# YOKO:
if "opt" not in fluxbias.data: # check if it is in optional-state / serious-state
yokog = YOKO.Initiate(current=True, which=yoko_choice) # pending option
YOKO.output(yokog, 1)
# PSG:
if "opt" not in xyfreq.data: # check if it is in optional-state / serious-state
sogo = PSG0.Initiate() # pending option
PSG0.rfoutput(sogo, action=['Set', 1])
# User-defined Measurement-FLOW ==============================================================================================
if testeach: # measure-time contribution from each measure-loop
loopcount, loop_dur = [], []
stage, prev = clocker(0) # Marking starting point of time
# Registerring parameter(s)-structure
if powa_repeat == 1: cstructure = [fluxbias.count, xyfreq.count, xypowa.count, Sparam.count, ifb.count, freq.count, 1] # just single CW
else: cstructure = [fluxbias.count, xyfreq.count, xypowa.count, Sparam.count, ifb.count, freq.count, powa.count] # take CW average by repeating
# set previous parameters based on resumepoint:
if resumepoint//buffersize_1 > 0:
caddress = cdatasearch(resumepoint//buffersize_1, cstructure)
# Only those involved in virtual for-loop need to be pre-set here:
# Optionals:
if "opt" not in fluxbias.data: # check if it is in optional-state / serious-state
YOKO.sweep(yokog, str(fluxbias.data[caddress[0]]), pulsewidth=77*1e-3, sweeprate=0.0007) # A-mode: sweeprate=0.0007 A/s ; V-mode: sweeprate=0.07 V/s
if "opt" not in xyfreq.data: # check if it is in optional-state / serious-state
PSG0.frequency(sogo, action=['Set', str(xyfreq.data[caddress[1]]) + "GHz"])
PSG0.power(sogo, action=['Set', str(xypowa.data[caddress[2]]) + "dBm"])
# Basics:
ENA.setrace(bench, Mparam=[Sparam.data[caddress[3]]], window='D1')
ENA.ifbw(bench, action=['Set', ifb.data[caddress[4]]])
ENA.cwfreq(bench, action=['Set', freq.data[caddress[5]]*1e9])
measure_loop_1 = range(resumepoint//buffersize_1,datasize//buffersize_1) # saving chunck by chunck improves speed a lot!
while True:
for i in measure_loop_1:
# determining the index-locations for each parameters, i.e. the address at any instance
caddress = cdatasearch(i, cstructure)
# setting each c-order (From High to Low level of execution):
# ***************************************************************
# Optionals:
if not i%prod(cstructure[1::]): # virtual for-loop using exact-multiples condition
if "opt" not in fluxbias.data: # check if it is in optional-state
if testeach: # adding instrument transition-time between set-values:
loopcount += [fluxbias.count]
if fluxbias.count > 1: loop_dur += [abs(fluxbias.data[0]-fluxbias.data[1])/0.2 + 35*1e-3]
else: loop_dur += [0]
stage, prev = clocker(stage, prev) # Marking time
else: YOKO.sweep(yokog, str(fluxbias.data[caddress[0]]), pulsewidth=77*1e-3, sweeprate=0.0007) # A-mode: sweeprate=0.0007 A/s ; V-mode: sweeprate=0.07 V/s
if not i%prod(cstructure[2::]): # virtual for-loop using exact-multiples condition
if "opt" not in xyfreq.data: # check if it is in optional-state
PSG0.frequency(sogo, action=['Set', str(xyfreq.data[caddress[1]]) + "GHz"])
if not i%prod(cstructure[3::]): # virtual for-loop using exact-multiples condition
if "opt" not in xypowa.data: # check if it is in optional-state
PSG0.power(sogo, action=['Set', str(xypowa.data[caddress[2]]) + "dBm"])
# Basics:
if not i%prod(cstructure[4::]): # virtual for-loop using exact-multiples condition
ENA.setrace(bench, Mparam=[Sparam.data[caddress[3]]], window='D1')
if not i%prod(cstructure[5::]): # virtual for-loop using exact-multiples condition
ENA.ifbw(bench, action=['Set', ifb.data[caddress[4]]])
if not i%prod(cstructure[6::]): # virtual for-loop using exact-multiples condition
ENA.cwfreq(bench, action=['Set', freq.data[caddress[5]]*1e9])
if powa_repeat > 1:
ENA.power(bench, action=['Set', '', powa.data[caddress[6]], powa.data[caddress[6]]]) # same as the whole measure-loop
# | |
matrix_type = info_map[ subroutine ][ 'argument_map' ][ matrix_arg ][ 'code' ][ 'level_1_static_assert' ]
matrix_with_trans += [ matrix_type ]
else:
matrix_wo_trans.append( info_map[ subroutine ][ 'argument_map' ][ matrix_arg ][ 'code' ][ 'level_1_static_assert' ] )
#
# Matrices have trans options in this case. If there is one without,
# that one will determine the order of the call
#
if has_trans:
includes += [ '#include <boost/numeric/bindings/trans_tag.hpp>' ]
if len( matrix_wo_trans )>0:
typedef_list.insert( 0, 'typedef typename result_of::data_order< ' + matrix_wo_trans[0] + \
' >::type order;' )
includes += [ '#include <boost/numeric/bindings/data_order.hpp>' ]
else:
typedef_list.insert( 0, 'typedef typename detail::default_order< ' + matrix_with_trans[0] + \
' >::type order;' )
includes += [ '#include <boost/numeric/bindings/blas/detail/default_order.hpp>' ]
else:
# so, there's no trans option
# but, what if there's an order? (e.g., syr) -- then use `
if "has_cblas_order_arg" in info_map[ subroutine ]:
typedef_list.insert( 0, 'typedef typename result_of::data_order< ' + matrix_wo_trans[0] + \
' >::type order;' )
includes += [ '#include <boost/numeric/bindings/data_order.hpp>' ]
#
# Add an include in case of the uplo or diag options
#
if 'UPLO' in info_map[ subroutine ][ 'arguments' ]:
includes += [ '#include <boost/numeric/bindings/uplo_tag.hpp>' ]
if 'DIAG' in info_map[ subroutine ][ 'arguments' ]:
includes += [ '#include <boost/numeric/bindings/diag_tag.hpp>' ]
#
# Create static assertions, first by value type
#
has_comment = False
for value_type_tmp_key in info_map[ subroutine ][ 'grouped_arguments' ][ 'by_value_type' ].keys():
# look up whether they are template params
static_asserts = []
for arg in info_map[ subroutine ][ 'grouped_arguments' ][ 'by_value_type' ][ value_type_tmp_key ]:
if info_map[ subroutine ][ 'argument_map' ][ arg ][ 'code' ][ 'level_1_type' ] != None:
static_asserts.append( arg )
if len(static_asserts)>1:
arg_A = static_asserts[0]
for arg_B in static_asserts[1:]:
print "Adding static assert for argA", arg_A, " argb", arg_B
arg_left = info_map[ subroutine ][ 'argument_map' ][ arg_A ][ 'code' ][ 'level_1_static_assert' ]
arg_right = info_map[ subroutine ][ 'argument_map' ][ arg_B ][ 'code' ][ 'level_1_static_assert' ]
if arg_left != None and arg_right != None:
assert_line = 'BOOST_STATIC_ASSERT( (is_same< ' + \
'typename remove_const< typename $NAMESPACEvalue_type< ' + arg_left + ' >::type >::type, ' + \
'typename remove_const< typename $NAMESPACEvalue_type< ' + arg_right + ' >::type >::type' \
' >::value) );'
if not has_comment:
#level1_static_assert_list += [ '// Here, we assert... ' ]
has_comment = True
level1_static_assert_list += [ assert_line ]
#
# Matrices should adhere to their storage scheme
#
if 'matrix' in info_map[ subroutine ][ 'grouped_arguments' ][ 'by_type' ]:
for matrix_id in info_map[ subroutine ][ 'grouped_arguments' ][ 'by_type' ][ 'matrix' ]:
info_map_item = info_map[ subroutine ][ 'argument_map' ][ matrix_id ]
if 'banded' in info_map_item and info_map_item[ 'banded' ] == True and \
info_map_item[ 'code' ][ 'level_1_type' ] != None:
assert_line = 'BOOST_STATIC_ASSERT( ($NAMESPACEhas_band_array< ' + info_map_item[ 'code' ][ 'level_1_static_assert' ] + \
' >::value) );'
level1_static_assert_list += [ assert_line ]
includes += [ "#include <boost/numeric/bindings/has_band_array.hpp>" ]
elif 'packed' in info_map_item and info_map_item[ 'packed' ] == True and \
info_map_item[ 'code' ][ 'level_1_type' ] != None:
assert_line = 'BOOST_STATIC_ASSERT( ($NAMESPACEhas_triangular_array< ' + info_map_item[ 'code' ][ 'level_1_static_assert' ] + \
' >::value) );'
level1_static_assert_list += [ assert_line ]
includes += [ "#include <boost/numeric/bindings/has_triangular_array.hpp>" ]
elif info_map_item[ 'code' ][ 'level_1_type' ] != None:
assert_line = 'BOOST_STATIC_ASSERT( ($NAMESPACEhas_linear_array< ' + info_map_item[ 'code' ][ 'level_1_static_assert' ] + \
' >::value) );'
level1_static_assert_list += [ assert_line ]
includes += [ "#include <boost/numeric/bindings/has_linear_array.hpp>" ]
#
# Vectors should have linear arrays
#
if 'vector' in info_map[ subroutine ][ 'grouped_arguments' ][ 'by_type' ]:
for vector_id in info_map[ subroutine ][ 'grouped_arguments' ][ 'by_type' ][ 'vector' ]:
info_map_item = info_map[ subroutine ][ 'argument_map' ][ vector_id ]
if 'ref_stride' in info_map_item and info_map_item[ 'code' ][ 'level_1_type' ] != None:
assert_line = 'BOOST_STATIC_ASSERT( ($NAMESPACEhas_linear_array< ' + info_map_item[ 'code' ][ 'level_1_static_assert' ] + \
' >::value) );'
level1_static_assert_list += [ assert_line ]
includes += [ "#include <boost/numeric/bindings/has_linear_array.hpp>" ]
# Make sure the mutable stuff is mutable
if 'output' in info_map[ subroutine ][ 'grouped_arguments' ][ 'by_io' ]:
for arg in info_map[ subroutine ][ 'grouped_arguments' ][ 'by_io' ][ 'output' ]:
if info_map[ subroutine ][ 'argument_map' ][ arg ][ 'code' ][ 'level_1_type' ] != None:
assert_line = 'BOOST_STATIC_ASSERT( ($NAMESPACEis_mutable< ' + \
info_map[ subroutine ][ 'argument_map' ][ arg ][ 'code' ][ 'level_1_static_assert' ] + ' >::value) );'
level1_static_assert_list += [ assert_line ]
# import the code by argument
for arg in info_map[ subroutine ][ 'arguments' ]:
level0_arg_list += [ info_map[ subroutine ][ 'argument_map' ][ arg ][ 'code' ][ 'call_level_0' ] ]
if info_map[ subroutine ][ 'argument_map' ][ arg ][ 'code' ][ 'level_1' ] != None:
level1_arg_list += [ info_map[ subroutine ][ 'argument_map' ][ arg ][ 'code' ][ 'level_1' ] ]
if info_map[ subroutine ][ 'argument_map' ][ arg ][ 'code' ][ 'call_level_1' ] != None:
call_level1_arg_list += [ info_map[ subroutine ][ 'argument_map' ][ arg ][ 'code' ][ 'call_level_1' ] ]
if info_map[ subroutine ][ 'argument_map' ][ arg ][ 'code' ][ 'level_1_type' ] != None and \
info_map[ subroutine ][ 'argument_map' ][ arg ][ 'code' ][ 'level_1_type' ] not in level1_type_arg_list:
level1_type_arg_list += [ info_map[ subroutine ][ 'argument_map' ][ arg ][ 'code' ][ 'level_1_type' ] ]
if info_map[ subroutine ][ 'argument_map' ][ arg ][ 'code' ][ 'level_1_assert' ] != []:
level1_assert_list += info_map[ subroutine ][ 'argument_map' ][ arg ][ 'code' ][ 'level_1_assert' ]
if info_map[ subroutine ][ 'argument_map' ][ arg ][ 'code' ][ 'typedef' ] != None:
# make sure trans tags always preceed other tags, as they may be dependant
if 'TRANS' in arg:
at_i = 0
if len(typedef_list)>0 and '_order<' in typedef_list[0]:
at_i = 1
typedef_list.insert( at_i, info_map[ subroutine ][ 'argument_map' ][ arg ][ 'code' ][ 'typedef' ] )
else:
typedef_list.append( info_map[ subroutine ][ 'argument_map' ][ arg ][ 'code' ][ 'typedef' ] )
if info_map[ subroutine ][ 'argument_map' ][ arg ][ 'code' ][ 'level_2' ] != None:
level2_arg_list += [ info_map[ subroutine ][ 'argument_map' ][ arg ][ 'code' ][ 'level_2' ] ]
if 'banded' in info_map[ subroutine ][ 'argument_map' ][ arg ]:
includes += [ '#include <boost/numeric/bindings/bandwidth.hpp>' ]
# Insert the order_type() if appropriate
if "has_cblas_order_arg" in info_map[ subroutine ] and \
info_map[ subroutine ][ 'has_cblas_order_arg' ]:
level0_arg_list.insert( 0, "order()" )
# Level 1 replacements
level1_template = level1_template.replace( "$TYPEDEFS", "\n ".join( typedef_list ) )
level1_template = level1_template.replace( "$CALL_LEVEL0", ", ".join( level0_arg_list ) )
level1_template = level1_template.replace( "$CALL_LEVEL1", ", ".join( call_level1_arg_list ) )
level1_template = level1_template.replace( "$LEVEL1", ", ".join( level1_arg_list ) )
level1_template = level1_template.replace( "$TYPES", ", ".join( level1_type_arg_list ) )
level1_template = level1_template.replace( "$ASSERTS", "\n ".join( sorted( level1_assert_list ) ) )
level1_template = level1_template.replace( '$RESULT_TYPE', info_map[ subroutine ][ 'level1_result_type' ] )
level1_template = level1_template.replace( '$RETURN_STATEMENT', info_map[ subroutine ][ 'return_statement' ] )
level1_template = level1_template.replace( "$KEYWORDS", ", ".join( keyword_type_list ) )
if len( level1_static_assert_list ) > 0:
level1_template = level1_template.replace( "$STATIC_ASSERTS", "\n ".join( level1_static_assert_list ) )
else:
level1_template = level1_template.replace( "\n $STATIC_ASSERTS", "" )
# Level 2 replacements
# some special stuff is done here, such as replacing real_type with a
# type-traits deduction, etc..
# more important: all non-const and const variants of functions are written here
level2_functions = []
level2_arg_lists, level2_comments = \
bindings.generate_const_variants( group_name.lower() + '.' + value_type, \
level2_arg_list, template_map )
for level2_idx in range( 0, len( level2_arg_lists ) ):
level2_function = level2_template.replace( "$LEVEL2", \
", ".join( level2_arg_lists[ level2_idx ] ) )
if len( "".join(level2_comments[ level2_idx ] ) ) > 0:
level2_function = level2_function.replace( "$COMMENTS", \
"\n".join( level2_comments[ level2_idx ] ) )
level2_functions.append( level2_function )
level2_template = "\n".join( level2_functions )
level2_template = level2_template.replace( "$COMMENTS\n", "" )
#level2_template = level2_template.replace( "$LEVEL2", ", ".join( level2_arg_list ) )
if len(level1_type_arg_list)>0:
my_key = group_name_l + '.' + value_type + '.first_typename'
if netlib.my_has_key( my_key, template_map ):
first_typename = template_map[ netlib.my_has_key( \
my_key, template_map ) ].strip()
else:
first_typename = ''
for tn in level1_type_arg_list:
bare_type = tn.split(" ")[-1]
if first_typename == '' and bare_type[:6].lower() in [ 'matrix', 'vector' ]:
first_typename = bare_type
first_typename_datatype = first_typename[:6].lower() # 'matrix' or 'vector' or 'scalar'
else:
level1_type_arg_list.insert( 0, 'typename | |
#!/usr/bin/python2
# -*- coding: utf-8 -*-
import os
import json
import requests
from tabulate import tabulate
from HTMLParser import HTMLParser
log = []
h = HTMLParser()
class MLStripper(HTMLParser):
    """HTML parser that throws away markup and keeps only text nodes."""

    def __init__(self):
        # Bug fix: initialise the base parser. On Python 2 HTMLParser's
        # __init__ only calls reset(), so this is behaviour-compatible;
        # it also makes the class work on Python 3, where __init__ sets
        # required state (e.g. convert_charrefs) that reset() alone misses.
        HTMLParser.__init__(self)
        self.reset()
        self.fed = []  # accumulated text fragments, in document order

    def handle_data(self, d):
        # Called by feed() for every text node between tags.
        self.fed.append(d)

    def get_data(self):
        """Return all text collected so far as one string."""
        return ''.join(self.fed)
def strip_newlines(text):
    """Collapse <br> tag variants and literal newlines into single spaces."""
    # Replacement order matches the original; none of the tokens is a
    # substring of another's replacement result, so a loop is equivalent.
    for token in ('<br>', '<br />', '<br/>', '\n'):
        text = text.replace(token, ' ')
    return text
def strip_html(text):
    """Return *text* with HTML entities decoded and all tags removed.

    A None input yields the literal string 'None' (preserved from the
    original behaviour, since callers concatenate the result).
    """
    if text is None:  # idiom fix: identity comparison for None
        return str(text)
    # Decode entities and flatten <br>/newlines before parsing.
    text = h.unescape(strip_newlines(text))
    s = MLStripper()
    s.feed(text)
    return s.get_data().strip()
def clear():
    """Clear the terminal window (cls on Windows, clear elsewhere)."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
def get_response(request):
    """Fetch *request* from the police API; return a (status, payload) pair.

    Status 0 means success and the payload is decoded JSON; status 1 is a
    server-side/JSON error and status 2 a connectivity failure, in which
    case the payload is a user-facing error message.
    """
    try:
        req = requests.get('https://data.police.uk/api/%s' % request)
    except requests.exceptions.ConnectionError:
        return 2, '\nSomething went wrong! Check your internet connection.'
    try:
        data = json.loads(req.text.encode('utf8'))
    except ValueError:
        # Body was not JSON -- most likely an HTTP error page.
        return 1, '\nSomething went wrong! Server responded with status code %d. Remember options are case sensitive.' % req.status_code
    return 0, data
def get_options(required, logging=True):
    """Prompt the user for each option named in *required*.

    Returns the answers in the same order. Re-prompts until the user
    confirms; when *logging* is true the confirmed answers are appended
    to the session log.
    """
    got_options = []
    print
    for option in required:
        i = raw_input('%s: ' % option).strip()
        got_options.append(i)
    confirm = raw_input('Are those options correct? ').strip().lower()
    if confirm in ('y', 'yes'):
        if logging:
            # Record each prompt/answer pair in the session log.
            for name, answer in zip(required, got_options):
                logit('%s: %s\n' % (name, answer))
            logit('\n')
        return got_options
    # Bug fix: propagate the logging flag on retry -- previously a call
    # made with logging=False would silently start logging after one
    # failed confirmation.
    return get_options(required, logging)
def grab(element, field, struct='{s}'):
    """Safely pull *field* out of *element* (a decoded JSON value).

    Returns '' when the field is missing or empty, *struct* with '{s}'
    replaced by the cleaned text for string values, and True for any
    other non-empty value -- so grab(x, 'f') doubles as an existence
    test for list/dict fields.
    """
    try:
        data = element[field]
    except (TypeError, KeyError, IndexError):
        # Narrowed from a bare except: element is not indexable or the
        # field is absent -- the only failure modes for JSON data.
        return ''
    if not data:
        # Covers None, '' and other empty values (the old
        # "not data or data == None" check was redundant).
        return ''
    if isinstance(data, basestring):
        return struct.replace('{s}', strip_html(data))
    return True
def logit(line):
    """Append one entry to the shared in-memory session log."""
    global log
    log += [line]
def crime_last_updated():
    """Report the date on which the crime data was last refreshed."""
    err, resp = get_response('crime-last-updated')
    if err:
        # Record the failure in the session log, hand the message back.
        logit('Server error.\n\n' if err == 1 else 'Connection error.\n\n')
        return resp
    return validate('Crime data was last updated on: %s\n' % resp['date'])
def crime_categories():
    """Tabulate every crime category together with its API identifier."""
    err, resp = get_response('crime-categories')
    if err:
        logit('Server error.\n\n' if err == 1 else 'Connection error.\n\n')
        return resp
    table = [['Name', 'ID']]
    table.extend([entity['name'], entity['url']] for entity in resp)
    return validate(table)
def locate(options):
    """Look up the force and neighbourhood covering a lat/long pair.

    options[0] is the latitude and options[1] the longitude.
    """
    err, resp = get_response('locate-neighbourhood?q=%s,%s' % (options[0], options[1]))
    if err == 1:
        # The API rejects malformed coordinates with a non-JSON body.
        logit('Invalid coordinates.\n\n')
        return 'Invalid coordinates.'
    if err:
        logit('Connection error.\n\n')
        return resp
    rows = [['Force', 'Neighbourhood'],
            [resp['force'], resp['neighbourhood']]]
    return validate(rows)
def forces():
    """Tabulate every police force known to the API with its identifier."""
    err, resp = get_response('forces')
    if err:
        logit('Server error.\n\n' if err == 1 else 'Connection error.\n\n')
        return resp
    table = [['Name', 'ID']]
    table.extend([force['name'], force['id']] for force in resp)
    return validate(table)
def force_info(options):
    """Show contact information for the force named in options[0]."""
    err, resp = get_response('forces/%s' % options[0])
    if err:
        logit('Server error.\n\n' if err == 1 else 'Connection error.\n\n')
        return resp
    report = '%s\nPhone Number: %s\nURL: %s\n' % (resp['name'],
                                                  resp['telephone'],
                                                  resp['url'])
    if len(resp['engagement_methods']):
        report += 'Engagement Info:\n'
        for method in resp['engagement_methods']:
            report += ' %s: %s\n' % (method['title'], method['url'])
    return validate(report)
def force_officers(options):
    """List the senior officers of a force, with contacts and biographies."""
    err, resp = get_response('forces/%s/people' % options[0])
    if err:
        logit('Server error.\n\n' if err == 1 else 'Connection error.\n\n')
        return resp
    report = ''
    for officer in resp:
        report += 'Name: %s\nRank: %s\n' % (officer['name'], officer['rank'])
        contacts = officer['contact_details']
        if len(contacts):
            report += 'Contact Details:\n'
            for key in contacts:
                report += ' %s: %s\n' % (key.title(), contacts[key])
        report += 'Biography:\n %s\n\n' % strip_html(officer['bio'])
    return validate(report)
def neighbourhoods(options):
    """Tabulate the neighbourhoods belonging to the force in options[0]."""
    err, resp = get_response('%s/neighbourhoods' % options[0])
    if err:
        logit('Server error.\n\n' if err == 1 else 'Connection error.\n\n')
        return resp
    table = [['Name', 'ID']]
    table.extend([hood['name'], hood['id']] for hood in resp)
    return validate(table)
def neighbourhood_info(options):
    """Build a detailed, human-readable profile of one neighbourhood.

    options[0] is the force ID and options[1] the neighbourhood ID.
    Returns the validated text block, or the error payload on failure.
    """
    output = ''
    err, resp = get_response('%s/%s' % (options[0], options[1]))
    if not err:
        # Header line: name, ID and population.
        output += '%s (%s) (Population: %s)\n\n' % (resp['name'],
                                                    resp['id'],
                                                    resp['population'])
        # grab() returns '' for missing/empty fields, so every optional
        # field below can be appended unconditionally.
        output += grab(resp, 'welcome_message', 'Welcome message: {s}\n\n')
        output += grab(resp, 'description', 'Description:\n {s}\n\n')
        output += grab(resp['centre'], 'latitude', 'Coordinates of centre: {s} N,')
        output += grab(resp['centre'], 'longitude', ' {s} W\n\n')
        output += '%s%s\n' % (grab(resp, 'url_force', 'Force URL: {s}\n'),
                              grab(resp, 'url_boundary', 'Boundary URL: {s}\n'))
        # For non-string fields grab() returns True when the field is
        # present and non-empty, so it doubles as an existence test.
        if grab(resp, 'links'):
            output += 'Links:\n'
            for link in resp['links']:
                output += ' %s: %s\n' % (strip_html(link['title']),
                                         link['url'])
                output += grab(link, 'description', ' Description: {s}\n')
            output += '\n'
        if grab(resp, 'locations'):
            output += 'Significant locations:\n'
            for location in resp['locations']:
                output += ' %s %s\n' % (grab(location, 'name'),
                                        grab(location, 'type', '({s})'))
                output += ' Address: %s\n Postcode: %s\n' % (grab(location, 'address'),
                                                             grab(location, 'postcode'))
                output += grab(location, 'latitude', ' Coordinates: {s} N,')
                output += grab(location, 'longitude', ' {s} W\n')
                output += grab(location, 'telephone', ' Telephone number: {s}\n')
                output += grab(location, 'description', ' Description:\n {s}\n')
            output += '\n'
        if len(resp['contact_details']):
            output += 'Contact Details:\n'
            for entry in resp['contact_details']:
                output += ' %s: %s\n' % (entry.title(),
                                         resp['contact_details'][entry])
        return validate(output)
    else:
        logit('Server error.\n\n' if err == 1 else 'Connection error.\n\n')
        return resp
def neighbourhood_boundary(options):
    """Tabulate the lat/long pairs outlining a neighbourhood's boundary."""
    err, resp = get_response('%s/%s/boundary' % (options[0], options[1]))
    if err:
        logit('Server error.\n\n' if err == 1 else 'Connection error.\n\n')
        return resp
    table = [['Latitude', 'Longitude']]
    table += [[point['latitude'], point['longitude']] for point in resp]
    return validate(table)
def neighbourhood_officers(options):
    """List the officers on a neighbourhood policing team.

    Output format matches force_officers() for consistency.
    """
    err, resp = get_response('%s/%s/people' % (options[0], options[1]))
    if err:
        logit('Server error.\n\n' if err == 1 else 'Connection error.\n\n')
        return resp
    report = ''
    for officer in resp:
        report += 'Name: %s\nRank: %s\n' % (officer['name'], officer['rank'])
        details = officer['contact_details']
        if len(details):
            report += 'Contact Details:\n'
            for field in details:
                report += ' %s: %s\n' % (field.title(), details[field])
        report += 'Biography:\n %s\n\n' % strip_html(officer['bio'])
    return validate(report)
def neighbourhood_issues(options):
    """Summarise the current policing priorities for a neighbourhood."""
    err, resp = get_response('%s/%s/priorities' % (options[0], options[1]))
    if err:
        logit('Server error.\n\n' if err == 1 else 'Connection error.\n\n')
        return resp
    report = ''
    for issue in resp:
        # Every field is optional; grab() yields '' when absent.
        report += grab(issue, 'issue-date', 'Issue date: {s}\n')
        report += grab(issue, 'issue', 'Issue:\n {s}\n')
        report += grab(issue, 'action-date', 'Action date: {s}\n')
        report += grab(issue, 'action', 'Action:\n {s}\n')
        report += '\n'
    return validate(report)
def neighbourhood_events(options):
    """Describe upcoming neighbourhood events with contact information."""
    err, resp = get_response('%s/%s/events' % (options[0], options[1]))
    if err:
        logit('Server error.\n\n' if err == 1 else 'Connection error.\n\n')
        return resp
    report = ''
    for event in resp:
        report += grab(event, 'title')
        report += grab(event, 'type', ' - ({s})\n')
        report += grab(event, 'start_date', 'Date: {s}\n')
        report += grab(event, 'address', 'Address: {s}\n')
        report += grab(event, 'description', 'Description:\n {s}\n')
        details = event['contact_details']
        if len(details):
            report += 'Contact Details:\n'
            for field in details:
                report += ' %s: %s\n' % (field.title(), details[field])
        report += '\n'
    return validate(report)
def save():
    """Flush the in-memory session log to a user-chosen file (append mode)."""
    global log
    if log:
        # logging=False: don't record the file-name prompt itself.
        file_name = get_options(['File Name'], False)[0]
        print '\nAppending logs to %s...' % file_name
        try:
            with open(file_name, 'a') as f:
                for entry in log:
                    f.write(entry.encode('utf8'))
            # Only clear the buffer once every entry has been written.
            log = []
            print 'Successfully saved data to %s.' % file_name
            print 'Cleared logs from memory.'
        except IOError:
            print 'Couldn\'t open that file for writing!'
    else:
        print '\nThere\'s no logs to save!'
def validate(response):
    """Log API output and return it formatted for display.

    Strings pass through; anything else is assumed to be tabular data
    (first row = headers). Empty responses produce a placeholder.
    """
    if not response:
        logit('No data available.\n\n')
        return '\nNo data available.'
    if isinstance(response, basestring):
        logit('%s\n' % response)
        return '\n%s' % response.strip()
    formatted = tabulate(response, headers='firstrow', tablefmt='fancy_grid')
    logit('%s\n\n' % formatted)
    return '\n%s' % formatted
def interactive():
print """
_____ __ ___ __ __
/ ___// /_____ ______/ (_)___ _/ /_ / /_
\__ \/ __/ __ `/ ___/ / / __ `/ __ \/ __/
___/ / /_/ /_/ / / / / / /_/ / / / / /_
/____/\__/\__,_/_/ /_/_/\__, /_/ /_/\__/
/____/
A simplified command-line interface for the official UK police API.
Please report any issues at https://github.com/libeclipse/starlight/issues
Use `help` for a list of commands."""
saveable = [
'last-updated',
'categories',
'locate',
'list',
'officers',
'info',
'boundary',
'issues',
'events'
]
while True:
command = raw_input('\n/ >> ').strip().lower()
if command in saveable:
logit('/ >> %s\n\n' % command)
if command == 'help' or command == 'ls':
print """
`help` - Prints this help message.
`crimes` - Crime related stuff.
`forces` - Force related stuff.
`neighbourhoods` - Neighbourhood related stuff.
`save` - Saves the whole session's logs to file.
`clear` - Clears the terminal window.
`exit` - Exits the program."""
elif 'crime' in command:
while True:
command = raw_input('\n/crimes/ >> ').strip().lower()
if command in saveable:
logit('/crimes/ >> %s\n\n' % command)
if command == 'help' or command == 'ls':
print """
`help` - Prints this help message.
`last-updated` - Returns the date the crime data was last updated.
`categories` - Lists all the crime categories and their IDs.
`save` - Saves the whole session's log to file.
`clear` - Clears the terminal window.
`back` - Back to main menu.
`exit` - Exits the program."""
elif command == 'last-updated':
print crime_last_updated()
elif command == 'categories':
print crime_categories()
elif command == 'save':
save()
elif command == 'clear':
clear()
| |
occurs that is
longer than `max_rate_limit` when making a request.
hikari.errors.RateLimitedError
Usually, Hikari will handle and retry on hitting
rate-limits automatically. This includes most bucket-specific
rate-limits and global rate-limits. In some rare edge cases,
however, Discord implements other undocumented rules for
rate-limiting, such as limits per attribute. These cannot be
detected or handled normally by Hikari due to their undocumented
nature, and will trigger this exception if they occur.
hikari.errors.InternalServerError
If an internal error occurs on Discord while handling the request.
"""
@abc.abstractmethod
async def create_message(
    self,
    channel: snowflakes.SnowflakeishOr[channels.TextChannel],
    content: undefined.UndefinedOr[typing.Any] = undefined.UNDEFINED,
    *,
    embed: undefined.UndefinedOr[embeds_.Embed] = undefined.UNDEFINED,
    attachment: undefined.UndefinedOr[files.Resourceish] = undefined.UNDEFINED,
    attachments: undefined.UndefinedOr[typing.Sequence[files.Resourceish]] = undefined.UNDEFINED,
    tts: undefined.UndefinedOr[bool] = undefined.UNDEFINED,
    nonce: undefined.UndefinedOr[str] = undefined.UNDEFINED,
    mentions_everyone: undefined.UndefinedOr[bool] = undefined.UNDEFINED,
    user_mentions: undefined.UndefinedOr[
        typing.Union[typing.Collection[snowflakes.SnowflakeishOr[users.PartialUser]], bool]
    ] = undefined.UNDEFINED,
    role_mentions: undefined.UndefinedOr[
        typing.Union[typing.Collection[snowflakes.SnowflakeishOr[guilds.PartialRole]], bool]
    ] = undefined.UNDEFINED,
) -> messages_.Message:
    """Create a message in the given channel.

    Parameters
    ----------
    channel : hikari.snowflakes.SnowflakeishOr[hikari.channels.TextChannel]
        The channel to create the message in.
    content : hikari.undefined.UndefinedOr[typing.Any]
        If provided, the message contents. If
        `hikari.undefined.UNDEFINED`, then nothing will be sent
        in the content. Any other value here will be cast to a
        `builtins.str`.

        If this is a `hikari.embeds.Embed` and no `embed` kwarg is
        provided, then this will instead update the embed. This allows for
        simpler syntax when sending an embed alone.

        Likewise, if this is a `hikari.files.Resource`, then the
        content is instead treated as an attachment if no `attachment` and
        no `attachments` kwargs are provided.

    Other Parameters
    ----------------
    embed : hikari.undefined.UndefinedOr[hikari.embeds.Embed]
        If provided, the message embed.
    attachment : hikari.undefined.UndefinedOr[hikari.files.Resourceish],
        If provided, the message attachment. This can be a resource,
        or string of a path on your computer or a URL.
    attachments : hikari.undefined.UndefinedOr[typing.Sequence[hikari.files.Resourceish]],
        If provided, the message attachments. These can be resources, or
        strings consisting of paths on your computer or URLs.
    tts : hikari.undefined.UndefinedOr[builtins.bool]
        If provided, whether the message will be read out by a screen
        reader using Discord's TTS (text-to-speech) system.
    nonce : hikari.undefined.UndefinedOr[builtins.str]
        An arbitrary identifier to associate with the message. This
        can be used to identify it later in received events. If provided,
        this must be less than 32 bytes. If not provided, then
        a null value is placed on the message instead. All users can
        see this value.
    mentions_everyone : hikari.undefined.UndefinedOr[builtins.bool]
        If provided, whether the message should parse @everyone/@here
        mentions.
    user_mentions : hikari.undefined.UndefinedOr[typing.Union[typing.Collection[hikari.snowflakes.SnowflakeishOr[hikari.users.PartialUser]], builtins.bool]]
        If provided, and `builtins.True`, all user mentions will be detected.
        If provided, and `builtins.False`, all user mentions will be ignored
        if appearing in the message body.
        Alternatively this may be a collection of
        `hikari.snowflakes.Snowflake`, or
        `hikari.users.PartialUser` derivatives to enforce mentioning
        specific users.
    role_mentions : hikari.undefined.UndefinedOr[typing.Union[typing.Collection[hikari.snowflakes.SnowflakeishOr[hikari.guilds.PartialRole]], builtins.bool]]
        If provided, and `builtins.True`, all role mentions will be detected.
        If provided, and `builtins.False`, all role mentions will be ignored
        if appearing in the message body.
        Alternatively this may be a collection of
        `hikari.snowflakes.Snowflake`, or
        `hikari.guilds.PartialRole` derivatives to enforce mentioning
        specific roles.

    !!! note
        Attachments can be passed as many different things, to aid in
        convenience.

        - If a `pathlib.PurePath` or `builtins.str` to a valid URL, the
            resource at the given URL will be streamed to Discord when
            sending the message. Subclasses of
            `hikari.files.WebResource` such as
            `hikari.files.URL`,
            `hikari.messages.Attachment`,
            `hikari.emojis.Emoji`,
            `EmbedResource`, etc will also be uploaded this way.
            This will use bit-inception, so only a small percentage of the
            resource will remain in memory at any one time, thus aiding in
            scalability.
        - If a `hikari.files.Bytes` is passed, or a `builtins.str`
            that contains a valid data URI is passed, then this is uploaded
            with a randomized file name if not provided.
        - If a `hikari.files.File`, `pathlib.PurePath` or
            `builtins.str` that is an absolute or relative path to a file
            on your file system is passed, then this resource is uploaded
            as an attachment using non-blocking code internally and streamed
            using bit-inception where possible. This depends on the
            type of `concurrent.futures.Executor` that is being used for
            the application (default is a thread pool which supports this
            behaviour).

    Returns
    -------
    hikari.messages.Message
        The created message.

    Raises
    ------
    builtins.ValueError
        If more than 100 unique objects/entities are passed for
        `role_mentions` or `user_mentions`.
    builtins.TypeError
        If both `attachment` and `attachments` are specified.
    hikari.errors.BadRequestError
        This may be raised in several discrete situations, such as messages
        being empty with no attachments or embeds; messages with more than
        2000 characters in them, embeds that exceed one of the many embed
        limits; too many attachments; attachments that are too large;
        invalid image URLs in embeds; users in `user_mentions` not being
        mentioned in the message content; roles in `role_mentions` not
        being mentioned in the message content.
    hikari.errors.UnauthorizedError
        If you are unauthorized to make the request (invalid/missing token).
    hikari.errors.ForbiddenError
        If you are missing the `SEND_MESSAGES` permission in the channel
        or the person you are trying to message has DMs disabled.
    hikari.errors.NotFoundError
        If the channel is not found.
    hikari.errors.RateLimitTooLongError
        Raised in the event that a rate limit occurs that is
        longer than `max_rate_limit` when making a request.
    hikari.errors.RateLimitedError
        Usually, Hikari will handle and retry on hitting
        rate-limits automatically. This includes most bucket-specific
        rate-limits and global rate-limits. In some rare edge cases,
        however, Discord implements other undocumented rules for
        rate-limiting, such as limits per attribute. These cannot be
        detected or handled normally by Hikari due to their undocumented
        nature, and will trigger this exception if they occur.
    hikari.errors.InternalServerError
        If an internal error occurs on Discord while handling the request.

    !!! warning
        You are expected to make a connection to the gateway and identify
        once before being able to use this endpoint for a bot.
    """  # noqa: E501 - Line too long
@abc.abstractmethod
async def create_crossposts(
    self,
    channel: snowflakes.SnowflakeishOr[channels.GuildNewsChannel],
    message: snowflakes.SnowflakeishOr[messages_.PartialMessage],
) -> messages_.Message:
    """Broadcast an announcement message.

    Parameters
    ----------
    channel : hikari.snowflakes.SnowflakeishOr[hikari.channels.GuildNewsChannel]
        The object or ID of the news channel to crosspost a message in.
    message : hikari.snowflakes.SnowflakeishOr[hikari.messages.PartialMessage]
        The object or ID of the message to crosspost.

    Returns
    -------
    hikari.messages.Message
        The message object that was crossposted.

    Raises
    ------
    hikari.errors.BadRequestError
        If you tried to crosspost a message that has already been broadcast.
    hikari.errors.UnauthorizedError
        If you are unauthorized to make the request (invalid/missing token).
    hikari.errors.ForbiddenError
        If you try to crosspost a message by the current user without the
        `SEND_MESSAGES` permission for the target news channel or try to
        crosspost a message by another user without both the `SEND_MESSAGES`
        and `MANAGE_MESSAGES` permissions for the target channel.
    hikari.errors.NotFoundError
        If the channel or message is not found.
    hikari.errors.RateLimitTooLongError
        Raised in the event that a rate limit occurs that is
        longer than `max_rate_limit` when making a request.
    hikari.errors.RateLimitedError
        Usually, Hikari will handle and retry on hitting
        rate-limits automatically. This includes most bucket-specific
        rate-limits and global rate-limits. In some rare edge cases,
        however, Discord implements other undocumented rules for
        rate-limiting, such as limits per attribute. These cannot be
        detected or handled normally by Hikari due to their undocumented
        nature, and will trigger this exception if they occur.
    hikari.errors.InternalServerError
        If an internal error occurs on Discord while handling the request.
    """
@abc.abstractmethod
async def edit_message(
self,
channel: snowflakes.SnowflakeishOr[channels.TextChannel],
message: snowflakes.SnowflakeishOr[messages_.PartialMessage],
content: undefined.UndefinedOr[typing.Any] = undefined.UNDEFINED,
*,
embed: undefined.UndefinedNoneOr[embeds_.Embed] = undefined.UNDEFINED,
mentions_everyone: undefined.UndefinedOr[bool] = undefined.UNDEFINED,
user_mentions: undefined.UndefinedOr[
typing.Union[typing.Collection[snowflakes.SnowflakeishOr[users.PartialUser]], bool]
] = undefined.UNDEFINED,
role_mentions: undefined.UndefinedOr[
typing.Union[typing.Collection[snowflakes.SnowflakeishOr[guilds.PartialRole]], bool]
] = undefined.UNDEFINED,
flags: undefined.UndefinedOr[messages_.MessageFlag] = undefined.UNDEFINED,
) -> messages_.Message:
"""Edit an existing message in a given channel.
Parameters
----------
channel : hikari.snowflakes.SnowflakeishOr[hikari.channels.TextChannel]
The channel to create the message in. This may be
the object or the ID of an existing channel.
message : hikari.snowflakes.SnowflakeishOr[hikari.messages.PartialMessage]
The message to edit. This may be the object or the ID
of an existing message.
content : hikari.undefined.UndefinedOr[typing.Any]
If provided, the message content to update with. If
`hikari.undefined.UNDEFINED`, then the content will not
be changed. If `builtins.None`, then the content will be removed.
Any other value will be cast to a `builtins.str` before sending.
If this is a `hikari.embeds.Embed` and no `embed` kwarg is
provided, then this will instead update the embed. This allows for
simpler syntax when sending an | |
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GEM: Generation Evaluation Metrics supporting datasets"""
from __future__ import absolute_import, division, print_function
import csv
import json
import os
import datasets
# TODO: Add BibTeX citation
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
authors={huggingface, Inc.
},
year={2020}
}
"""
_DESCRIPTION = """\
GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,
both through human annotations and automated Metrics.
GEM aims to:
- measure NLG progress across 13 datasets spanning many NLG tasks and languages.
- provide an in-depth analysis of data and models presented via data statements and challenge sets.
- develop standards for evaluation of generated text using both automated and human metrics.
It is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development
by extending existing data or developing datasets for additional languages.
"""
_HOMEPAGE = "https://gem-benchmark.github.io/"
_LICENSE = "CC-BY-SA-4.0"
# Task family -> {source dataset: [GEM config names derived from it]}.
# Flattened into one BuilderConfig per config name by Gem.BUILDER_CONFIGS.
_TASKS = {
    "summarization": {
        "mlsum": ["mlsum_de", "mlsum_es"],
        "wiki_lingua": ["wiki_lingua_es_en", "wiki_lingua_ru_en", "wiki_lingua_tr_en", "wiki_lingua_vi_en"],
        "xsum": ["xsum"],
    },
    "struct2text": {
        "common_gen": ["common_gen"],
        "cs_restaurants": ["cs_restaurants"],
        "dart": ["dart"],
        "e2e": ["e2e_nlg"],
        "totto": ["totto"],
        "web_nlg": ["web_nlg_en", "web_nlg_ru"],
    },
    "simplification": {
        "wiki_auto_asset_turk": ["wiki_auto_asset_turk"],
    },
    "dialog": {
        "schema_guided_dialog": ["schema_guided_dialog"],
    },
}
_URLs = {
"common_gen": {
"data": "https://storage.googleapis.com/huggingface-nlp/datasets/common_gen/commongen_data.zip",
},
"cs_restaurants": {
"train": "https://raw.githubusercontent.com/UFAL-DSG/cs_restaurant_dataset/master/train.json",
"validation": "https://raw.githubusercontent.com/UFAL-DSG/cs_restaurant_dataset/master/devel.json",
"test": "https://raw.githubusercontent.com/UFAL-DSG/cs_restaurant_dataset/master/test.json",
},
"dart": {
"train": "https://raw.githubusercontent.com/Yale-LILY/dart/master/data/v1.1.1/dart-v1.1.1-full-train.json",
"validation": "https://raw.githubusercontent.com/Yale-LILY/dart/master/data/v1.1.1/dart-v1.1.1-full-dev.json",
"test": "https://raw.githubusercontent.com/Yale-LILY/dart/master/data/v1.1.1/dart-v1.1.1-full-test.json",
},
"e2e_nlg": {
"train": "https://github.com/tuetschek/e2e-cleaning/raw/master/cleaned-data/train-fixed.no-ol.csv",
"validation": "https://github.com/tuetschek/e2e-cleaning/raw/master/cleaned-data/devel-fixed.no-ol.csv",
"test": "https://github.com/tuetschek/e2e-cleaning/raw/master/cleaned-data/test-fixed.csv",
},
"mlsum_de": {
"train": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/de_train.zip",
"validation": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/de_val.zip",
"test": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/de_test.zip",
"bad_ids": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_mlsum_bad_ids.json",
},
"mlsum_es": {
"train": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/es_train.zip",
"validation": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/es_val.zip",
"test": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/es_test.zip",
"bad_ids": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_mlsum_bad_ids.json",
},
"schema_guided_dialog": {
"data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_sgd.json.zip",
},
"totto": {
"data": "https://storage.googleapis.com/totto/totto_data.zip",
},
"web_nlg_en": {
"train": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_en_train.json",
"validation": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_en_val.json",
"test": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_en_test.json",
},
"web_nlg_ru": {
"train": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_ru_train.json",
"validation": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_ru_val.json",
"test": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_ru_test.json",
},
"wiki_auto_asset_turk": {
"train": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-manual/train.tsv",
"validation": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-manual/dev.tsv",
},
"wiki_lingua_es_en": {
"data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
},
"wiki_lingua_ru_en": {
"data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
},
"wiki_lingua_tr_en": {
"data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
},
"wiki_lingua_vi_en": {
"data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
},
"xsum": {
"data": "http://bollin.inf.ed.ac.uk/public/direct/XSUM-EMNLP18-Summary-Data-Original.tar.gz",
"splits": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_xsum_confidence_0.8.json",
},
}
# Add Turk and Asset files
# ASSET ships 10 reference simplification files per test sentence ...
for i in range(10):
    _URLs["wiki_auto_asset_turk"][
        f"test_asset_{i}"
    ] = f"https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.{i}"
# ... and TurkCorpus ships 8; each becomes its own download entry.
for i in range(8):
    _URLs["wiki_auto_asset_turk"][
        f"test_turk_{i}"
    ] = f"https://raw.githubusercontent.com/cocoxu/simplification/master/data/turkcorpus/GEM/test.8turkers.tok.turk.{i}"
# Dialog act names for the schema_guided_dialog config; used as
# ClassLabel names in Gem._info, so order matters.
_SGD_ACTS = [
    "AFFIRM",
    "AFFIRM_INTENT",
    "CONFIRM",
    "GOODBYE",
    "INFORM",
    "INFORM_COUNT",
    "INFORM_INTENT",
    "NEGATE",
    "NEGATE_INTENT",
    "NOTIFY_FAILURE",
    "NOTIFY_SUCCESS",
    "OFFER",
    "OFFER_INTENT",
    "REQUEST",
    "REQUEST_ALTS",
    "REQ_MORE",
    "SELECT",
    "THANK_YOU",
]
# Boilerplate share-bar lines stripped from XSum source articles.
_XSUM_REMOVE_LINES = set(
    [
        "Share this with\n",
        "Email\n",
        "Facebook\n",
        "Messenger\n",
        "Twitter\n",
        "Pinterest\n",
        "WhatsApp\n",
        "Linkedin\n",
        "LinkedIn\n",
        "Copy this link\n",
        "These are external links and will open in a new window\n",
    ]
)
class Gem(datasets.GeneratorBasedBuilder):
"""GEM: datasets supporting the Generation Evaluation Metrics 2021 shared task."""
# One BuilderConfig per dataset variant declared in _TASKS.
BUILDER_CONFIGS = [
    datasets.BuilderConfig(
        name=conf,
        version=datasets.Version("1.0.0"),
        description=f"GEM benchmark: {task} task, {conf} subset",
    )
    for task, dset_confs in _TASKS.items()
    for conf_list in dset_confs.values()
    for conf in conf_list
]
DEFAULT_CONFIG_NAME = "common_gen"  # First alphabetical
def _info(self):
    """Return the DatasetInfo (feature schema) for the selected config.

    Each branch declares the schema of one GEM subset; every schema
    carries a "gem_id", a "target" (single reference, used for train)
    and "references" (possibly multiple, used for evaluation).
    """
    if self.config.name == "common_gen":
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "concept_set_id": datasets.Value("int32"),
                "concepts": [datasets.Value("string")],
                "target": datasets.Value("string"),  # single target for train
                "references": [datasets.Value("string")],  # multiple references for validation
            }
        )
    elif self.config.name == "cs_restaurants":
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "dialog_act": datasets.Value("string"),
                "dialog_act_delexicalized": datasets.Value("string"),
                "target_delexicalized": datasets.Value("string"),
                "target": datasets.Value("string"),
                "references": [datasets.Value("string")],
            }
        )
    elif self.config.name == "dart":
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "dart_id": datasets.Value("int32"),
                "tripleset": [[datasets.Value("string")]],  # list of triples
                "subtree_was_extended": datasets.Value("bool"),
                "target_sources": [datasets.Value("string")],
                "target": datasets.Value("string"),  # single target for train
                "references": [datasets.Value("string")],
            }
        )
    elif self.config.name == "e2e_nlg":
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "meaning_representation": datasets.Value("string"),
                "target": datasets.Value("string"),
                "references": [datasets.Value("string")],
            }
        )
    elif self.config.name.startswith("mlsum"):
        # Shared schema for both MLSUM languages (mlsum_de / mlsum_es).
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "text": datasets.Value("string"),
                "topic": datasets.Value("string"),
                "url": datasets.Value("string"),
                "title": datasets.Value("string"),
                "date": datasets.Value("string"),
                "target": datasets.Value("string"),
                "references": [datasets.Value("string")],
            }
        )
    elif self.config.name == "schema_guided_dialog":
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                # Dialog act labels come from the module-level _SGD_ACTS list.
                "dialog_acts": [
                    {
                        "act": datasets.ClassLabel(names=_SGD_ACTS),
                        "slot": datasets.Value("string"),
                        "values": [datasets.Value("string")],
                    }
                ],
                "dialog_id": datasets.Value("string"),
                "turn_id": datasets.Value("int32"),
                "prompt": datasets.Value("string"),
                "target": datasets.Value("string"),
                "references": [datasets.Value("string")],
            }
        )
    elif self.config.name == "totto":
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "totto_id": datasets.Value("int32"),
                "table_page_title": datasets.Value("string"),
                "table_webpage_url": datasets.Value("string"),
                "table_section_title": datasets.Value("string"),
                "table_section_text": datasets.Value("string"),
                # Table is a list of rows; each row a list of cell dicts.
                "table": [
                    [
                        {
                            "column_span": datasets.Value("int32"),
                            "is_header": datasets.Value("bool"),
                            "row_span": datasets.Value("int32"),
                            "value": datasets.Value("string"),
                        }
                    ]
                ],
                "highlighted_cells": [[datasets.Value("int32")]],
                "example_id": datasets.Value("string"),
                "sentence_annotations": [
                    {
                        "original_sentence": datasets.Value("string"),
                        "sentence_after_deletion": datasets.Value("string"),
                        "sentence_after_ambiguity": datasets.Value("string"),
                        "final_sentence": datasets.Value("string"),
                    }
                ],
                "overlap_subset": datasets.Value("string"),
                "target": datasets.Value("string"),  # single target for train
                "references": [datasets.Value("string")],
            },
        )
    elif self.config.name.startswith("web_nlg"):
        # Shared schema for web_nlg_en and web_nlg_ru.
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "input": [datasets.Value("string")],
                "target": datasets.Value("string"),  # single target for train
                "references": [datasets.Value("string")],
                "category": datasets.Value("string"),
                "webnlg_id": datasets.Value("string"),
            }
        )
    elif self.config.name == "wiki_auto_asset_turk":
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "source_id": datasets.Value("string"),
                "target_id": datasets.Value("string"),
                "source": datasets.Value("string"),
                "target": datasets.Value("string"),
                "references": [datasets.Value("string")],
            }
        )
    elif self.config.name.startswith("wiki_lingua"):
        # Shared schema for all wiki_lingua_*_en language pairs.
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "source": datasets.Value("string"),
                "target": datasets.Value("string"),
                "references": [datasets.Value("string")],
            }
        )
    elif self.config.name == "xsum":
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "xsum_id": datasets.Value("string"),
                "document": datasets.Value("string"),
                "target": datasets.Value("string"),
                "references": [datasets.Value("string")],
            }
        )
    # NOTE(review): an unknown config name would leave `features` unbound;
    # unreachable in practice because BUILDER_CONFIGS is built from _TASKS.
    return datasets.DatasetInfo(
        description=_DESCRIPTION,
        features=features,
        supervised_keys=None,
        homepage=_HOMEPAGE,
        license=_LICENSE,
        citation=_CITATION,
    )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators.

        Downloads (and extracts) the URLs registered for the active config in
        ``_URLs`` and wires each split name to the file path(s) that
        ``_generate_examples`` will read.

        NOTE(review): a config name matching none of the branches below falls
        through and implicitly returns ``None`` — presumably every declared
        builder config is covered; confirm against the builder's config list.
        """
        # `dl_dir` mirrors the structure of `_URLs[self.config.name]`:
        # either a mapping of split-name -> file, or a single "data" archive.
        dl_dir = dl_manager.download_and_extract(_URLs[self.config.name])
        if self.config.name == "common_gen":
            # One archive with a jsonl file per split; the test split has no references.
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": os.path.join(dl_dir["data"], "commongen.train.jsonl"),
                        "split": "train",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepath": os.path.join(dl_dir["data"], "commongen.dev.jsonl"),
                        "split": "validation",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepath": os.path.join(dl_dir["data"], "commongen.test_noref.jsonl"),
                        "split": "test",
                    },
                ),
            ]
        # The next three configs ship one downloaded file per split, keyed by split name.
        elif self.config.name == "cs_restaurants":
            return [
                datasets.SplitGenerator(name=spl, gen_kwargs={"filepath": dl_dir[spl], "split": spl})
                for spl in ["train", "validation", "test"]
            ]
        elif self.config.name == "dart":
            return [
                datasets.SplitGenerator(name=spl, gen_kwargs={"filepath": dl_dir[spl], "split": spl})
                for spl in ["train", "validation", "test"]
            ]
        elif self.config.name == "e2e_nlg":
            return [
                datasets.SplitGenerator(name=spl, gen_kwargs={"filepath": dl_dir[spl], "split": spl})
                for spl in ["train", "validation", "test"]
            ]
        elif self.config.name.startswith("mlsum"):
            # Config name encodes the language, e.g. "mlsum_de" -> "de".
            lang = self.config.name.split("_")[1]
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": os.path.join(dl_dir["train"], lang + "_train.jsonl"),
                        "split": "train",
                        "lang": lang,
                        # IDs of examples to exclude (bad/leaked data), shared by all splits.
                        "filepaths": dl_dir["bad_ids"],
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepath": os.path.join(dl_dir["validation"], lang + "_val.jsonl"),
                        "split": "validation",
                        "lang": lang,
                        "filepaths": dl_dir["bad_ids"],
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepath": os.path.join(dl_dir["test"], lang + "_test.jsonl"),
                        "split": "test",
                        "lang": lang,
                        "filepaths": dl_dir["bad_ids"],
                    },
                ),
            ]
        elif self.config.name == "schema_guided_dialog":
            # All splits live in a single JSON file; `split` selects the section.
            return [
                datasets.SplitGenerator(
                    name=spl, gen_kwargs={"filepath": os.path.join(dl_dir["data"], "gem_sgd.json"), "split": spl}
                )
                for spl in ["train", "validation", "test"]
            ]
        elif self.config.name == "totto":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": os.path.join(dl_dir["data"], "totto_data/totto_train_data.jsonl"),
                        "split": "train",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepath": os.path.join(dl_dir["data"], "totto_data/totto_dev_data.jsonl"),
                        "split": "validation",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepath": os.path.join(dl_dir["data"], "totto_data/unlabeled_totto_test_data.jsonl"),
                        "split": "test",
                    },
                ),
            ]
        elif self.config.name.startswith("web_nlg"):
            return [
                datasets.SplitGenerator(name=spl, gen_kwargs={"filepath": dl_dir[spl], "split": spl})
                for spl in ["train", "validation", "test"]
            ]
        elif self.config.name == "wiki_auto_asset_turk":
            # Two extra test splits (ASSET: 10 reference files, TURK: 8); for those
            # `filepath` is unused and the reference files go in via `filepaths`.
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": dl_dir["train"],
                        "split": "train",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepath": dl_dir["validation"],
                        "split": "validation",
                    },
                ),
                datasets.SplitGenerator(
                    name="test_asset",
                    gen_kwargs={
                        "filepath": "",
                        "split": "test",
                        "filepaths": [dl_dir[f"test_asset_{i}"] for i in range(10)],
                    },
                ),
                datasets.SplitGenerator(
                    name="test_turk",
                    gen_kwargs={
                        "filepath": "",
                        "split": "test",
                        "filepaths": [dl_dir[f"test_turk_{i}"] for i in range(8)],
                    },
                ),
            ]
        elif self.config.name.startswith("wiki_lingua"):
            # Config name is e.g. "wiki_lingua_es_en"; second-to-last token is the
            # source language. All splits read from the same directory; the
            # generator picks files by `split` ("val" here, not "validation").
            lang = self.config.name.split("_")[-2]
            base_dir = os.path.join(dl_dir["data"], "GEM_data_crosslingual", f"{lang}_en")
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": base_dir,
                        "split": "train",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepath": base_dir,
                        "split": "val",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepath": base_dir,
                        "split": "test",
                    },
                ),
            ]
        elif self.config.name == "xsum":
            # `filepath` holds the split-id lists; the article files live under
            # the extracted "bbc-summary-data" directory passed as `filepaths`.
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": dl_dir["splits"],
                        "split": "train",
                        "filepaths": os.path.join(dl_dir["data"], "bbc-summary-data"),
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepath": dl_dir["splits"],
                        "split": "validation",
                        "filepaths": os.path.join(dl_dir["data"], "bbc-summary-data"),
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepath": dl_dir["splits"],
                        "split": "test",
                        "filepaths": os.path.join(dl_dir["data"], "bbc-summary-data"),
                    },
                ),
            ]
def _generate_examples(self, filepath, split, filepaths=None, lang=None):
""" Yields examples. """
if self.config.name == "common_gen":
with open(filepath, encoding="utf-8") as f:
id_ = -1
i = -1
for row in f:
row = row.replace(", }", "}") # Fix possible JSON format error
data = json.loads(row)
concepts = [word for word in data["concept_set"].split("#")]
if split == "train":
i += 1
for scene in data["scene"]:
id_ += 1
yield id_, {
"gem_id": f"{self.config.name}-{split}-{id_}",
"concept_set_id": i,
"concepts": concepts,
"target": scene,
"references": [],
}
else:
id_ += 1
yield id_, {
"gem_id": f"{self.config.name}-{split}-{id_}",
"concept_set_id": id_,
"concepts": concepts,
"target": "" if split == "test" else data["scene"][0],
"references": [] if split == "test" else data["scene"],
}
elif | |
better, better_rows):
i = 0
table1 = []
table2 = []
sheet_name = []
for title in titles:
print("\n")
print("Test Name: " + title)
print("---------------------------------------------------------------------------------------------------")
if not better[i]:
print("Response Times Worse than in the Previous Build:\n")
print("☹ No Requests with Better Response Times ☹\n")
df1 = pd.DataFrame({"Results": "☹ No Requests with Better Response Times ☹"},
index=pd.Index(range(1, 2, 1), name="Better Requests"))
table1.append(df1)
else:
print("Response Times the Same or Better than in the Previous Build:\n")
df1 = pd.DataFrame({"Request Name": better_requests[i],
"Deviation(ms)": better[i]},
index=pd.Index(range(1, better_rows[i] + 1, 1), name="Same or Better Response Times"))
print(df1.to_string() + "\n")
table1.append(df1)
if not worse[i]:
print("Response Times Worse than in the Previous Build:\n")
print("☺ No Requests with Worse Response Times ☺\n")
df2 = pd.DataFrame({"Results": "☺ No Requests with Worse Response Times ☺"},
index=pd.Index(range(1, 2, 1), name="Worse Requests"))
table2.append(df2)
else:
print("Response Times Worse than in the Previous Build:\n")
df2 = pd.DataFrame({"Request Name": worse_requests[i],
"Deviation(ms)": worse[i]},
index=pd.Index(range(1, worse_rows[i] + 1, 1), name="Worse Response Times"))
print(df2.to_string() + "\n")
table2.append(df2)
sheet_name.append(title[:31])
i += 1
return table1, table2, sheet_name
@staticmethod
def errors_parser(titles, tables):
i = 0
rows = []
request = []
errors_percentage = []
errors_deviation = []
codes = []
codes_old = []
title = []
flag = []
for table in tables:
request_name = core.request_name(table)
http_codes, old_codes = core.http_codes(table)
current, compared = core.errors(table)
request_part, errors_percentage_part, errors_deviation_part, codes_part, old_codes_part, rows_part = core.error_requests(request_name, current, compared, http_codes, old_codes)
if request_part:
request.append(request_part)
errors_percentage.append(errors_percentage_part)
errors_deviation.append(errors_deviation_part)
codes.append(codes_part)
codes_old.append(old_codes_part)
rows.append(rows_part)
title.append(titles[i])
flag.append(1)
else:
request.append("No Problematical Requests")
errors_percentage.append("No Errors")
errors_deviation.append("No Error Deviation")
codes.append("No HTTP Codes")
codes_old.append("No Old HTTP Codes")
rows.append(1)
title.append(titles[i])
i += 1
return title, request, errors_percentage, errors_deviation, codes, codes_old, rows, flag
@staticmethod
def errors_result(titles, error_requests, errors_percentage, errors_deviation, codes, old_codes, rows, flag):
table = []
sheet_name = []
if not flag:
print("\n")
print("☺ No Test with Request Errors in Entire Suite ☺\n")
df = pd.DataFrame({"Error Requests": "☺ No Test with Request Errors in Entire Suite ☺",
"Number Suite Tests": len(titles)},
index=pd.Index(range(1, 2, 1), name="Errors"))
table.append(df)
sheet_name.append("Suite Has No Errors")
else:
i = 0
for title in titles:
print("\n")
print("Test Name: " + title)
print("-----------------------------------------------------------------------------------------------")
print("Requests with Errors:\n")
df = pd.DataFrame({"ErrorRequests": error_requests[i],
"ErrorPercentage": errors_percentage[i],
"CurrentCodes": codes[i],
"ErrorDeviation": errors_deviation[i],
"PreviousCodes": old_codes[i]},
index=pd.Index(range(1, rows[i] + 1, 1), name="Errors"))
print(df.to_string() + "\n")
i += 1
table.append(df)
sheet_name.append(title[:31])
return table, sheet_name
@staticmethod
def response_parser(tables, data_type):
i = 0
request = []
response = []
previous = []
deviation = []
error_percentage = []
rows = []
for table in tables:
request_name = core.request_name(table)
response_time, deviation_int = core.response_time(table, data_type)
current, compared = core.errors(table)
request_part, response_part, previous_part, deviation_part, percentage, rows_part = core.response_classification(request_name, response_time, deviation_int, current)
request.append(request_part)
response.append(response_part)
previous.append(previous_part)
deviation.append(deviation_part)
error_percentage.append(percentage)
rows.append(rows_part)
i += 1
return request, response, previous, deviation, error_percentage, rows
@staticmethod
def response_by_request_stage_results(suite_title, titles, request, responses, error_percentage, rows, threshold):
response_table = []
errors_table = []
dict_response = {}
dict_errors = {}
print("\n")
print("SuiteName: " + suite_title)
print("---------------------------------------------------------------------------------------------------")
print("Response Times over " + str(threshold) + " ms:\n")
i = 0
for title in titles:
dict_response['Request Name'] = request[i]
dict_response[title] = responses[i]
dict_errors['Request Name'] = request[i]
dict_errors[title] = error_percentage[i]
i += 1
df1 = pd.DataFrame(dict_response, index=pd.Index(range(1, max(rows) + 1, 1), name="Responses over " + str(threshold) + " ms"))
df1 = df1[df1[df1 > threshold].count(axis=1) > 1]
if df1.empty:
print("☺ All Requests are within the Expected Response Time in " + suite_title + "!!! ☺\n\n")
df1 = pd.DataFrame({"Results": "☺ All Requests are within the Expected Response Time in " + suite_title + "!!! ☺"},
index=pd.Index(range(1, 2, 1), name="Responses over " + str(threshold) + " ms"))
df2 = pd.DataFrame(dict_errors, index=pd.Index(range(1, max(rows) + 1, 1), name="Errors Percentage [%]"))
print(df1.to_string() + "\n\n")
print("Errors in %:\n")
print(df2.to_string() + "\n")
response_table.append(df1)
errors_table.append(df2)
return response_table, errors_table
def urls_response_parser(self, urls, data_type):
i = 1
data = []
suite_list = []
test_list = []
request_list = []
for url in urls:
suite_title, titles, tables = self.url_parser(url)
suite_title = "Suite#" + str(i) + ": " + suite_title
suite_requests, suite_responses, suite_previous, suite_deviations, suite_error_percentages, old_row = self.response_parser(tables, data_type)
for title, requests, responses, previous, deviations, errors in zip(titles, suite_requests, suite_responses, suite_previous, suite_deviations, suite_error_percentages):
for request, response, prev, deviation, error in zip(requests, responses, previous, deviations, errors):
dictionary = {}
if request != 'All URIs':
dictionary['SuiteName'] = suite_title
dictionary['Test Name'] = title
dictionary['Request Name'] = request
dictionary['PreviousTime'] = prev
dictionary['ResponseTime'] = response
dictionary['Deviation'] = deviation
dictionary['Errors'] = error
data.append(dictionary)
request_list.append(request)
test_list.append(title)
suite_list.append(suite_title)
i += 1
data = sorted(data, key=lambda k: k['Request Name'])
request_list = list(set(request_list))
request_list.sort()
test_list = list(set(test_list))
test_list.sort()
return suite_list, test_list, request_list, data
    @staticmethod
    def response_by_request_across_urls_results(suite_list, test_list, request_list, data, threshold):
        """For each request, merge previous/current response times and error
        percentages across all suites (outer-joined on 'Test Name'), print the
        tables, and collect them for sheet writing.

        NOTE(review): if `suite_list` is empty the zip below never runs and
        `response_df`/`errors_df` are unbound — UnboundLocalError. Presumably
        callers always pass at least one suite; confirm.

        NOTE(review): `response_df > threshold` compares the string
        'Test Name' column against a number — works on old pandas (False),
        raises TypeError on modern pandas; verify the pinned pandas version.
        """
        sheet_names = []
        response_table = []
        errors_table = []
        for request in request_list:
            response_dfs = []
            previous_dfs = []
            errors_dfs = []
            # Build one (previous, current, errors) frame triple per suite.
            for suite in suite_list:
                rows = 0
                tests = []
                dict_response = {}
                dict_previous = {}
                dict_errors = {}
                response_list = []
                previous_list = []
                errors_list = []
                for test in test_list:
                    for dictionary in data:
                        if dictionary['Request Name'] == request and dictionary['Test Name'] == test and dictionary['SuiteName'] == suite:
                            tests.append(dictionary['Test Name'])
                            previous_list.append(dictionary['PreviousTime'])
                            response_list.append(dictionary['ResponseTime'])
                            errors_list.append(dictionary['Errors'])
                            rows += 1
                dict_previous['Test Name'] = tests
                dict_previous[suite + " - Previous Build"] = previous_list
                dict_response['Test Name'] = tests
                dict_response[suite + " - Current Build"] = response_list
                dict_errors['Test Name'] = tests
                dict_errors[suite] = errors_list
                df1 = pd.DataFrame(dict_response, index=pd.Index(range(1, rows + 1, 1), name="Responses over " + str(threshold) + " ms"))
                response_dfs.append(df1)
                df2 = pd.DataFrame(dict_previous, index=pd.Index(range(1, rows + 1, 1), name="Responses over " + str(threshold) + " ms"))
                previous_dfs.append(df2)
                df3 = pd.DataFrame(dict_errors, index=pd.Index(range(1, rows + 1, 1), name="Errors Percentage [%]"))
                errors_dfs.append(df3)
            # Fold the per-suite frames into one wide frame per request.
            # First iteration seeds the accumulators; later ones outer-join.
            i = 0
            for response, previous, error in zip(response_dfs, previous_dfs, errors_dfs):
                if i == 0:
                    response_df = pd.merge(previous, response, how='outer', on='Test Name')
                    errors_df = error
                else:
                    response_df = pd.merge(response_df, previous, how='outer', on='Test Name')
                    response_df = pd.merge(response_df, response, how='outer', on='Test Name')
                    errors_df = pd.merge(errors_df, error, how='outer', on='Test Name')
                i += 1
            print("\n")
            print("Request Name: " + request)
            print("---------------------------------------------------------------------------------------------------")
            print("Response Times over " + str(threshold) + " ms:\n")
            response_df.index.names = ["Responses over " + str(threshold) + " ms"]
            # Keep rows over threshold in more than one (previous/current) column.
            response_df = response_df[response_df[response_df > threshold].count(axis=1) > 1]
            if response_df.empty:
                print("☺ All Tests are within the Expected Response Time for Request: " + request + "!!! ☺\n\n")
                response_df = pd.DataFrame({"Results": "☺ All Tests are within the Expected Response Time for Request: " + request + "!!! ☺"},
                                           index=pd.Index(range(1, 2, 1), name="Responses over " + str(threshold) + " ms"))
            print(response_df.to_string() + "\n\n")
            print("Errors in %:\n")
            errors_df.index.names = ["Errors Percentage [%]"]
            print(errors_df.to_string() + "\n")
            response_table.append(response_df)
            errors_table.append(errors_df)
            sheet_names.append(request[:31])  # Excel sheet names cap at 31 chars
        return response_table, errors_table, sheet_names
@staticmethod
def response_by_test_across_urls_results(suite_list, test_list, request_list, data, threshold):
sheet_names = []
response_table = []
errors_table = []
for test in test_list:
response_dfs = []
previous_dfs = []
errors_dfs = []
for suite in suite_list:
rows = 0
requests = []
dict_response = {}
dict_previous = {}
dict_errors = {}
response_list = []
previous_list = []
errors_list = []
for request in request_list:
for dictionary in data:
if dictionary['Request Name'] == request and dictionary['Test Name'] == test and dictionary['SuiteName'] == suite:
requests.append(dictionary['Request Name'])
previous_list.append(dictionary['PreviousTime'])
response_list.append(dictionary['ResponseTime'])
errors_list.append(dictionary['Errors'])
rows += 1
dict_previous['Request Name'] = requests
dict_previous[suite + " - Previous Build"] = previous_list
dict_response['Request Name'] = requests
dict_response[suite + " - Current Build"] = response_list
dict_errors['Request Name'] = requests
dict_errors[suite] = errors_list
df1 = pd.DataFrame(dict_response, index=pd.Index(range(1, rows + 1, 1), name="Responses over " + str(threshold) + " ms"))
response_dfs.append(df1)
df2 = pd.DataFrame(dict_previous, index=pd.Index(range(1, rows + 1, 1), name="Responses over " + str(threshold) + " ms"))
previous_dfs.append(df2)
df3 = pd.DataFrame(dict_errors, index=pd.Index(range(1, rows + 1, 1), name="Errors Percentage [%]"))
errors_dfs.append(df3)
i = 0
for response, previous, error in zip(response_dfs, previous_dfs, errors_dfs):
if i == 0:
response_df = pd.merge(previous, response, how='outer', on='Request Name')
errors_df = error
else:
response_df = pd.merge(response_df, previous, how='outer', on='Request Name')
response_df = pd.merge(response_df, response, how='outer', on='Request Name')
errors_df = pd.merge(errors_df, error, how='outer', on='Request Name')
i += 1
print("\n")
print("Test Name: " + test)
print("---------------------------------------------------------------------------------------------------")
print("Response Times over " + str(threshold) + " ms:\n")
response_df.index.names = ["Responses over " + str(threshold) + " | |
# Source repo: ItGirls/event_extraction (GitHub stars: 100-1000)
import os
import numpy as np
import time
import logging
from common_utils import set_logger
import tensorflow as tf
from sklearn.metrics import f1_score
from models.bert_mrc import bert_mrc_model_fn_builder
from models.bert_event_type_classification import bert_classification_model_fn_builder
from data_processing.data_utils import *
from data_processing.event_prepare_data import EventRolePrepareMRC, EventTypeClassificationPrepare
# from data_processing.event_prepare_data import EventRoleClassificationPrepare
from data_processing.event_prepare_data import event_input_bert_mrc_mul_fn, event_index_class_input_bert_fn
from data_processing.event_prepare_data import event_binclass_input_bert_fn
from models.bert_event_type_classification import bert_binaryclassification_model_fn_builder
from data_processing.event_prepare_data import event_input_verfify_mrc_fn
from models.event_verify_av import event_verify_mrc_model_fn_builder
from configs.event_config import event_config
# import horovod.tensorflow as hvd
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
logger = set_logger("[run training]")
# logger = logging.getLogger('train')
# logger.setLevel(logging.INFO)
# os.environ['TF_ENABLE_AUTO_MIXED_PRECISION']='1'
def serving_input_receiver_fn():
    """Serving input_fn built from int32 placeholders.

    Exposes ``words``, ``text_length`` and ``words_seq`` tensors for the
    exported SavedModel's serving signature.

    Returns
    -------
    tf.estimator.export.ServingInputReceiver
    """
    placeholders = {
        'words': tf.placeholder(dtype=tf.int32, shape=[None, None], name='words'),
        'text_length': tf.placeholder(dtype=tf.int32, shape=[None], name='text_length'),
        'words_seq': tf.placeholder(dtype=tf.int32, shape=[None, None], name='words_seq'),
    }
    # Features and receiver tensors are the same placeholders here.
    return tf.estimator.export.ServingInputReceiver(dict(placeholders), placeholders)
def bert_serving_input_receiver_fn():
    """Serving input_fn for the plain BERT model.

    Exposes ``words`` (token ids) and ``text_length`` placeholders.

    Returns
    -------
    tf.estimator.export.ServingInputReceiver
    """
    placeholders = {
        'words': tf.placeholder(dtype=tf.int32, shape=[None, None], name='words'),
        'text_length': tf.placeholder(dtype=tf.int32, shape=[None], name='text_length'),
    }
    # Features and receiver tensors are the same placeholders here.
    return tf.estimator.export.ServingInputReceiver(dict(placeholders), placeholders)
def bert_event_type_serving_input_receiver_fn():
    """Serving input_fn for the event-type classification model.

    Adds BERT segment ids and the fixed-width (65) index list of event-type
    tokens on top of the token-id and length placeholders.
    """
    placeholders = {
        'words': tf.placeholder(dtype=tf.int32, shape=[None, None], name='words'),
        'text_length': tf.placeholder(dtype=tf.int32, shape=[None], name='text_length'),
        'token_type_ids': tf.placeholder(dtype=tf.int32, shape=[None, None], name="token_type_ids"),
        'type_index_in_ids_list': tf.placeholder(dtype=tf.int32, shape=[None, 65],
                                                 name="type_index_in_ids_list"),
    }
    # Features and receiver tensors are the same placeholders here.
    return tf.estimator.export.ServingInputReceiver(dict(placeholders), placeholders)
def bert_event_bin_serving_input_receiver_fn():
    """Serving input_fn for the binary (answerability) classification model.

    Exposes token ids, sequence lengths and BERT segment ids.
    """
    placeholders = {
        'words': tf.placeholder(dtype=tf.int32, shape=[None, None], name='words'),
        'text_length': tf.placeholder(dtype=tf.int32, shape=[None], name='text_length'),
        'token_type_ids': tf.placeholder(dtype=tf.int32, shape=[None, None], name="token_type_ids"),
    }
    # Features and receiver tensors are the same placeholders here.
    return tf.estimator.export.ServingInputReceiver(dict(placeholders), placeholders)
def bert_mrc_serving_input_receiver_fn():
    """Serving input_fn for the BERT-MRC span model.

    Exposes token ids, sequence lengths, query lengths and BERT segment ids —
    the four features the MRC model_fn reads.
    """
    placeholders = {
        'words': tf.placeholder(dtype=tf.int32, shape=[None, None], name='words'),
        'text_length': tf.placeholder(dtype=tf.int32, shape=[None], name='text_length'),
        'query_length': tf.placeholder(dtype=tf.int32, shape=[None], name="query_length"),
        'token_type_ids': tf.placeholder(dtype=tf.int32, shape=[None, None], name="token_type_ids"),
    }
    # Features and receiver tensors are the same placeholders here.
    return tf.estimator.export.ServingInputReceiver(dict(placeholders), placeholders)
def run_event_role_mrc(args):
    """
    Baseline: event-role extraction framed as MRC (machine reading
    comprehension) with a BERT span model.

    Loads pre-tokenized k-fold data from ``data/neg_fold_data_{fold}``,
    trains via tf.estimator (when ``args.do_train``) and exports a
    SavedModel for serving.

    :param args: parsed CLI arguments; reads model_checkpoint_dir,
        model_pb_dir, fold_index, train_batch_size, epochs, decay_epoch,
        dropout_prob, rnn_units, num_layers, hidden_units, do_train.
    :return: None
    """
    # Per-fold checkpoint/SavedModel directories (config templates with {fold}).
    model_base_dir = event_config.get(args.model_checkpoint_dir).format(args.fold_index)
    pb_model_dir = event_config.get(args.model_pb_dir).format(args.fold_index)
    vocab_file_path = os.path.join(event_config.get("bert_pretrained_model_path"), event_config.get("vocab_file"))
    bert_config_file = os.path.join(event_config.get("bert_pretrained_model_path"),
                                    event_config.get("bert_config_path"))
    slot_file = os.path.join(event_config.get("slot_list_root_path"),
                             event_config.get("bert_slot_complete_file_name_role"))
    schema_file = os.path.join(event_config.get("data_dir"), event_config.get("event_schema"))
    query_map_file = os.path.join(event_config.get("slot_list_root_path"), event_config.get("query_map_file"))
    # 512 is presumably the max sequence length — confirm EventRolePrepareMRC's signature.
    data_loader = EventRolePrepareMRC(vocab_file_path, 512, slot_file, schema_file, query_map_file)
    # train_file = os.path.join(event_config.get("data_dir"), event_config.get("event_data_file_train"))
    # eval_file = os.path.join(event_config.get("data_dir"), event_config.get("event_data_file_eval"))
    # data_list,label_start_list,label_end_list,query_len_list,token_type_id_list
    # train_datas, train_labels_start,train_labels_end,train_query_lens,train_token_type_id_list,dev_datas, dev_labels_start,dev_labels_end,dev_query_lens,dev_token_type_id_list = data_loader._read_json_file(train_file,eval_file,True)
    # dev_datas, dev_labels_start,dev_labels_end,dev_query_lens,dev_token_type_id_list = data_loader._read_json_file(eval_file,None,False)
    # train_datas, train_labels_start,train_labels_end,train_query_lens,train_token_type_id_list,dev_datas, dev_labels_start,dev_labels_end,dev_query_lens,dev_token_type_id_list = data_loader._merge_ee_and_re_datas(train_file,eval_file,"relation_extraction/data/train_data.json","relation_extraction/data/dev_data.json")
    # Pre-tokenized fold data, presumably produced offline by the data_processing
    # pipeline — confirm the .npy files exist for this fold before training.
    train_datas = np.load("data/neg_fold_data_{}/token_ids_train.npy".format(args.fold_index), allow_pickle=True)
    train_labels = np.load("data/neg_fold_data_{}/multi_labels_train.npy".format(args.fold_index), allow_pickle=True)
    train_query_lens = np.load("data/neg_fold_data_{}/query_lens_train.npy".format(args.fold_index), allow_pickle=True)
    train_token_type_id_list = np.load("data/neg_fold_data_{}/token_type_ids_train.npy".format(args.fold_index),
                                       allow_pickle=True)
    dev_datas = np.load("data/neg_fold_data_{}/token_ids_dev.npy".format(args.fold_index), allow_pickle=True)
    dev_labels = np.load("data/neg_fold_data_{}/multi_labels_dev.npy".format(args.fold_index), allow_pickle=True)
    dev_query_lens = np.load("data/neg_fold_data_{}/query_lens_dev.npy".format(args.fold_index), allow_pickle=True)
    dev_token_type_id_list = np.load("data/neg_fold_data_{}/token_type_ids_dev.npy".format(args.fold_index),
                                     allow_pickle=True)
    train_samples_nums = len(train_datas)
    dev_samples_nums = len(dev_datas)
    # Steps per epoch = ceil(samples / batch_size).
    if train_samples_nums % args.train_batch_size != 0:
        each_epoch_steps = int(train_samples_nums / args.train_batch_size) + 1
    else:
        each_epoch_steps = int(train_samples_nums / args.train_batch_size)
    # each_epoch_steps = int(data_loader.train_samples_nums/args.train_batch_size)+1
    logger.info('*****train_set sample nums:{}'.format(train_samples_nums))
    logger.info('*****dev_set sample nums:{}'.format(dev_samples_nums))
    logger.info('*****train each epoch steps:{}'.format(each_epoch_steps))
    train_steps_nums = each_epoch_steps * args.epochs
    # train_steps_nums = each_epoch_steps * args.epochs // hvd.size()
    logger.info('*****train_total_steps:{}'.format(train_steps_nums))
    decay_steps = args.decay_epoch * each_epoch_steps
    logger.info('*****train decay steps:{}'.format(decay_steps))
    # dropout_prob is the drop probability (not the keep probability).
    # num_warmup_steps: 10% of the total steps — warmup schedule is applied in
    # the model_fn, presumably linear; confirm in bert_mrc_model_fn_builder.
    params = {"dropout_prob": args.dropout_prob, "num_labels": data_loader.labels_map_len,
              "rnn_size": args.rnn_units, "num_layers": args.num_layers, "hidden_units": args.hidden_units,
              "decay_steps": decay_steps, "train_steps": train_steps_nums,
              "num_warmup_steps": int(train_steps_nums * 0.1)}
    # dist_strategy = tf.contrib.distribute.MirroredStrategy(num_gpus=args.gpu_nums)
    config_tf = tf.ConfigProto()
    config_tf.gpu_options.allow_growth = True
    # Checkpoint/summary once per epoch; keep only the 3 most recent checkpoints.
    run_config = tf.estimator.RunConfig(
        model_dir=model_base_dir,
        save_summary_steps=each_epoch_steps,
        save_checkpoints_steps=each_epoch_steps,
        session_config=config_tf,
        keep_checkpoint_max=3,
        # train_distribute=dist_strategy
    )
    bert_init_checkpoints = os.path.join(event_config.get("bert_pretrained_model_path"),
                                         event_config.get("bert_init_checkpoints"))
    # init_checkpoints = "output/model/merge_usingtype_roberta_traindev_event_role_bert_mrc_model_desmodified_lowercase/checkpoint/model.ckpt-1218868"
    model_fn = bert_mrc_model_fn_builder(bert_config_file, bert_init_checkpoints, args)
    estimator = tf.estimator.Estimator(
        model_fn,
        params=params,
        config=run_config)
    if args.do_train:
        train_input_fn = lambda: event_input_bert_mrc_mul_fn(
            train_datas, train_labels, train_token_type_id_list, train_query_lens,
            is_training=True, is_testing=False, args=args)
        eval_input_fn = lambda: event_input_bert_mrc_mul_fn(
            dev_datas, dev_labels, dev_token_type_id_list, dev_query_lens,
            is_training=False, is_testing=False, args=args)
        train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=train_steps_nums
                                            )
        # Export only the single best eval checkpoint for serving.
        exporter = tf.estimator.BestExporter(exports_to_keep=1,
                                             serving_input_receiver_fn=bert_mrc_serving_input_receiver_fn)
        eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn, exporters=[exporter], throttle_secs=0)
        # for _ in range(args.epochs):
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
    # "bert_ce_model_pb"
    # Always export the final model, even when do_train is False (uses the
    # latest checkpoint in model_base_dir).
    estimator.export_saved_model(pb_model_dir, bert_mrc_serving_input_receiver_fn)
def run_event_classification(args):
    """
    Event-type detection as multi-label binary classification, borrowing the
    NL2SQL approach of predicting over candidate-column (here: event-type)
    token positions.

    Loads pre-tokenized k-fold data from ``data/index_type_fold_data_{fold}``,
    computes inverse-frequency class weights, trains via tf.estimator (when
    ``args.do_train``) and exports a SavedModel.

    :param args: parsed CLI arguments; reads model_checkpoint_dir,
        model_pb_dir, fold_index, train_batch_size, epochs, decay_epoch,
        dropout_prob, rnn_units, num_layers, hidden_units, do_train.
    :return: None
    """
    model_base_dir = event_config.get(args.model_checkpoint_dir).format(args.fold_index)
    pb_model_dir = event_config.get(args.model_pb_dir).format(args.fold_index)
    print(model_base_dir)
    print(pb_model_dir)
    vocab_file_path = os.path.join(event_config.get("bert_pretrained_model_path"), event_config.get("vocab_file"))
    bert_config_file = os.path.join(event_config.get("bert_pretrained_model_path"),
                                    event_config.get("bert_config_path"))
    event_type_file = os.path.join(event_config.get("slot_list_root_path"), event_config.get("event_type_file"))
    # 512 is presumably the max sequence length — confirm the class signature.
    data_loader = EventTypeClassificationPrepare(vocab_file_path, 512, event_type_file)
    # train_file/eval_file are kept for the commented-out on-the-fly path below;
    # the active path loads pre-built .npy fold data instead.
    train_file = os.path.join(event_config.get("data_dir"), event_config.get("event_data_file_train"))
    eval_file = os.path.join(event_config.get("data_dir"), event_config.get("event_data_file_eval"))
    # train_data_list,train_label_list,train_token_type_id_list,dev_data_list,dev_label_list,dev_token_type_id_list = data_loader._read_json_file(train_file,eval_file,is_train=True)
    train_data_list = np.load("data/index_type_fold_data_{}/token_ids_train.npy".format(args.fold_index),
                              allow_pickle=True)
    train_label_list = np.load("data/index_type_fold_data_{}/labels_train.npy".format(args.fold_index),
                               allow_pickle=True)
    train_token_type_id_list = np.load("data/index_type_fold_data_{}/token_type_ids_train.npy".format(args.fold_index),
                                       allow_pickle=True)
    train_type_index_ids_list = np.load(
        "data/index_type_fold_data_{}/type_index_in_token_ids_train.npy".format(args.fold_index), allow_pickle=True)
    dev_data_list = np.load("data/index_type_fold_data_{}/token_ids_dev.npy".format(args.fold_index), allow_pickle=True)
    dev_label_list = np.load("data/index_type_fold_data_{}/labels_dev.npy".format(args.fold_index), allow_pickle=True)
    dev_token_type_id_list = np.load("data/index_type_fold_data_{}/token_type_ids_dev.npy".format(args.fold_index),
                                     allow_pickle=True)
    dev_type_index_ids_list = np.load(
        "data/index_type_fold_data_{}/type_index_in_token_ids_dev.npy".format(args.fold_index), allow_pickle=True)
    train_labels = np.array(train_label_list)
    # print(train_labels.shape)
    print(train_labels.shape)
    # Inverse-frequency class weights over the 65 event types:
    # weight_i = max(count) / count_i.
    # NOTE(review): if a label never occurs in the training fold this is a
    # numpy divide-by-zero (yields inf with a warning) — confirm the data
    # guarantees every type appears.
    a = np.sum(train_labels, axis=0)
    a = [max(a) / ele for ele in a]
    class_weight = np.array(a)
    class_weight = np.reshape(class_weight, (1, 65))
    print(class_weight)
    # dev_datas,dev_token_type_ids,dev_labels = data_loader._read_json_file(eval_file)
    train_samples_nums = len(train_data_list)
    dev_samples_nums = len(dev_data_list)
    # Steps per epoch = ceil(samples / batch_size).
    if train_samples_nums % args.train_batch_size != 0:
        each_epoch_steps = int(train_samples_nums / args.train_batch_size) + 1
    else:
        each_epoch_steps = int(train_samples_nums / args.train_batch_size)
    # each_epoch_steps = int(data_loader.train_samples_nums/args.train_batch_size)+1
    logger.info('*****train_set sample nums:{}'.format(train_samples_nums))
    logger.info('*****train each epoch steps:{}'.format(each_epoch_steps))
    train_steps_nums = each_epoch_steps * args.epochs
    # train_steps_nums = each_epoch_steps * args.epochs // hvd.size()
    logger.info('*****train_total_steps:{}'.format(train_steps_nums))
    decay_steps = args.decay_epoch * each_epoch_steps
    logger.info('*****train decay steps:{}'.format(decay_steps))
    # dropout_prob is the drop probability (not the keep probability).
    params = {"dropout_prob": args.dropout_prob, "num_labels": data_loader.labels_map_len,
              "rnn_size": args.rnn_units, "num_layers": args.num_layers, "hidden_units": args.hidden_units,
              "decay_steps": decay_steps, "class_weight": class_weight}
    # dist_strategy = tf.contrib.distribute.MirroredStrategy(num_gpus=args.gpu_nums)
    config_tf = tf.ConfigProto()
    config_tf.gpu_options.allow_growth = True
    # "bert_ce_model_dir"
    # mirrored_strategy = tf.distribute.MirroredStrategy()
    # config_tf.gpu_options.visible_device_list = str(hvd.local_rank())
    # checkpoint_path = os.path.join(bert_config.get(args.model_checkpoint_dir), str(hvd.rank()))
    # save_summary_steps > total steps effectively disables summary writing;
    # checkpoint once per epoch, keep only the latest checkpoint.
    run_config = tf.estimator.RunConfig(
        model_dir=model_base_dir,
        save_summary_steps=train_steps_nums + 10,
        save_checkpoints_steps=each_epoch_steps,
        session_config=config_tf,
        keep_checkpoint_max=1,
        # train_distribute=dist_strategy
    )
    bert_init_checkpoints = os.path.join(event_config.get("bert_pretrained_model_path"),
                                         event_config.get("bert_init_checkpoints"))
    model_fn = bert_classification_model_fn_builder(bert_config_file, bert_init_checkpoints, args)
    estimator = tf.estimator.Estimator(
        model_fn,
        params=params,
        config=run_config)
    if args.do_train:
        # train_input_fn = lambda: data_loader.create_dataset(is_training=True,is_testing=False, args=args)
        # eval_input_fn = lambda: data_loader.create_dataset(is_training=False,is_testing=False,args=args)
        # train_X,train_Y = np.load(data_loader.train_X_path,allow_pickle=True),np.load(data_loader.train_Y_path,allow_pickle=True)
        # train_input_fn = lambda :event_class_input_bert_fn(train_data_list,token_type_ids=train_token_type_id_list,label_map_len=data_loader.labels_map_len,
        #                                            is_training=True,is_testing=False,args=args,input_Ys=train_label_list)
        train_input_fn = lambda: event_index_class_input_bert_fn(train_data_list,
                                                                 token_type_ids=train_token_type_id_list,
                                                                 type_index_ids_list=train_type_index_ids_list,
                                                                 label_map_len=data_loader.labels_map_len,
                                                                 is_training=True, is_testing=False, args=args,
                                                                 input_Ys=train_label_list)
        # eval_X,eval_Y = np.load(data_loader.valid_X_path,allow_pickle=True),np.load(data_loader.valid_Y_path,allow_pickle=True)
        # eval_input_fn = lambda: event_class_input_bert_fn(dev_data_list,token_type_ids=dev_token_type_id_list,label_map_len=data_loader.labels_map_len,
        #                                            is_training=False,is_testing=False,args=args,input_Ys=dev_label_list)
        eval_input_fn = lambda: event_index_class_input_bert_fn(dev_data_list, token_type_ids=dev_token_type_id_list,
                                                                type_index_ids_list=dev_type_index_ids_list,
                                                                label_map_len=data_loader.labels_map_len,
                                                                is_training=False, is_testing=False, args=args,
                                                                input_Ys=dev_label_list)
        train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=train_steps_nums
                                            )
        # Export only the single best eval checkpoint for serving.
        exporter = tf.estimator.BestExporter(exports_to_keep=1,
                                             serving_input_receiver_fn=bert_event_type_serving_input_receiver_fn)
        eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn, throttle_secs=0, exporters=[exporter])
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
    # "bert_ce_model_pb"
    # Always export the final model, even when do_train is False.
    estimator.export_saved_model(pb_model_dir, bert_event_type_serving_input_receiver_fn)
def run_event_binclassification(args):
    """
    The E-AV (answer-verification) module of Retro-Reader, i.e. the first-pass
    reading module: trains a binary classifier predicting whether the question
    has an answer.

    :param args: command-line namespace; reads fold_index, train_batch_size,
        epochs, decay_epoch, dropout_prob, rnn_units, num_layers, hidden_units,
        do_train, model_checkpoint_dir, model_pb_dir.
    :return: None. Side effects: trains/evaluates an estimator and exports a
        SavedModel to the per-fold pb directory.
    """
    # Per-fold checkpoint directory and SavedModel export directory.
    model_base_dir = event_config.get(args.model_checkpoint_dir).format(args.fold_index)
    pb_model_dir = event_config.get(args.model_pb_dir).format(args.fold_index)
    print(model_base_dir)
    print(pb_model_dir)
    # BERT assets: vocab, config, and the event-type list file.
    vocab_file_path = os.path.join(event_config.get("bert_pretrained_model_path"), event_config.get("vocab_file"))
    bert_config_file = os.path.join(event_config.get("bert_pretrained_model_path"),
                                    event_config.get("bert_config_path"))
    event_type_file = os.path.join(event_config.get("slot_list_root_path"), event_config.get("event_type_file"))
    # data_loader =EventTypeClassificationPrepare(vocab_file_path,512,event_type_file)
    # train_file = os.path.join(event_config.get("data_dir"),event_config.get("event_data_file_train"))
    # eval_file = os.path.join(event_config.get("data_dir"),event_config.get("event_data_file_eval"))
    # train_data_list,train_label_list,train_token_type_id_list,dev_data_list,dev_label_list,dev_token_type_id_list = data_loader._read_json_file(train_file,eval_file,is_train=True)
    # Pre-tokenized per-fold inputs (token ids / token-type ids), cached as .npy.
    train_data_list = np.load("data/verify_neg_fold_data_{}/token_ids_train.npy".format(args.fold_index),
                              allow_pickle=True)
    # train_label_list = np.load("data/verify_neg_fold_data_{}/has_answer_train.npy".format(args.fold_index),allow_pickle=True)
    train_label_list = []
    # Span start labels; a sample counts as "answerable" iff any start label is non-zero.
    train_start_labels = np.load("data/verify_neg_fold_data_{}/labels_start_train.npy".format(args.fold_index),
                                 allow_pickle=True)
    dev_start_labels = np.load("data/verify_neg_fold_data_{}/labels_start_dev.npy".format(args.fold_index),
                               allow_pickle=True)
    train_token_type_id_list = np.load("data/verify_neg_fold_data_{}/token_type_ids_train.npy".format(args.fold_index),
                                       allow_pickle=True)
    dev_data_list = np.load("data/verify_neg_fold_data_{}/token_ids_dev.npy".format(args.fold_index), allow_pickle=True)
    # dev_label_list = np.load("data/verify_neg_fold_data_{}/has_answer_dev.npy".format(args.fold_index),allow_pickle=True)
    dev_label_list = []
    dev_token_type_id_list = np.load("data/verify_neg_fold_data_{}/token_type_ids_dev.npy".format(args.fold_index),
                                     allow_pickle=True)
    # dev_datas,dev_token_type_ids,dev_labels = data_loader._read_json_file(eval_file)
    # Derive the binary has-answer label (0/1) from the span-start labels.
    train_samples_nums = len(train_data_list)
    for i in range(train_samples_nums):
        if sum(train_start_labels[i]) == 0:
            train_label_list.append(0)
        else:
            train_label_list.append(1)
    train_label_list = np.array(train_label_list).reshape((train_samples_nums, 1))
    dev_samples_nums = len(dev_data_list)
    for i in range(dev_samples_nums):
        if sum(dev_start_labels[i]) == 0:
            dev_label_list.append(0)
        else:
            dev_label_list.append(1)
    dev_label_list = np.array(dev_label_list).reshape((dev_samples_nums, 1))
    # Steps per epoch = ceil(train_samples / batch_size).
    if train_samples_nums % args.train_batch_size != 0:
        each_epoch_steps = int(train_samples_nums / args.train_batch_size) + 1
    else:
        each_epoch_steps = int(train_samples_nums / args.train_batch_size)
    # each_epoch_steps = int(data_loader.train_samples_nums/args.train_batch_size)+1
    logger.info('*****train_set sample nums:{}'.format(train_samples_nums))
    logger.info('*****train each epoch steps:{}'.format(each_epoch_steps))
    train_steps_nums = each_epoch_steps * args.epochs
    # train_steps_nums = each_epoch_steps * args.epochs // hvd.size()
    logger.info('*****train_total_steps:{}'.format(train_steps_nums))
    decay_steps = args.decay_epoch * each_epoch_steps
    logger.info('*****train decay steps:{}'.format(decay_steps))
    # dropout_prob is the dropout (drop) probability.
    params = {"dropout_prob": args.dropout_prob, "num_labels": 1,
              "rnn_size": args.rnn_units, "num_layers": args.num_layers, "hidden_units": args.hidden_units,
              "decay_steps": decay_steps, "class_weight": 1}
    # dist_strategy = tf.contrib.distribute.MirroredStrategy(num_gpus=args.gpu_nums)
    config_tf = tf.ConfigProto()
    config_tf.gpu_options.allow_growth = True
    # "bert_ce_model_dir"
    # mirrored_strategy = tf.distribute.MirroredStrategy()
    # config_tf.gpu_options.visible_device_list = str(hvd.local_rank())
    # checkpoint_path = os.path.join(bert_config.get(args.model_checkpoint_dir), str(hvd.rank()))
    # save_summary_steps > total steps effectively disables summary writing.
    run_config = tf.estimator.RunConfig(
        model_dir=model_base_dir,
        save_summary_steps=train_steps_nums + 10,
        save_checkpoints_steps=each_epoch_steps,
        session_config=config_tf,
        keep_checkpoint_max=1,
        # train_distribute=dist_strategy
    )
    bert_init_checkpoints = os.path.join(event_config.get("bert_pretrained_model_path"),
                                         event_config.get("bert_init_checkpoints"))
    model_fn = bert_binaryclassification_model_fn_builder(bert_config_file, bert_init_checkpoints, args)
    estimator = tf.estimator.Estimator(
        model_fn,
        params=params,
        config=run_config)
    if args.do_train:
        # train_input_fn = lambda: data_loader.create_dataset(is_training=True,is_testing=False, args=args)
        # eval_input_fn = lambda: data_loader.create_dataset(is_training=False,is_testing=False,args=args)
        # train_X,train_Y = np.load(data_loader.train_X_path,allow_pickle=True),np.load(data_loader.train_Y_path,allow_pickle=True)
        # train_input_fn = lambda :event_class_input_bert_fn(train_data_list,token_type_ids=train_token_type_id_list,label_map_len=data_loader.labels_map_len,
        #                                              is_training=True,is_testing=False,args=args,input_Ys=train_label_list)
        # label_map_len=1: single sigmoid/binary output.
        train_input_fn = lambda: event_binclass_input_bert_fn(train_data_list, token_type_ids=train_token_type_id_list,
                                                              label_map_len=1,
                                                              is_training=True, is_testing=False, args=args,
                                                              input_Ys=train_label_list)
        # eval_X,eval_Y = np.load(data_loader.valid_X_path,allow_pickle=True),np.load(data_loader.valid_Y_path,allow_pickle=True)
        # eval_input_fn = lambda: event_class_input_bert_fn(dev_data_list,token_type_ids=dev_token_type_id_list,label_map_len=data_loader.labels_map_len,
        #                                              is_training=False,is_testing=False,args=args,input_Ys=dev_label_list)
        eval_input_fn = lambda: event_binclass_input_bert_fn(dev_data_list, token_type_ids=dev_token_type_id_list,
                                                             label_map_len=1,
                                                             is_training=False, is_testing=False, args=args,
                                                             input_Ys=dev_label_list)
        train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=train_steps_nums
                                            )
        # Keep only the single best checkpoint as judged by the eval metric.
        exporter = tf.estimator.BestExporter(exports_to_keep=1,
                                             serving_input_receiver_fn=bert_event_bin_serving_input_receiver_fn)
        eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn, throttle_secs=0, exporters=[exporter])
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
    # "bert_ce_model_pb"
    # Export the final model for serving regardless of do_train.
    estimator.export_saved_model(pb_model_dir, bert_event_bin_serving_input_receiver_fn)
def run_event_verify_role_mrc(args):
"""
retro reader 第二阶段的精度模块,同时训练两个任务,role抽取和问题是否可以回答
:param args:
:return:
"""
model_base_dir = event_config.get(args.model_checkpoint_dir).format(args.fold_index)
pb_model_dir = event_config.get(args.model_pb_dir).format(args.fold_index)
vocab_file_path = os.path.join(event_config.get("bert_pretrained_model_path"), event_config.get("vocab_file"))
bert_config_file = os.path.join(event_config.get("bert_pretrained_model_path"),
event_config.get("bert_config_path"))
slot_file = os.path.join(event_config.get("slot_list_root_path"),
event_config.get("bert_slot_complete_file_name_role"))
schema_file = os.path.join(event_config.get("data_dir"), event_config.get("event_schema"))
query_map_file = os.path.join(event_config.get("slot_list_root_path"), event_config.get("query_map_file"))
data_loader = EventRolePrepareMRC(vocab_file_path, 512, slot_file, schema_file, query_map_file)
# train_file = os.path.join(event_config.get("data_dir"), event_config.get("event_data_file_train"))
# eval_file = os.path.join(event_config.get("data_dir"), event_config.get("event_data_file_eval"))
# data_list,label_start_list,label_end_list,query_len_list,token_type_id_list
# train_datas, train_labels_start,train_labels_end,train_query_lens,train_token_type_id_list,dev_datas, dev_labels_start,dev_labels_end,dev_query_lens,dev_token_type_id_list = data_loader._read_json_file(train_file,eval_file,True)
# dev_datas, dev_labels_start,dev_labels_end,dev_query_lens,dev_token_type_id_list = data_loader._read_json_file(eval_file,None,False)
# train_datas, train_labels_start,train_labels_end,train_query_lens,train_token_type_id_list,dev_datas, dev_labels_start,dev_labels_end,dev_query_lens,dev_token_type_id_list = data_loader._merge_ee_and_re_datas(train_file,eval_file,"relation_extraction/data/train_data.json","relation_extraction/data/dev_data.json")
train_has_answer_label_list = []
dev_has_answer_label_list = []
train_datas = np.load("data/verify_neg_fold_data_{}/token_ids_train.npy".format(args.fold_index), allow_pickle=True)
# train_has_answer_label_list = np.load("data/verify_neg_fold_data_{}/has_answer_train.npy".format(args.fold_index),allow_pickle=True)
| |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import torch.optim as optim
import itertools
import tqdm
import copy
import scipy.stats as st
import os
import time
from scipy.stats import norm
device = 'cuda'
def gen_net(in_size=1, out_size=1, H=128, n_layers=3, activation='tanh'):
    """Build the layer list for an MLP: `n_layers` (Linear, LeakyReLU) pairs
    followed by a final Linear and an output activation.

    activation: 'tanh' -> Tanh, 'sig' -> Sigmoid, anything else -> ReLU.
    Returns a plain list of modules (caller wraps it in nn.Sequential).
    """
    layers = []
    width = in_size
    for _ in range(n_layers):
        layers += [nn.Linear(width, H), nn.LeakyReLU()]
        width = H
    layers.append(nn.Linear(width, out_size))
    if activation == 'tanh':
        head = nn.Tanh()
    elif activation == 'sig':
        head = nn.Sigmoid()
    else:
        head = nn.ReLU()
    layers.append(head)
    return layers
def KCenterGreedy(obs, full_obs, num_new_sample):
    """Greedy k-center selection: pick `num_new_sample` rows of `obs` that
    maximize coverage with respect to `full_obs`.

    Each round selects the point farthest from the current center set
    (`full_obs` plus all points selected so far), removes it from the
    candidate pool, and repeats.

    Args:
        obs: (N, D) numpy array of candidate points.
        full_obs: (M, D) numpy array of already-covered points.
        num_new_sample: number of indices to select.
    Returns:
        List of selected indices into `obs`.
    """
    selected_index = []
    current_index = list(range(obs.shape[0]))
    new_obs = obs
    new_full_obs = full_obs
    # NOTE: the original kept an unused `start_time = time.time()` here;
    # removed as dead code.
    for count in range(num_new_sample):
        # Distance from each remaining candidate to its nearest center.
        dist = compute_smallest_dist(new_obs, new_full_obs)
        max_index = torch.argmax(dist).item()
        if count == 0:
            # candidate pool is still the identity mapping on the first round
            selected_index.append(max_index)
        else:
            selected_index.append(current_index[max_index])
        # Drop the chosen candidate and grow the center set.
        current_index = current_index[0:max_index] + current_index[max_index+1:]
        new_obs = obs[current_index]
        new_full_obs = np.concatenate([
            full_obs,
            obs[selected_index]],
            axis=0)
    return selected_index
def compute_smallest_dist(obs, full_obs):
    """For each row of `obs`, the L2 distance to its nearest neighbor in
    `full_obs`.

    Both inputs are numpy arrays of shape (N, D) and (M, D). Pairwise
    distances are computed on `device` in 100x100 tiles to bound memory.

    Returns:
        torch.Tensor of shape (N, 1) with the smallest distance per row.
    """
    obs = torch.from_numpy(obs).float()
    full_obs = torch.from_numpy(full_obs).float()
    batch_size = 100
    with torch.no_grad():
        total_dists = []
        for full_idx in range(len(obs) // batch_size + 1):
            full_start = full_idx * batch_size
            if full_start < len(obs):
                full_end = (full_idx + 1) * batch_size
                dists = []
                for idx in range(len(full_obs) // batch_size + 1):
                    start = idx * batch_size
                    if start < len(full_obs):
                        end = (idx + 1) * batch_size
                        # Pairwise L2 distance between the two tiles via broadcasting.
                        dist = torch.norm(
                            obs[full_start:full_end, None, :].to(device) - full_obs[None, start:end, :].to(device), dim=-1, p=2
                        )
                        dists.append(dist)
                dists = torch.cat(dists, dim=1)
                # Fixed redundant alias: original used `torch.torch.min`.
                small_dists = torch.min(dists, dim=1).values
                total_dists.append(small_dists)
        total_dists = torch.cat(total_dists)
    return total_dists.unsqueeze(1)
class RewardModel:
    def __init__(self, ds, da,
                 ensemble_size=3, lr=3e-4, mb_size = 128, size_segment=1,
                 env_maker=None, max_size=100, activation='tanh', capacity=5e5,
                 large_batch=1, label_margin=0.0,
                 teacher_beta=-1, teacher_gamma=1,
                 teacher_eps_mistake=0,
                 teacher_eps_skip=0,
                 teacher_eps_equal=0):
        """Preference-based reward model backed by an ensemble of MLPs.

        Args:
            ds: state dimension.
            da: action dimension.
            ensemble_size: number of reward networks in the ensemble.
            lr: Adam learning rate shared by all members.
            mb_size: number of segment pairs queried per feedback batch.
            size_segment: length (in timesteps) of each compared segment.
            env_maker: stored nowhere here; presumably kept for interface
                compatibility — TODO confirm against callers.
            max_size: maximum number of stored trajectories (FIFO eviction).
            activation: output activation of each reward net ('tanh'/'sig'/else ReLU).
            capacity: size of the labeled preference (circular) buffer.
            large_batch: oversampling factor used when generating queries.
            label_margin: soft-label margin; label_target = 1 - 2*margin.
            teacher_*: scripted-teacher parameters; semantics not visible in
                this chunk — NOTE(review): confirm in the query/label code.
        """
        # train data is trajectories, must process to sa and s..
        self.ds = ds
        self.da = da
        self.de = ensemble_size
        self.lr = lr
        self.ensemble = []
        self.paramlst = []
        self.opt = None
        self.model = None
        self.max_size = max_size
        self.activation = activation
        self.size_segment = size_segment
        self.capacity = int(capacity)
        # Circular buffers of labeled segment pairs: (capacity, segment, ds+da).
        self.buffer_seg1 = np.empty((self.capacity, size_segment, self.ds+self.da), dtype=np.float32)
        self.buffer_seg2 = np.empty((self.capacity, size_segment, self.ds+self.da), dtype=np.float32)
        self.buffer_label = np.empty((self.capacity, 1), dtype=np.float32)
        self.buffer_index = 0
        self.buffer_full = False
        # Build the ensemble networks and the shared optimizer.
        self.construct_ensemble()
        # Raw trajectory storage: lists of per-trajectory arrays.
        self.inputs = []
        self.targets = []
        self.raw_actions = []
        self.img_inputs = []
        self.mb_size = mb_size
        self.origin_mb_size = mb_size
        self.train_batch_size = 128
        self.CEloss = nn.CrossEntropyLoss()
        self.running_means = []
        self.running_stds = []
        self.best_seg = []
        self.best_label = []
        self.best_action = []
        self.large_batch = large_batch
        # new teacher
        self.teacher_beta = teacher_beta
        self.teacher_gamma = teacher_gamma
        self.teacher_eps_mistake = teacher_eps_mistake
        self.teacher_eps_equal = teacher_eps_equal
        self.teacher_eps_skip = teacher_eps_skip
        self.teacher_thres_skip = 0
        self.teacher_thres_equal = 0
        self.label_margin = label_margin
        self.label_target = 1 - 2*self.label_margin
def softXEnt_loss(self, input, target):
logprobs = torch.nn.functional.log_softmax (input, dim = 1)
return -(target * logprobs).sum() / input.shape[0]
def change_batch(self, new_frac):
self.mb_size = int(self.origin_mb_size*new_frac)
def set_batch(self, new_batch):
self.mb_size = int(new_batch)
def set_teacher_thres_skip(self, new_margin):
self.teacher_thres_skip = new_margin * self.teacher_eps_skip
def set_teacher_thres_equal(self, new_margin):
self.teacher_thres_equal = new_margin * self.teacher_eps_equal
def construct_ensemble(self):
for i in range(self.de):
model = nn.Sequential(*gen_net(in_size=self.ds+self.da,
out_size=1, H=256, n_layers=3,
activation=self.activation)).float().to(device)
self.ensemble.append(model)
self.paramlst.extend(model.parameters())
self.opt = torch.optim.Adam(self.paramlst, lr = self.lr)
def add_data(self, obs, act, rew, done):
sa_t = np.concatenate([obs, act], axis=-1)
r_t = rew
flat_input = sa_t.reshape(1, self.da+self.ds)
r_t = np.array(r_t)
flat_target = r_t.reshape(1, 1)
init_data = len(self.inputs) == 0
if init_data:
self.inputs.append(flat_input)
self.targets.append(flat_target)
elif done:
self.inputs[-1] = np.concatenate([self.inputs[-1], flat_input])
self.targets[-1] = np.concatenate([self.targets[-1], flat_target])
# FIFO
if len(self.inputs) > self.max_size:
self.inputs = self.inputs[1:]
self.targets = self.targets[1:]
self.inputs.append([])
self.targets.append([])
else:
if len(self.inputs[-1]) == 0:
self.inputs[-1] = flat_input
self.targets[-1] = flat_target
else:
self.inputs[-1] = np.concatenate([self.inputs[-1], flat_input])
self.targets[-1] = np.concatenate([self.targets[-1], flat_target])
def add_data_batch(self, obses, rewards):
num_env = obses.shape[0]
for index in range(num_env):
self.inputs.append(obses[index])
self.targets.append(rewards[index])
def get_rank_probability(self, x_1, x_2):
# get probability x_1 > x_2
probs = []
for member in range(self.de):
probs.append(self.p_hat_member(x_1, x_2, member=member).cpu().numpy())
probs = np.array(probs)
return np.mean(probs, axis=0), np.std(probs, axis=0)
def get_entropy(self, x_1, x_2):
# get probability x_1 > x_2
probs = []
for member in range(self.de):
probs.append(self.p_hat_entropy(x_1, x_2, member=member).cpu().numpy())
probs = np.array(probs)
return np.mean(probs, axis=0), np.std(probs, axis=0)
def p_hat_member(self, x_1, x_2, member=-1):
# softmaxing to get the probabilities according to eqn 1
with torch.no_grad():
r_hat1 = self.r_hat_member(x_1, member=member)
r_hat2 = self.r_hat_member(x_2, member=member)
r_hat1 = r_hat1.sum(axis=1)
r_hat2 = r_hat2.sum(axis=1)
r_hat = torch.cat([r_hat1, r_hat2], axis=-1)
# taking 0 index for probability x_1 > x_2
return F.softmax(r_hat, dim=-1)[:,0]
def p_hat_entropy(self, x_1, x_2, member=-1):
# softmaxing to get the probabilities according to eqn 1
with torch.no_grad():
r_hat1 = self.r_hat_member(x_1, member=member)
r_hat2 = self.r_hat_member(x_2, member=member)
r_hat1 = r_hat1.sum(axis=1)
r_hat2 = r_hat2.sum(axis=1)
r_hat = torch.cat([r_hat1, r_hat2], axis=-1)
ent = F.softmax(r_hat, dim=-1) * F.log_softmax(r_hat, dim=-1)
ent = ent.sum(axis=-1).abs()
return ent
    def r_hat_member(self, x, member=-1):
        # One member network parameterizes r_hat in eqn (1) of the paper.
        # `x` is a numpy array; converted to a float tensor on `device`.
        return self.ensemble[member](torch.from_numpy(x).float().to(device))
def r_hat(self, x):
# they say they average the rewards from each member of the ensemble, but I think this only makes sense if the rewards are already normalized
# but I don't understand how the normalization should be happening right now :(
r_hats = []
for member in range(self.de):
r_hats.append(self.r_hat_member(x, member=member).detach().cpu().numpy())
r_hats = np.array(r_hats)
return np.mean(r_hats)
def r_hat_batch(self, x):
# they say they average the rewards from each member of the ensemble, but I think this only makes sense if the rewards are already normalized
# but I don't understand how the normalization should be happening right now :(
r_hats = []
for member in range(self.de):
r_hats.append(self.r_hat_member(x, member=member).detach().cpu().numpy())
r_hats = np.array(r_hats)
return np.mean(r_hats, axis=0)
def save(self, model_dir, step):
for member in range(self.de):
torch.save(
self.ensemble[member].state_dict(), '%s/reward_model_%s_%s.pt' % (model_dir, step, member)
)
def load(self, model_dir, step):
for member in range(self.de):
self.ensemble[member].load_state_dict(
torch.load('%s/reward_model_%s_%s.pt' % (model_dir, step, member))
)
    def get_train_acc(self):
        """Accuracy of each ensemble member on the stored labeled pairs.

        Iterates the preference buffer in batches of 256; a member's
        prediction is the argmax over the two segments' summed rewards.
        Returns the mean accuracy across members.
        """
        ensemble_acc = np.array([0 for _ in range(self.de)])
        max_len = self.capacity if self.buffer_full else self.buffer_index
        # NOTE(review): this permutation is never used below; batches are
        # taken in buffer order.
        total_batch_index = np.random.permutation(max_len)
        batch_size = 256
        num_epochs = int(np.ceil(max_len/batch_size))
        total = 0
        for epoch in range(num_epochs):
            # Clamp the final batch to the buffer end.
            last_index = (epoch+1)*batch_size
            if (epoch+1)*batch_size > max_len:
                last_index = max_len
            sa_t_1 = self.buffer_seg1[epoch*batch_size:last_index]
            sa_t_2 = self.buffer_seg2[epoch*batch_size:last_index]
            labels = self.buffer_label[epoch*batch_size:last_index]
            labels = torch.from_numpy(labels.flatten()).long().to(device)
            total += labels.size(0)
            for member in range(self.de):
                # get logits
                r_hat1 = self.r_hat_member(sa_t_1, member=member)
                r_hat2 = self.r_hat_member(sa_t_2, member=member)
                r_hat1 = r_hat1.sum(axis=1)
                r_hat2 = r_hat2.sum(axis=1)
                r_hat = torch.cat([r_hat1, r_hat2], axis=-1)
                # Predicted preference = index of the larger summed reward.
                _, predicted = torch.max(r_hat.data, 1)
                correct = (predicted == labels).sum().item()
                ensemble_acc[member] += correct
        ensemble_acc = ensemble_acc / total
        return np.mean(ensemble_acc)
    def get_queries(self, mb_size=20):
        """Sample `mb_size` random segment pairs from the stored trajectories.

        Returns:
            sa_t_1, sa_t_2: (mb_size, size_segment, ds+da) state-action segments.
            r_t_1, r_t_2: (mb_size, size_segment, 1) ground-truth rewards.

        NOTE(review): assumes all stored trajectories share the length of
        inputs[0], and requires len_traj > size_segment (otherwise
        np.random.choice(0, ...) raises) — confirm against callers.
        """
        len_traj, max_len = len(self.inputs[0]), len(self.inputs)
        img_t_1, img_t_2 = None, None
        # Skip the last trajectory if it is still being filled.
        if len(self.inputs[-1]) < len_traj:
            max_len = max_len - 1
        # get train traj
        train_inputs = np.array(self.inputs[:max_len])
        train_targets = np.array(self.targets[:max_len])
        # Sample trajectories (with replacement) for both sides of the pair.
        batch_index_2 = np.random.choice(max_len, size=mb_size, replace=True)
        sa_t_2 = train_inputs[batch_index_2] # Batch x T x dim of s&a
        r_t_2 = train_targets[batch_index_2] # Batch x T x 1
        batch_index_1 = np.random.choice(max_len, size=mb_size, replace=True)
        sa_t_1 = train_inputs[batch_index_1] # Batch x T x dim of s&a
        r_t_1 = train_targets[batch_index_1] # Batch x T x 1
        # Flatten to (Batch*T, dim) so segments can be gathered with np.take.
        sa_t_1 = sa_t_1.reshape(-1, sa_t_1.shape[-1]) # (Batch x T) x dim of s&a
        r_t_1 = r_t_1.reshape(-1, r_t_1.shape[-1]) # (Batch x T) x 1
        sa_t_2 = sa_t_2.reshape(-1, sa_t_2.shape[-1]) # (Batch x T) x dim of s&a
        r_t_2 = r_t_2.reshape(-1, r_t_2.shape[-1]) # (Batch x T) x 1
        # Generate time index
        time_index = np.array([list(range(i*len_traj,
                                            i*len_traj+self.size_segment)) for i in range(mb_size)])
        # Shift each segment window to a random start within its trajectory.
        time_index_2 = time_index + np.random.choice(len_traj-self.size_segment, size=mb_size, replace=True).reshape(-1,1)
        time_index_1 = time_index + np.random.choice(len_traj-self.size_segment, size=mb_size, replace=True).reshape(-1,1)
        sa_t_1 = np.take(sa_t_1, time_index_1, axis=0) # Batch x size_seg x dim of s&a
        r_t_1 = np.take(r_t_1, time_index_1, axis=0) # Batch x size_seg x 1
        sa_t_2 = np.take(sa_t_2, time_index_2, axis=0) # Batch x size_seg x dim of s&a
        r_t_2 = np.take(r_t_2, time_index_2, axis=0) # Batch x size_seg x 1
        return sa_t_1, sa_t_2, r_t_1, r_t_2
def put_queries(self, sa_t_1, sa_t_2, labels):
total_sample = sa_t_1.shape[0]
next_index = self.buffer_index + total_sample
if next_index >= self.capacity:
self.buffer_full = True
maximum_index = self.capacity - self.buffer_index
np.copyto(self.buffer_seg1[self.buffer_index:self.capacity], sa_t_1[:maximum_index])
np.copyto(self.buffer_seg2[self.buffer_index:self.capacity], sa_t_2[:maximum_index])
np.copyto(self.buffer_label[self.buffer_index:self.capacity], labels[:maximum_index])
remain = total_sample - (maximum_index)
if remain > 0:
np.copyto(self.buffer_seg1[0:remain], sa_t_1[maximum_index:])
np.copyto(self.buffer_seg2[0:remain], sa_t_2[maximum_index:])
np.copyto(self.buffer_label[0:remain], labels[maximum_index:])
self.buffer_index = remain
else:
np.copyto(self.buffer_seg1[self.buffer_index:next_index], sa_t_1)
np.copyto(self.buffer_seg2[self.buffer_index:next_index], sa_t_2)
np.copyto(self.buffer_label[self.buffer_index:next_index], labels)
self.buffer_index = next_index
def get_label(self, sa_t_1, sa_t_2, r_t_1, | |
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
)
api_key_value = getattr(client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, ProjectsTransport):
# transport is a ProjectsTransport instance.
if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
import google.auth._default # type: ignore
if api_key_value and hasattr(
google.auth._default, "get_api_key_credentials"
):
credentials = google.auth._default.get_api_key_credentials(
api_key_value
)
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def get_project(
self,
request: Union[projects.GetProjectRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> projects.Project:
r"""Retrieves the project identified by the specified ``name`` (for
example, ``projects/415104041262``).
The caller must have ``resourcemanager.projects.get`` permission
for this project.
Args:
request (Union[google.cloud.resourcemanager_v3.types.GetProjectRequest, dict]):
The request object. The request sent to the
[GetProject][google.cloud.resourcemanager.v3.Projects.GetProject]
method.
name (str):
Required. The name of the project (for example,
``projects/415104041262``).
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.resourcemanager_v3.types.Project:
A project is a high-level Google
Cloud entity. It is a container for
ACLs, APIs, App Engine Apps, VMs, and
other Google Cloud Platform resources.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a projects.GetProjectRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, projects.GetProjectRequest):
request = projects.GetProjectRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_project]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_projects(
self,
request: Union[projects.ListProjectsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListProjectsPager:
r"""Lists projects that are direct children of the specified folder
or organization resource. ``list()`` provides a strongly
consistent view of the projects underneath the specified parent
resource. ``list()`` returns projects sorted based upon the
(ascending) lexical ordering of their ``display_name``. The
caller must have ``resourcemanager.projects.list`` permission on
the identified parent.
Args:
request (Union[google.cloud.resourcemanager_v3.types.ListProjectsRequest, dict]):
The request object. The request sent to the
[ListProjects][google.cloud.resourcemanager.v3.Projects.ListProjects]
method.
parent (str):
Required. The name of the parent
resource to list projects under.
For example, setting this field to
'folders/1234' would list all projects
directly under that folder.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.resourcemanager_v3.services.projects.pagers.ListProjectsPager:
A page of the response received from the
[ListProjects][google.cloud.resourcemanager.v3.Projects.ListProjects]
method.
A paginated response where more pages are available
has next_page_token set. This token can be used in a
subsequent request to retrieve the next request page.
NOTE: A response may contain fewer elements than the
request page_size and still have a next_page_token.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a projects.ListProjectsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, projects.ListProjectsRequest):
request = projects.ListProjectsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_projects]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListProjectsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def search_projects(
self,
request: Union[projects.SearchProjectsRequest, dict] = None,
*,
query: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.SearchProjectsPager:
r"""Search for projects that the caller has both
``resourcemanager.projects.get`` permission on, and also satisfy
the specified query.
This method returns projects in an unspecified order.
This method is eventually consistent with project mutations;
this means that a newly created project may not appear in the
results or recent updates to an existing project may not be
reflected in the results. To retrieve the latest state of a
project, use the
[GetProject][google.cloud.resourcemanager.v3.Projects.GetProject]
method.
Args:
request (Union[google.cloud.resourcemanager_v3.types.SearchProjectsRequest, dict]):
The request object. The request sent to the
[SearchProjects][google.cloud.resourcemanager.v3.Projects.SearchProjects]
method.
query (str):
Optional. A query string for searching for projects that
the caller has ``resourcemanager.projects.get``
permission to. If multiple fields are included in the
query, the it will return results that match any of the
fields. Some eligible fields are:
::
| Field | Description |
|-------------------------|----------------------------------------------|
| displayName, name | Filters by displayName. |
| parent | Project's parent. (for example: folders/123,
organizations/*) Prefer parent field over parent.type and parent.id. |
| parent.type | Parent's type: `folder` or `organization`. |
| parent.id | Parent's id number (for example: 123) |
| id, projectId | Filters by projectId. |
| state, lifecycleState | Filters by state. |
| labels | Filters by label name or value. |
| labels.<key> (where *key* is the name of a label) | Filters by label
name. |
Search expressions are case insensitive.
Some examples queries:
::
| Query | Description |
|------------------|-----------------------------------------------------|
| name:how* | The project's name starts with "how". |
| name:Howl | The project's name is `Howl` or `howl`. |
| name:HOWL | Equivalent to above. |
| NAME:howl | Equivalent to above. |
| labels.color:* | The | |
= scene.camera
if self.renderFrom == 'TAB':
bpy.context.space_data.camera = bpy.data.objects[scene.camera.name]
for area in bpy.context.screen.areas:
if area.type == 'VIEW_3D':
context.area.spaces[0].region_3d.view_perspective = 'CAMERA'
break
for marker in marker_list:
if self._chosenCamera == marker.camera:
scene.camera = marker.camera
scene.frame_current = marker.frame
scene.render.filepath = self.path + scene.camera.name
bpy.ops.render.render("INVOKE_DEFAULT", write_still=self._autoSaveRender)
return {"PASS_THROUGH"}
marker = None
scene.frame_current = 0
current_frame = scene.frame_current
for m in reversed(sorted(filter(lambda m: m.frame <= current_frame,scene.timeline_markers),key=lambda m: m.frame)):
marker = m
break
marker_name = scene.camera.name
if marker and (marker.frame == current_frame):
marker.name = marker_name
else:
marker = scene.timeline_markers.new(marker_name)
marker.frame = scene.frame_current
marker.camera = scene.camera
marker.select = True
for other_marker in [m for m in scene.timeline_markers if m != marker]:
other_marker.select = False
scene.render.filepath = self.path + scene.camera.name
bpy.ops.render.render("INVOKE_DEFAULT", write_still=self._autoSaveRender)
elif len(marker_list) == 0 :
bpy.ops.object.select_all(action='DESELECT')
self._chosenCamera.select_set(state = True)
bpy.context.view_layer.objects.active = scene.camera
if self.renderFrom in ('TAB', 'CAMANAGER'):
bpy.context.space_data.camera = bpy.data.objects[scene.camera.name]
for area in bpy.context.screen.areas:
if area.type == 'VIEW_3D':
context.area.spaces[0].region_3d.view_perspective = 'CAMERA'
break
scene.render.filepath = self.path + scene.camera.name
bpy.ops.render.render("INVOKE_DEFAULT", write_still=self._autoSaveRender)
return {"PASS_THROUGH"}
# TOGGLE RENDER ORIENTATION ##################################################################################
class MYBIGBUTTONTAB_OT_toggle_orientation(Operator):
    """Swap the render X/Y resolution and pixel aspect (portrait <-> landscape).

    Shift-click instead forces a square format, using the horizontal
    dimension as the reference.
    """
    bl_idname = "render.toggle_orientation"
    bl_label = "Toggle Orientation"
    bl_description = (" \u2022 shift + click: Square dimensions \n"
                      " (H Dimension as reference)")
    def invoke(self, context, event):
        render = context.scene.render
        rs = context.scene.RBTab_Settings
        if event.shift:
            # Square format: copy the horizontal values onto the vertical ones.
            render.resolution_y = render.resolution_x
            render.pixel_aspect_y = render.pixel_aspect_x
            return {'FINISHED'}
        # The original performed the identical swap in both states and only
        # toggled the bookkeeping flag, so: swap once, invert the flag.
        render.resolution_x, render.resolution_y = (render.resolution_y,
                                                    render.resolution_x)
        render.pixel_aspect_x, render.pixel_aspect_y = (render.pixel_aspect_y,
                                                        render.pixel_aspect_x)
        rs.switchRenderRotation_prop = not rs.switchRenderRotation_prop
        return {'FINISHED'}
# STORE DEFAULT DIMENSION ##################################################################################
class MYBIGBUTTONTAB_OT_store_defaultres(Operator):
    """Save the current render resolution/aspect as the scene defaults.

    Shift-click restores the previously stored defaults instead.
    """
    bl_idname = "render.store_as_defaultres"
    bl_label = "Set Current Resolution as Default"
    bl_description = (" \u2022 Shift + Click: Recover Last")
    @classmethod
    def poll(cls, context):
        # Mirrors the UI context this operator is exposed from.
        return context.active_object is not None
    def invoke(self, context, event):
        settings = context.scene.RBTab_Settings
        render = context.scene.render
        if event.shift:
            # Restore: push the stored defaults back into the render settings.
            render.resolution_x = settings.Default_HRes_prop
            render.resolution_y = settings.Default_VRes_prop
            render.pixel_aspect_x = settings.Default_HPixRes_prop
            render.pixel_aspect_y = settings.Default_VPixRes_prop
        else:
            # Store: remember the live render settings as the new defaults.
            settings.Default_HRes_prop = render.resolution_x
            settings.Default_VRes_prop = render.resolution_y
            settings.Default_HPixRes_prop = render.pixel_aspect_x
            settings.Default_VPixRes_prop = render.pixel_aspect_y
        return {'FINISHED'}
# CUSTOM CAMERA RESOLUTION ##################################################################################
class SCENECAMERA_OT_CustomResolution(Operator):
    """Store, refresh, or clear a per-camera custom render resolution.

    Behavior (driven by the two operator properties):
      * plain invoke  — store the current render settings on the active
        camera (alt-click: on every selected camera) and flag it as custom;
      * ``crrefresh`` — overwrite an already-stored custom resolution of the
        active camera with the current render settings;
      * ``crdel``     — clear the custom resolution (alt-click: on every
        selected camera that has one) and restore the scene defaults.
    """
    bl_idname = "cameramanager.custom_resolution"
    bl_label = "Custom Resolution"
    bl_description = "Set current resolution as custom camera resolution"
    crrefresh : bpy.props.BoolProperty(default = False)
    crdel : bpy.props.BoolProperty(default = False)
    def invoke(self, context, event):
        scene = context.scene
        render = scene.render
        ob = context.active_object
        if ob is None:
            # Robustness fix: the original dereferenced the active object
            # unconditionally and raised AttributeError when none was active.
            return {'CANCELLED'}
        rs = scene.RBTab_Settings
        cs = ob.RBTab_obj_Settings
        # Snapshot the live render settings once.
        x = render.resolution_x
        y = render.resolution_y
        pa_x = render.pixel_aspect_x
        pa_y = render.pixel_aspect_y
        cameras = sorted([o for o in scene.objects if o.type == 'CAMERA'],
                         key=lambda o: o.name)
        selectedCam = sorted([o for o in context.selected_objects if o.type == 'CAMERA'],
                             key=lambda o: o.name)
        noCustomDimCam = [o for o in cameras
                          if not o.RBTab_obj_Settings.Custom_CamRes_prop]
        # Selected cameras that DO carry a custom resolution.
        selectedCustomDimCam = list(set(selectedCam) - set(noCustomDimCam))
        if self.crdel:
            # Clear mode: reset the chosen camera(s) and the live render
            # settings back to the stored scene defaults.
            targets = selectedCustomDimCam if event.alt else [ob]
            for camera in targets:
                ccs = camera.RBTab_obj_Settings
                ccs.Custom_CamRes_prop = False
                ccs.Custom_CamHRes_prop = rs.Default_HRes_prop
                ccs.Custom_CamVRes_prop = rs.Default_VRes_prop
                ccs.Custom_CamHPixRes_prop = rs.Default_HPixRes_prop
                ccs.Custom_CamVPixRes_prop = rs.Default_VPixRes_prop
            render.resolution_x = rs.Default_HRes_prop
            render.resolution_y = rs.Default_VRes_prop
            render.pixel_aspect_x = rs.Default_HPixRes_prop
            render.pixel_aspect_y = rs.Default_VPixRes_prop
            self.crdel = False
            return {'FINISHED'}
        if not cs.Custom_CamRes_prop:
            # First store: remember the current settings on the chosen
            # camera(s) and flag them as carrying a custom resolution.
            targets = selectedCam if event.alt else [ob]
            for camera in targets:
                ccs = camera.RBTab_obj_Settings
                ccs.Custom_CamHRes_prop = x
                ccs.Custom_CamVRes_prop = y
                ccs.Custom_CamHPixRes_prop = pa_x
                ccs.Custom_CamVPixRes_prop = pa_y
                ccs.Custom_CamRes_prop = True
        elif self.crrefresh:
            # Refresh: overwrite the stored values on the active camera only.
            cs.Custom_CamHRes_prop = x
            cs.Custom_CamVRes_prop = y
            cs.Custom_CamHPixRes_prop = pa_x
            cs.Custom_CamVPixRes_prop = pa_y
            self.crrefresh = False
        return {'FINISHED'}
#EVENTS AFTER RENDER##################################################################################
class RENDEREVENTS_OT_endEvents(Operator):
    """After-render events: play an alarm sound and/or power the machine off.

    Runs as a modal operator driven by a 1-second window-manager timer.
    ESC (or the ``abortAlarm`` setting) cancels sound and countdown.
    """
    bl_description = 'Play sound and/or Power Off'
    bl_idname = 'renderevents.end_events'
    bl_label = 'Events After Render'
    _stop = False      # True while a power-off countdown is pending
    _play = False      # True while the alarm sound is playing
    _timer = None      # window-manager timer driving modal()
    _timeout = None    # seconds remaining before power-off
    handle = None      # aud playback handle
    # Resolve the host OS once at class-definition time.
    if platform.system().startswith('Win'): OS ='WINDOWS'
    elif platform.system().startswith('Lin'):OS ='LINUX'
    else : OS ='MacOS'
    # Set when the user is only previewing the chosen sound.
    testSoundToPlay: bpy.props.BoolProperty(default = False)
    def modal(self, context, event):
        scene = context.scene
        rs = scene.RBTab_Settings
        # User abort: stop sound, clear all state, end the modal loop.
        if event.type == 'ESC' or rs.abortAlarm == True:
            context.window_manager.event_timer_remove(self._timer)
            if rs.playAfterRender == True: self.handle.stop()
            rs.alarmInProgress = False
            rs.abortAlarm = False
            rs.countDownAfterRender = 0
            self.testSoundToPlay = False
            bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=2)
            return {'FINISHED'}
        elif event.type =='TIMER':
            if self._play == True:
                # Sound-only mode: finish once playback ends.
                if self._stop == False and self.handle.status == False:
                    context.window_manager.event_timer_remove(self._timer)
                    rs.alarmInProgress = False
                    rs.abortAlarm = False
                    self.testSoundToPlay = False
                    bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=2)
                    return {'FINISHED'}
                # Sound + power-off: once playback ends, count down, then shut down.
                elif self._stop == True and self.testSoundToPlay == False:
                    if self.handle.status == False:
                        bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1)
                        self._timeout -= 1
                        rs.countDownAfterRender = self._timeout
                        print(self._timeout)
                        if self._timeout == 0:
                            rs.countDownAfterRender = 0
                            rs.abortAlarm = False
                            if self.OS == 'WINDOWS':
                                print(self.OS)
                                subprocess.call('shutdown /s /f')
                            elif self.OS == 'LINUX':
                                print(self.OS)
                                os.system('shutdown -h now')
                            elif self.OS == 'MacOS':
                                print(self.OS)
                                subprocess.call(['osascript', '-e','tell app "System Events" to shut down'])
                            rs.alarmInProgress = False
                            bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=2)
                            self.handle.stop()
                            context.window_manager.event_timer_remove(self._timer)
                            return {'FINISHED'}
            # Power-off only (no sound): count down, then shut down.
            elif self._play == False and self.testSoundToPlay == False:
                bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1)
                rs.countDownAfterRender = self._timeout
                self._timeout -= 1
                print(self._timeout)
                if self._timeout == 0:
                    rs.alarmInProgress = False
                    bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=2)
                    rs.countDownAfterRender = 0
                    if self.OS == 'WINDOWS':
                        print(self.OS)
                        subprocess.call('shutdown /s /f')
                    elif self.OS == 'LINUX':
                        print(self.OS)
                        bpy.ops.wm.quit_blender()
                    elif self.OS == 'MacOS':
                        print(self.OS)
                        subprocess.call(['osascript', '-e','tell app "System Events" to shut down'])
                    context.window_manager.event_timer_remove(self._timer)
                    return {'FINISHED'}
        return {'PASS_THROUGH'}
    def invoke(self, context, event):
        scene = context.scene
        rs = scene.RBTab_Settings
        self._timeout = rs.timeoutPowerOff
        # Bug fix: these two were only computed inside the soundToPlay != ''
        # branch, leaving them unbound (NameError) further down otherwise.
        soundType = ''
        soundExt = bpy.path.extensions_audio
        if rs.soundToPlay != '':
            soundType = os.path.splitext(rs.soundToPlay)[1]
        # Save a copy of the current file with a "_PowerOff" suffix before
        # shutting down, if the file has unsaved changes.
        if rs.poweroffAfterRender and bpy.data.is_dirty and self.testSoundToPlay == False:
            _name, _ext = os.path.splitext(bpy.path.basename(bpy.context.blend_data.filepath))
            _path = os.path.dirname(bpy.data.filepath)
            _name = _name + "_PowerOff" + _ext
            _pathName = os.path.join(_path, _name)
            bpy.ops.wm.save_as_mainfile(filepath=_pathName, copy=True)
        if rs.playAfterRender == True:
            if (str.lower(soundType) in soundExt) and os.path.exists(bpy.path.abspath(rs.soundToPlay)) == True:
                soundToPlay = bpy.path.abspath(rs.soundToPlay)
                if self._play == False:
                    device = aud.Device()
                    sound = aud.Sound(os.path.normpath(soundToPlay))
                    self.handle = device.play(sound.volume(80))
                    # Looping forever is incompatible with a power-off countdown.
                    if rs.loopSoundToPlay == True and rs.poweroffAfterRender == True: rs.loopSoundToPlay = False
                    if rs.loopSoundToPlay == True and rs.poweroffAfterRender == False: self.handle.loop_count = -1
                    else: self.handle.loop_count = rs.repeatSoundToPlay
                    self._play = True
            else:
                rs.soundToPlay = ''
                # Bug fix: original wrote 'self.testSoundToPlay == False' —
                # a no-op comparison where an assignment was intended.
                self.testSoundToPlay = False
                ShowMessageBox("Choose a sound file before !", "Wrong Sound File Type OR Not Exist", 'ERROR')
                self.report({"WARNING"}, 'Wrong Sound File Type OR Not Exist')
                return {"CANCELLED"}
        # Both branches of the original conditional assigned True here.
        self._stop = True
        rs.alarmInProgress = True
        rs.countDownAfterRender = 0
        bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=2)
        self._timer = context.window_manager.event_timer_add(1, window=context.window)
        context.window_manager.modal_handler_add(self)
        return {'RUNNING_MODAL'}
# Null ##################################################################################
class SCENECAMERA_OT_Null(Operator):
    """Dispatch helper behind camera-manager UI clicks.

    ``nullMode`` routes the click:
      * ``'SELECT'``      — forward to the select tool when the active object
        is not among the selected cameras (alt=invert, shift=all, else none);
      * ``'NOTSELECTED'`` — tell the user to select a camera first;
      * ``'NULL'`` / ``''`` — do nothing.
    """
    bl_idname = "cameramanager.null_tool"
    bl_label = ""
    bl_description = "Camera Manager"
    nullMode : bpy.props.StringProperty(name="tool", default="")
    def invoke(self, context, event):
        chosen_camera = context.active_object
        selectedCam = [o for o in context.selected_objects if o.type == 'CAMERA']
        if self.nullMode == 'SELECT':
            if chosen_camera not in selectedCam:
                if event.alt:
                    bpy.ops.cameramanager.select_tool("INVOKE_DEFAULT", selectTool="INVERT")
                elif event.shift:
                    bpy.ops.cameramanager.select_tool("INVOKE_DEFAULT", selectTool="ALL")
                else:
                    bpy.ops.cameramanager.select_tool("INVOKE_DEFAULT", selectTool="NONE")
        elif self.nullMode == 'NOTSELECTED':
            self.report({"INFO"}, 'Select Camera Before !')
        # Bug fix: the original wrote "self.nullMode == ''" twice (the second
        # time after the return, i.e. unreachable) — no-op comparisons where
        # an assignment was intended. Reset the transient mode properly.
        self.nullMode = ''
        return {"FINISHED"}
| |
# -*- coding: UTF-8 -*-
import maya.cmds as cmds
import maya.mel as mel
import socket
import pymel.core as pm
import os
import maya.OpenMayaUI as omui
from shiboken2 import wrapInstance
from inspect import getsourcefile
from PySide2 import QtCore
from PySide2 import QtUiTools
from PySide2 import QtWidgets
def maya_main_window():
    """Return Maya's main window wrapped as a QWidget (for dialog parenting)."""
    ptr = omui.MQtUtil.mainWindow()
    return wrapInstance(long(ptr), QtWidgets.QWidget)
class DesignerUI(QtWidgets.QDialog):
# AutoRig용 마야 파일
CONTROLRIG_MAYA_FILE = r'C:\Users\naong\Documents\maya\2020\scripts\IMTool\IMUtility\JBControlRigV0.2.mb'
ICTBlendShapeList = ['eyeBlinkLeft', 'eyeBlinkRight', 'eyeLookDownLeft', 'eyeLookDownRight', 'eyeLookInLeft',
'eyeLookInRight', 'eyeLookOutLeft', 'eyeLookOutRight', 'eyeLookUpLeft', 'eyeLookUpRight',
'eyeSquintLeft', 'eyeSquintRight', 'eyeWideLeft', 'eyeWideRight', 'browDownLeft',
'browDownRight', 'browInnerUp', 'browInnerUpLeft', 'browInnerUpRight', 'browOuterUpLeft',
'browOuterUpRight', 'jawOpen', 'jawRight', 'jawForward', 'jawLeft', 'mouthClose',
'mouthDimpleLeft', 'mouthDimpleRight', 'mouthFrownLeft', 'mouthFrownRight', 'mouthFunnel',
'mouthLeft', 'mouthLowerDownLeft', 'mouthLowerDownRight', 'mouthPressLeft', 'mouthPressRight',
'mouthPucker', 'mouthRight', 'mouthRollLower', 'mouthRollUpper', 'mouthShrugLower',
'mouthShrugUpper', 'mouthSmileLeft', 'mouthSmileRight', 'mouthStretchLeft',
'mouthStretchRight', 'mouthUpperUpLeft', 'mouthUpperUpRight', 'cheekPuff', 'cheekPuffLeft',
'cheekPuffRight', 'cheekRaiserLeft', 'cheekRaiserRight', 'cheekSquintLeft', 'cheekSquintRight',
'noseSneerLeft', 'noseSneerRight', 'tongueOut']
# 블렌드쉐입을 추가하고, Merge시켜질 애들리스트
otherMeshes = ['cc_base_eye', 'cc_base_teeth', 'cc_base_tongue', 'cc_base_tearline', 'cc_base_eyeocclusion',
'combine_eyePoint', 'combine_eyelash_upper', 'combine_eyelash_lower']
hairMeshes = []
result_hairMeshes = []
Face = [u'cc_base_body1.f[0:1637]', u'cc_base_body1.f[1668:3690]', u'cc_base_body1.f[3721:4157]']
EyelashUpper = [u'cc_base_body1.f[13513:13538]', u'cc_base_body1.f[13540:13583]', u'cc_base_body1.f[13638:13642]',
u'cc_base_body1.f[13647:13651]', u'cc_base_body1.f[13655:13734]', u'cc_base_body1.f[13813:13838]',
u'cc_base_body1.f[13840:13883]', u'cc_base_body1.f[13938:13942]', u'cc_base_body1.f[13947:13951]',
u'cc_base_body1.f[13955:14034]']
EyelashLower = [u'cc_base_body1.f[13446:13512]', u'cc_base_body1.f[13539]', u'cc_base_body1.f[13584:13637]',
u'cc_base_body1.f[13643:13646]', u'cc_base_body1.f[13652:13654]',
u'cc_base_body1.f[13735:13812]', u'cc_base_body1.f[13839]', u'cc_base_body1.f[13884:13937]',
u'cc_base_body1.f[13943:13946]', u'cc_base_body1.f[13952:13954]',
u'cc_base_body1.f[14035:14045]']
EyePoint = [u'cc_base_body1.f[1638:1667]', u'cc_base_body1.f[3691:3720]']
JBControlRigList_v2 = [u'mouth_press_R_ctl', u'mouth_press_L_ctl', u'cheekRaiser_R_ctl', u'eyelid_Up_L_ctl',
u'eyeSquint_R_ctl', u'eyeSquint_L_ctl', u'mouthPucker_ctl', u'mouthDimple_L_ctl',
u'cheekRaiser_L_ctl', u'cheekSquint_L_ctl', u'cheekSquint_R_ctl', u'browOuterUp_L_ctl',
u'browOuterUp_R_ctl', u'browInnerUp_L_ctl', u'browInnerUp_R_ctl', u'eyelid_Up_R_ctl',
u'eye_control', u'mouthClose_ctl', u'mouth_end_R_ctrl', u'mouth_end_L_ctrl',
u'mouthLowerDown_R_ctl', u'cheekPuff_R_ctl', u'cheekPuff_L_ctl', u'noseSneer_R_ctl',
u'noseSneer_L_ctl', u'jaw_ctl', u'mouthDimple_R_ctl', u'mouth_L_R_ctl',
u'mouthLowerDown_L_ctl', u'mouthShrugLower_ctl', u'mouthShrugUpper_ctl',
u'mouthUpperUp_R_ctl', u'mouthUpperUp_L_ctl', u'mouthRollUpper_ctl', u'mouthFunnel_ctl',
u'mouthRollLower_ctl', u'Jaw_F_ctl']
# Control rig와 Blendshape 연결 정보
JBControlConnectionInfo = [[u'BS_node.eyeBlinkLeft', u'BS_node_eyeBlinkLeft'],
[u'BS_node.eyeLookDownLeft', u'BS_node_eyeLookDownLeft'],
[u'BS_node.eyeLookInLeft', u'BS_node_eyeLookInLeft'],
[u'BS_node.eyeLookOutLeft', u'BS_node_eyeLookOutLeft'],
[u'BS_node.eyeLookUpLeft', u'BS_node_eyeLookUpLeft'],
[u'BS_node.eyeSquintLeft', u'BS_node_eyeSquintLeft'],
[u'BS_node.eyeWideLeft', u'BS_node_eyeWideLeft'],
[u'BS_node.eyeBlinkRight', u'BS_node_eyeBlinkRight'],
[u'BS_node.eyeLookDownRight', u'BS_node_eyeLookDownRight'],
[u'BS_node.eyeLookInRight', u'BS_node_eyeLookInRight'],
[u'BS_node.eyeLookOutRight', u'BS_node_eyeLookOutRight'],
[u'BS_node.eyeLookUpRight', u'BS_node_eyeLookUpRight'],
[u'BS_node.eyeSquintRight', u'BS_node_eyeSquintRight'],
[u'BS_node.eyeWideRight', u'BS_node_eyeWideRight'],
[u'BS_node.jawForward', u'BS_node_jawForward'], [u'BS_node.jawLeft', u'BS_node_jawLeft'],
[u'BS_node.jawRight', u'BS_node_jawRight'], [u'BS_node.jawOpen', u'BS_node_jawOpen'],
[u'BS_node.mouthClose', u'BS_node_mouthClose'],
[u'BS_node.mouthFunnel', u'blendWeighted2'], [u'BS_node.mouthPucker', u'blendWeighted3'],
[u'BS_node.mouthRight', u'BS_node_mouthRight'],
[u'BS_node.mouthLeft', u'BS_node_mouthLeft'],
[u'BS_node.mouthSmileLeft', u'BS_node_mouthSmileLeft'],
[u'BS_node.mouthSmileRight', u'BS_node_mouthSmileRight'],
[u'BS_node.mouthFrownRight', u'BS_node_mouthFrownRight'],
[u'BS_node.mouthFrownLeft', u'BS_node_mouthFrownLeft'],
[u'BS_node.mouthDimpleLeft', u'BS_node_mouthDimpleLeft'],
[u'BS_node.mouthDimpleRight', u'BS_node_mouthDimpleRight'],
[u'BS_node.mouthStretchLeft', u'BS_node_mouthStretchLeft'],
[u'BS_node.mouthStretchRight', u'BS_node_mouthStretchRight'],
[u'BS_node.mouthRollLower', u'BS_node_mouthRollLower'],
[u'BS_node.mouthRollUpper', u'BS_node_mouthRollUpper'],
[u'BS_node.mouthShrugLower', u'blendWeighted4'],
[u'BS_node.mouthShrugUpper', u'BS_node_mouthShrugUpper'],
[u'BS_node.mouthLowerDownLeft', u'BS_node_mouthLowerDownLeft'],
[u'BS_node.mouthLowerDownRight', u'BS_node_mouthLowerDownRight'],
[u'BS_node.mouthUpperUpLeft', u'BS_node_mouthUpperUpLeft'],
[u'BS_node.mouthUpperUpRight', u'BS_node_mouthUpperUpRight'],
[u'BS_node.browDownLeft', u'BS_node_browDownLeft1'],
[u'BS_node.browDownRight', u'BS_node_browDownRight'],
[u'BS_node.browInnerUp', u'BS_node_browInnerUp'],
[u'BS_node.browOuterUpLeft', u'BS_node_browOuterUpLeft1'],
[u'BS_node.browOuterUpRight', u'BS_node_browOuterUpRight'],
[u'BS_node.cheekPuff', u'BS_node_cheekPuff'],
[u'BS_node.cheekSquintLeft', u'BS_node_cheekSquintLeft'],
[u'BS_node.cheekSquintRight', u'blendWeighted1'],
[u'BS_node.noseSneerLeft', u'BS_node_noseSneerLeft'],
[u'BS_node.noseSneerRight', u'BS_node_noseSneerRight'],
[u'BS_node.tongueOut', u'BS_node_tongueOut']]
# Control rig와 Blendshape 연결 정보 ICT 최신 컨트롤 리그 2020년 12월9일 추가
JBC_BSList = [u'BS_node.mouthFrownLeft', u'BS_node.mouthDimpleRight', u'BS_node.mouthDimpleLeft',
u'BS_node.mouthClose', u'BS_node.jawRight', u'BS_node.jawOpen', u'BS_node.jawLeft',
u'BS_node.jawForward', u'BS_node.mouthFunnel', u'BS_node.mouthFrownRight', u'BS_node.eyeWideRight',
u'BS_node.cheekRaiserLeft', u'BS_node.cheekPuffRight', u'BS_node.cheekPuffLeft',
u'BS_node.browOuterUpRight', u'BS_node.browOuterUpLeft', u'BS_node.browInnerUpRight',
u'BS_node.browInnerUpLeft', u'BS_node.browDownRight', u'BS_node.browDownLeft',
u'BS_node.noseSneerRight', u'BS_node.noseSneerLeft', u'BS_node.mouthUpperUpRight',
u'BS_node.mouthUpperUpLeft', u'BS_node.mouthStretchRight', u'BS_node.mouthStretchLeft',
u'BS_node.eyeLookInLeft', u'BS_node.eyeLookDownRight', u'BS_node.eyeLookDownLeft',
u'BS_node.eyeBlinkRight', u'BS_node.eyeBlinkLeft', u'BS_node.cheekSquintRight',
u'BS_node.cheekSquintLeft', u'BS_node.cheekRaiserRight', u'BS_node.eyeWideLeft',
u'BS_node.eyeSquintRight', u'BS_node.eyeSquintLeft', u'BS_node.eyeLookUpRight',
u'BS_node.eyeLookUpLeft', u'BS_node.eyeLookOutRight', u'BS_node.eyeLookOutLeft',
u'BS_node.eyeLookInRight', u'BS_node.mouthPucker', u'BS_node.mouthLowerDownRight',
u'BS_node.mouthPressRight', u'BS_node.mouthPressLeft', u'BS_node.mouthRollLower',
u'BS_node.mouthRight', u'BS_node.mouthRollUpper', u'BS_node.mouthSmileLeft',
u'BS_node.mouthShrugUpper', u'BS_node.mouthShrugLower', u'BS_node.mouthSmileRight',
u'BS_node.mouthLowerDownLeft', u'BS_node.mouthLeft']
JBC_CtrlList = [u'blendShape1_mouthFrown_L_Mesh.output', u'mouthDimple_R_ctl.translateY',
u'mouthDimple_L_ctl.translateY', u'mouthClose_ctl.translateY', u'blendShape1_jawRight_Mesh.output',
u'jaw_ctl.translateY', u'blendShape1_jawLeft_Mesh.output', u'Jaw_F_ctl.translateY',
u'mouthFunnel_ctl.translateY', u'blendShape1_mouthFrown_R_Mesh1.output',
u'blendShape1_eyeWide_R_Mesh.output', u'cheekRaiser_L_ctl.translateY',
u'cheekPuff_R_ctl.translateY', u'cheekPuff_L_ctl.translateY',
u'blendShape1_browOuterUp_R_Mesh.output', u'blendShape1_browOuterUp_L_Mesh.output',
u'browInnerUp_R_ctl.translateY', u'browInnerUp_L_ctl.translateY',
u'blendShape1_browDown_R_Mesh.output', u'blendShape1_browDown_L_Mesh.output',
u'noseSneer_R_ctl.translateY', u'noseSneer_L_ctl.translateY', u'mouthUpperUp_R_ctl.translateY',
u'mouthUpperUp_L_ctl.translateY', u'mouth_end_R_ctrl.translateY', u'mouth_end_L_ctrl.translateY',
u'blendShape1_eyeLookIn_L_Mesh.output', u'blendShape1_eyeLookDown_R_Mesh.output',
u'blendShape1_eyeLookDown_L_Mesh.output', u'blendShape1_eyeBlink_R_Mesh.output',
u'blendShape1_eyeBlink_L_Mesh.output', u'cheekSquint_R_ctl.translateY',
u'cheekSquint_L_ctl.translateY', u'cheekRaiser_R_ctl.translateY',
u'blendShape1_eyeWide_L_Mesh.output', u'eyeSquint_R_ctl.translateY', u'eyeSquint_L_ctl.translateY',
u'blendShape1_eyeLookUp_R_Mesh.output', u'blendShape1_eyeLookUp_L_Mesh.output',
u'blendShape1_eyeLookOut_R_Mesh.output', u'blendShape1_eyeLookOut_L_Mesh.output',
u'blendShape1_eyeLookIn_R_Mesh.output', u'mouthPucker_ctl.translateY',
u'mouthLowerDown_R_ctl.translateY', u'mouth_press_R_ctl.translateY',
u'mouth_press_L_ctl.translateY', u'mouthRollLower_ctl.translateY',
u'blendShape1_mouthRight_Mesh.output', u'mouthRollUpper_ctl.translateY',
u'blendShape1_mouthSmile_L_Mesh.output', u'mouthShrugUpper_ctl.translateY',
u'mouthShrugLower_ctl.translateY', u'blendShape1_mouthSmile_R_Mesh1.output',
u'mouthLowerDown_L_ctl.translateY', u'blendShape1_mouthLeft_Mesh.output']
# 기본 Transform
jawTrans = None
jawRot = None
# 블랜드쉐입별 Transform
jawTransOpen = None
jawRotOpen = None
jawRotLeft = None
jawRotRight = None
jawTransForward = None
# Complete버튼 컬러
buttonColor = "background-color:rgb(0, 102, 51)"
# --------------------------------------------------------------------------------------------------
# Initialize
# --------------------------------------------------------------------------------------------------
def __init__(self, ui_path=None, title_name='ICT2CC3', parent=maya_main_window()):
super(DesignerUI, self).__init__(parent)
print 'Start ICT2CC3'
# 윈도우 설정
self.setWindowTitle(title_name)
self.setMinimumSize(200, 250)
self.setWindowFlags(self.windowFlags() ^ QtCore.Qt.WindowContextHelpButtonHint)
# 순차적 실행
self.init_ui(ui_path)
self.get_Host_name_IP()
self.create_layout() # 배치및 컬러 적용
self.create_connection()
# ------------------------------------------------------------------------------------------
# UI Load
# ------------------------------------------------------------------------------------------
def init_ui(self, ui_path=None):
# UI 파일을 넣지 않을 경우 코드와 같은 이름의 ui파일로 대신 불러온다.
if ui_path:
f = QtCore.QFile(ui_path)
else:
try:
# ui_path가 없으면 현재파일위치를 찾아 확장자만 ui로 바꿔서 불러오는 것을 시도한다.
f = QtCore.QFile( os.path.abspath(getsourcefile(lambda: 0)).replace("\\", "/").replace('.py', '.ui'))
except:
print u'호출시 ui_path에 ui파일경로를 적어주거나, 같은 파일이름의 ui파일을 만들어주시면 됩니다.'
return
f.open(QtCore.QFile.ReadOnly)
loader = QtUiTools.QUiLoader()
self.ui = loader.load(f, parentWidget=self)
f.close()
def get_Host_name_IP(self):
try:
host_name = socket.gethostname()
host_ip = socket.gethostbyname(host_name)
result = "IP : " + host_ip
self.ui.label_myIP.setText(result)
except:
print("Unable to get Hostname and IP")
# ------------------------------------------------------------------------------------------
# Layout
# ------------------------------------------------------------------------------------------
def create_layout(self):
main_layout = QtWidgets.QVBoxLayout(self)
main_layout.setContentsMargins(0, 0, 0, 0)
main_layout.addWidget(self.ui)
# ------------------------------------------------------------------------------------------
# Connect Methods
# ------------------------------------------------------------------------------------------
def create_connection(self):
#CopyMode : 블랜드쉐입 추출
self.ui.pushButton_detachMeshes.clicked.connect(self.detachMeshes)
self.ui.pushButton_importICT.clicked.connect(self.importICT)
self.ui.pushButton_autoBindSkin.clicked.connect(self.autoBindSkin)
self.ui.pushButton_extractBlendshape.clicked.connect(self.copy_blendshape_withEyeTeeth)
self.ui.pushButton_copyWeight.clicked.connect(self.skinCopyWeight)
self.ui.pushButton_resetBS.clicked.connect(self.resetBS)
self.ui.pushButton_alignNormal.clicked.connect(self.alignNormal)
self.ui.pushButton_autoRig.clicked.connect(self.autoControlRig)
self.ui.pushButton_resetRig.clicked.connect(self.resetSlider)
self.ui.pushButton_saveJawOpen.clicked.connect(self.saveJawOpen)
self.ui.pushButton_saveJawLeft.clicked.connect(self.saveJawLeft)
self.ui.pushButton_saveJawRight.clicked.connect(self.saveJawRight)
self.ui.pushButton_saveJawForward.clicked.connect(self.saveJawForward)
self.ui.pushButton_jawOpen.clicked.connect(self.change_jawOpen)
self.ui.pushButton_jawLeft.clicked.connect(self.change_jawLeft)
self.ui.pushButton_jawRight.clicked.connect(self.change_jawRight)
self.ui.pushButton_jawForward.clicked.connect(self.change_jawForward)
# 헤어 추가
self.ui.pb_addHair.clicked.connect(self.add_hairMesh)
self.ui.pb_listWidgetClear.clicked.connect(self.listWidget_clear)
self.ui.pb_addHairBS.clicked.connect(self.make_blendshape_hairs)
self.ui.listWidget.itemDoubleClicked.connect(self.select_item)
self.ui.pb_bindSkinHair.clicked.connect(self.bindSkinHair)
def bindSkinHair(self):
if not cmds.ls(sl=1)[0]:
print('Select Hair')
return
# ICT_Base에 주둥이에 붙어있는 Joint들
mouthJoints = [u'MouthUp_Joint', u'MouthUp_Joint1', u'MouthUp_Joint2', u'MouthUp_Joint3', u'MouthUp_Joint4',
u'MouthUp_Joint5', u'MouthUp_Joint6', u'MouthUp_Joint7', u'MouthUp_Joint8', u'MouthUp_Joint9',
u'MouthUp_Joint10', u'MouthUp_Joint11', u'MouthUp_Joint12', u'MouthUp_Joint13',
u'MouthUp_Joint14', u'MouthUp_Joint15', u'MouthUp_Joint16', u'MouthUp_Joint17',
u'MouthUp_Joint18', u'MouthUp_Joint19', u'MouthUp_Joint20', u'MouthUp_Joint21',
u'MouthUp_Joint22', u'MouthUp_Joint23', u'MouthUp_Joint24', u'MouthUp_Joint25',
u'MouthUp_Joint26', u'MouthUp_Joint27', u'MouthUp_Joint28', u'MouthUp_Joint29',
u'MouthUp_Joint30', u'MouthUp_Joint31', u'MouthUp_Joint32', u'MouthUp_Joint33',
u'MouthUp_Joint34', u'MouthUp_Joint35', u'MouthUp_Joint36', u'MouthUp_Joint37',
u'MouthUp_Joint38', u'MouthUp_Joint39', u'MouthUp_Joint40', u'MouthUp_Joint41',
u'MouthUp_Joint42', u'MouthUp_Joint43', u'MouthUp_Joint44', u'MouthUp_Joint45',
u'MouthUp_Joint46', u'MouthUp_Joint47', u'MouthUp_Joint48', u'MouthUp_Joint49',
u'MouthUp_Joint50', u'MouthUp_Joint51', u'MouthUp_Joint52', u'MouthUp_Joint53',
u'MouthUp_Joint54', u'MouthUp_Joint55', u'MouthUp_Joint56', u'MouthUp_Joint57',
u'MouthUp_Joint58', u'MouthUp_Joint59', u'MouthUp_Joint60', u'MouthUp_Joint61',
u'MouthUp_Joint62', u'MouthUp_Joint63', u'MouthUp_Joint64', u'MouthUp_Joint65',
u'MouthUp_Joint66', u'MouthUp_Joint67', u'MouthUp_Joint68', u'MouthUp_Joint69',
u'MouthUp_Joint70', u'MouthUp_Joint71', u'MouthUp_Joint72', u'MouthUp_Joint73',
u'MouthUp_Joint74', u'MouthUp_Joint75', u'MouthUp_Joint76', u'MouthUp_Joint77',
u'MouthUp_Joint78', u'MouthUp_Joint79', u'MouthUp_Joint80', u'MouthUp_Joint81',
u'MouthUp_Joint82', u'MouthUp_Joint83', u'MouthUp_Joint84', u'MouthUp_Joint85',
u'MouthUp_Joint86', u'MouthUp_Joint87', u'MouthUp_Joint88', u'MouthUp_Joint89',
u'MouthUp_Joint90', u'MouthUp_Joint91', u'MouthUp_Joint92', u'MouthUp_Joint93',
u'MouthUp_Joint94', u'MouthUp_Joint95', u'MouthUp_Joint96', u'MouthUp_Joint97',
u'MouthUp_Joint98', u'MouthUp_Joint99', u'MouthUp_Joint100', u'MouthUp_Joint101',
u'MouthUp_Joint102', u'MouthUp_Joint103', u'MouthUp_Joint104', u'MouthUp_Joint105',
u'MouthUp_Joint106', u'MouthUp_Joint107', u'MouthUp_Joint108', u'MouthUp_Joint109',
u'MouthUp_Joint110', u'MouthUp_Joint111', u'MouthUp_Joint112']
mouthJoints.append(cmds.ls(sl=1)[0])
try:
cmds.skinCluster(mouthJoints, skinMethod=0, toSelectedBones=1)
except:
print'can not skin'
self.ui.pb_bindSkinHair.setStyleSheet(self.buttonColor)
# 선택못하도록 숨긴다
cmds.hide('resultBaseMesh')
def add_hairMesh(self):
items = cmds.ls(sl=1)
for item in items:
# 히스토리 지우기
cmds.delete(item, constructionHistory=True)
# 리스트위젯에 추가
if item not in self.hairMeshes:
self.ui.listWidget.addItem(item)
self.hairMeshes.append(item)
self.ui.pb_addHair.setStyleSheet(self.buttonColor)
def listWidget_clear(self):
self.ui.listWidget.clear()
self.hairMeshes= []
def select_item(self, item):
cmds.select(item.text())
def createWrap(self, *args, **kwargs):
influence = args[0]
surface = args[1]
shapes = cmds.listRelatives(influence, shapes=True)
influenceShape = shapes[0]
shapes = cmds.listRelatives(surface, shapes=True)
surfaceShape = shapes[0]
# create wrap deformer
weightThreshold = kwargs.get('weightThreshold', 0.0)
maxDistance = kwargs.get('maxDistance', 0.1)
exclusiveBind = kwargs.get('exclusiveBind', False)
autoWeightThreshold = kwargs.get('autoWeightThreshold', True)
falloffMode = kwargs.get('falloffMode', 0)
wrapData = cmds.deformer(surface, type='wrap')
wrapNode = wrapData[0]
cmds.setAttr(wrapNode + '.weightThreshold', weightThreshold)
cmds.setAttr(wrapNode + '.maxDistance', maxDistance)
cmds.setAttr(wrapNode + '.exclusiveBind', exclusiveBind)
cmds.setAttr(wrapNode + '.autoWeightThreshold', autoWeightThreshold)
cmds.setAttr(wrapNode + '.falloffMode', falloffMode)
cmds.connectAttr(surface + '.worldMatrix[0]', wrapNode + '.geomMatrix')
# add influence
duplicateData = cmds.duplicate(influence, name=influence + 'Base')
base = duplicateData[0]
shapes = cmds.listRelatives(base, shapes=True)
baseShape = shapes[0]
cmds.hide(base)
# create dropoff attr if it doesn't exist
if not cmds.attributeQuery('dropoff', n=influence, exists=True):
# cmds.addAttr(influence, sn='dr', ln='dropoff', dv=4.0, min=0.0, max=20.0)
cmds.addAttr(influence, sn='dr', ln='dropoff', dv=4.0, min=0.0, max=1.0)
cmds.setAttr(influence + '.dr', k=True)
# if type mesh
if cmds.nodeType(influenceShape) == 'mesh':
# create smoothness attr if it doesn't exist
if not cmds.attributeQuery('smoothness', n=influence, exists=True):
cmds.addAttr(influence, sn='smt', ln='smoothness', dv=0.0, min=0.0)
cmds.setAttr(influence + '.smt', k=True)
# create the inflType attr if it doesn't exist
if not cmds.attributeQuery('inflType', n=influence, exists=True):
cmds.addAttr(influence, at='short', sn='ift', ln='inflType', dv=2, min=1, max=2)
cmds.connectAttr(influenceShape + '.worldMesh', wrapNode + '.driverPoints[0]')
cmds.connectAttr(baseShape + '.worldMesh', wrapNode + '.basePoints[0]')
cmds.connectAttr(influence + '.inflType', wrapNode + '.inflType[0]')
cmds.connectAttr(influence + '.smoothness', wrapNode + '.smoothness[0]')
# if type nurbsCurve or nurbsSurface
if cmds.nodeType(influenceShape) == 'nurbsCurve' or cmds.nodeType(influenceShape) == 'nurbsSurface':
# create the wrapSamples attr if it doesn't exist
if not cmds.attributeQuery('wrapSamples', n=influence, exists=True):
cmds.addAttr(influence, at='short', sn='wsm', ln='wrapSamples', dv=10, min=1)
cmds.setAttr(influence + '.wsm', k=True)
cmds.connectAttr(influenceShape + '.ws', wrapNode + '.driverPoints[0]')
cmds.connectAttr(baseShape + '.ws', wrapNode + '.basePoints[0]')
cmds.connectAttr(influence + '.wsm', wrapNode + '.nurbsSamples[0]')
cmds.connectAttr(influence + '.dropoff', wrapNode + '.dropoff[0]')
# I want to return a pyNode object for the wrap deformer.
# I do not see the reason to rewrite the code here into pymel.
# return wrapNode
#return pm.nt.Wrap(wrapNode)
def make_blendshape_hairs(self):
# ICT_Base 가 아직 있다는 가정
if self.hairMeshes:
for hairmesh in self.hairMeshes:
# 얼굴에 사용했던 리스트를 초기화
baseMesh = 'ICT_Base'
# baseMesh = 'resultBaseMesh'
extractedBS = []
meshHistory = cmds.listHistory(baseMesh)
bsNodeName = cmds.ls(meshHistory, type='blendShape')[0]
bsNameList = cmds.listAttr(bsNodeName + '.w', m=True)
baseHair = cmds.duplicate(hairmesh)
# 원래 이름앞에 Result라고 붙인다
baseHair = cmds.rename(baseHair, 'result_' + hairmesh)
# 혹시 모르니 BS들을 초기화
for bs in bsNameList:
if bs in self.ICTBlendShapeList:
cmds.setAttr(bsNodeName + '.' + bs, 0)
else:
cmds.setAttr(bsNodeName + '.' + bs, 1)
# hair에 대한 bs 복사 시작
for bs in self.ICTBlendShapeList:
cmds.setAttr(bsNodeName + '.' + bs, 1)
# 복사
dup = cmds.duplicate(hairmesh)
# Blendshape 이름으로 변경
resultBSMesh = cmds.rename(dup, bs)
cmds.setAttr(bsNodeName + '.' + bs, 0)
extractedBS.append(resultBSMesh)
# 블렌드쉐입 추가
cmds.blendShape(extractedBS, baseHair, tc=True)
# 블렌드쉐입 메쉬들 삭제
cmds.delete(extractedBS)
# 완료된 수염들을 나중에 skin을 위해서 따로 저장해둔다
self.result_hairMeshes.append(baseHair)
# 작업된 헤어와 헷깔리지 않게 지우자
# for | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
#
################################################################################
# =============================================================================
# HeteroSecureBoostingGuest
# =============================================================================
import functools
from operator import itemgetter
import numpy as np
from federatedml.tree.tree_core.predict_cache import PredictDataCache
from federatedml.util.io_check import assert_io_num_rows_equal
from numpy import random
from arch.api.utils import log_utils
from fate_flow.entity.metric import Metric
from fate_flow.entity.metric import MetricMeta
from federatedml.feature.binning.quantile_binning import QuantileBinning
from federatedml.feature.fate_element_type import NoneType
from federatedml.loss import FairLoss
from federatedml.loss import HuberLoss
from federatedml.loss import LeastAbsoluteErrorLoss
from federatedml.loss import LeastSquaredErrorLoss
from federatedml.loss import LogCoshLoss
from federatedml.loss import SigmoidBinaryCrossEntropyLoss
from federatedml.loss import SoftmaxCrossEntropyLoss
from federatedml.loss import TweedieLoss
from federatedml.optim.convergence import converge_func_factory
from federatedml.param.evaluation_param import EvaluateParam
from federatedml.param.feature_binning_param import FeatureBinningParam
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import BoostingTreeModelMeta
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import ObjectiveMeta
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import QuantileMeta
from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import BoostingTreeModelParam
from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import FeatureImportanceInfo
from federatedml.secureprotol import IterativeAffineEncrypt
from federatedml.secureprotol import PaillierEncrypt
from federatedml.secureprotol.encrypt_mode import EncryptModeCalculator
from federatedml.statistic import data_overview
from federatedml.transfer_variable.transfer_class.hetero_secure_boost_transfer_variable import \
HeteroSecureBoostingTreeTransferVariable
from federatedml.tree import BoostingTree
from federatedml.tree import HeteroDecisionTreeGuest
from federatedml.util import consts
from federatedml.util.classify_label_checker import ClassifyLabelChecker
from federatedml.util.classify_label_checker import RegressionLabelChecker
LOGGER = log_utils.getLogger()
class HeteroSecureBoostingTreeGuest(BoostingTree):
    def __init__(self):
        """Initialize guest-side SecureBoost with empty training/model state."""
        super(HeteroSecureBoostingTreeGuest, self).__init__()
        # NOTE(review): historical misspelling ("convegence") is kept because
        # check_convergence() reads this exact attribute name.
        self.convegence = None
        self.y = None  # label table (instance id -> label)
        self.F = None  # accumulated boosting scores during fit
        self.predict_F = None  # accumulated boosting scores during predict
        self.data_bin = None  # quantile-binned feature table
        self.loss = None  # loss object, set by set_loss()
        self.init_score = None  # initial score returned by loss.initialize()
        self.classes_dict = {}
        self.classes_ = []  # sorted class labels (classification only)
        self.num_classes = 0
        self.classify_target = "binary"  # switched to "multinomial" if > 2 classes
        self.feature_num = None
        self.encrypter = None  # homomorphic encrypter (Paillier / iterative affine)
        self.grad_and_hess = None  # per-instance (gradient, hessian) table
        self.tree_dim = 1  # trees per boosting round (= num_classes if multinomial)
        self.tree_meta = None  # shared meta of all fitted trees
        self.trees_ = []  # fitted tree params, one per (round, dim)
        self.history_loss = []  # training loss per boosting round
        self.bin_split_points = None
        self.bin_sparse_points = None
        self.encrypted_mode_calculator = None
        self.predict_data_cache = PredictDataCache()  # caches per-dataset scores across predict calls
        self.feature_importances_ = {}  # fid -> accumulated importance
        self.role = consts.GUEST
        self.transfer_variable = HeteroSecureBoostingTreeTransferVariable()
        self.data_alignment_map = {}  # dataset key -> header-aligned data table
def set_loss(self, objective_param):
loss_type = objective_param.objective
params = objective_param.params
LOGGER.info("set objective, objective is {}".format(loss_type))
if self.task_type == consts.CLASSIFICATION:
if loss_type == "cross_entropy":
if self.num_classes == 2:
self.loss = SigmoidBinaryCrossEntropyLoss()
else:
self.loss = SoftmaxCrossEntropyLoss()
else:
raise NotImplementedError("objective %s not supported yet" % (loss_type))
elif self.task_type == consts.REGRESSION:
if loss_type == "lse":
self.loss = LeastSquaredErrorLoss()
elif loss_type == "lae":
self.loss = LeastAbsoluteErrorLoss()
elif loss_type == "huber":
self.loss = HuberLoss(params[0])
elif loss_type == "fair":
self.loss = FairLoss(params[0])
elif loss_type == "tweedie":
self.loss = TweedieLoss(params[0])
elif loss_type == "log_cosh":
self.loss = LogCoshLoss()
else:
raise NotImplementedError("objective %s not supported yet" % (loss_type))
else:
raise NotImplementedError("objective %s not supported yet" % (loss_type))
def convert_feature_to_bin(self, data_instance):
LOGGER.info("convert feature to bins")
param_obj = FeatureBinningParam(bin_num=self.bin_num)
if self.use_missing:
binning_obj = QuantileBinning(param_obj, abnormal_list=[NoneType()])
else:
binning_obj = QuantileBinning(param_obj)
binning_obj.fit_split_points(data_instance)
self.data_bin, self.bin_split_points, self.bin_sparse_points = binning_obj.convert_feature_to_bin(data_instance)
LOGGER.info("convert feature to bins over")
def set_y(self):
LOGGER.info("set label from data and check label")
self.y = self.data_bin.mapValues(lambda instance: instance.label)
self.check_label()
def generate_flowid(self, round_num, tree_num):
LOGGER.info("generate flowid, flowid {}".format(self.flowid))
return ".".join(map(str, [self.flowid, round_num, tree_num]))
def check_label(self):
LOGGER.info("check label")
if self.task_type == consts.CLASSIFICATION:
self.num_classes, self.classes_ = ClassifyLabelChecker.validate_label(self.data_bin)
if self.num_classes > 2:
self.classify_target = "multinomial"
self.tree_dim = self.num_classes
range_from_zero = True
for _class in self.classes_:
try:
if _class >= 0 and _class < self.num_classes and isinstance(_class, int):
continue
else:
range_from_zero = False
break
except:
range_from_zero = False
self.classes_ = sorted(self.classes_)
if not range_from_zero:
class_mapping = dict(zip(self.classes_, range(self.num_classes)))
self.y = self.y.mapValues(lambda _class: class_mapping[_class])
else:
RegressionLabelChecker.validate_label(self.data_bin)
self.set_loss(self.objective_param)
def generate_encrypter(self):
LOGGER.info("generate encrypter")
if self.encrypt_param.method.lower() == consts.PAILLIER.lower():
self.encrypter = PaillierEncrypt()
self.encrypter.generate_key(self.encrypt_param.key_length)
elif self.encrypt_param.method.lower() == consts.ITERATIVEAFFINE.lower():
self.encrypter = IterativeAffineEncrypt()
self.encrypter.generate_key(self.encrypt_param.key_length)
else:
raise NotImplementedError("encrypt method not supported yes!!!")
self.encrypted_calculator = EncryptModeCalculator(self.encrypter, self.calculated_mode, self.re_encrypted_rate)
@staticmethod
def accumulate_f(f_val, new_f_val, lr=0.1, idx=0):
f_val[idx] += lr * new_f_val
return f_val
def update_feature_importance(self, tree_feature_importance):
for fid in tree_feature_importance:
if fid not in self.feature_importances_:
self.feature_importances_[fid] = 0
self.feature_importances_[fid] += tree_feature_importance[fid]
    def update_f_value(self, new_f=None, tidx=-1, mode="train"):
        """Accumulate one tree's output into the boosting scores.

        On the very first training call (``self.F`` is None) the scores
        are initialized by the loss (per-class when ``tree_dim > 1``);
        otherwise ``lr * new_f`` is added into slot *tidx* of ``self.F``
        (train) or ``self.predict_F`` (predict) via :meth:`accumulate_f`.
        """
        LOGGER.info("update tree f value, tree idx is {}".format(tidx))
        if mode == "train" and self.F is None:
            if self.tree_dim > 1:
                self.F, self.init_score = self.loss.initialize(self.y, self.tree_dim)
            else:
                self.F, self.init_score = self.loss.initialize(self.y)
        else:
            # bind learning rate and tree slot, leaving (f_val, new_f_val)
            # free for the table join
            accumulate_f = functools.partial(self.accumulate_f,
                                             lr=self.learning_rate,
                                             idx=tidx)
            if mode == "train":
                self.F = self.F.join(new_f, accumulate_f)
            else:
                self.predict_F = self.predict_F.join(new_f, accumulate_f)
def compute_grad_and_hess(self):
LOGGER.info("compute grad and hess")
loss_method = self.loss
if self.task_type == consts.CLASSIFICATION:
self.grad_and_hess = self.y.join(self.F, lambda y, f_val: \
(loss_method.compute_grad(y, loss_method.predict(f_val)), \
loss_method.compute_hess(y, loss_method.predict(f_val))))
else:
self.grad_and_hess = self.y.join(self.F, lambda y, f_val:
(loss_method.compute_grad(y, f_val),
loss_method.compute_hess(y, f_val)))
def compute_loss(self):
LOGGER.info("compute loss")
if self.task_type == consts.CLASSIFICATION:
loss_method = self.loss
y_predict = self.F.mapValues(lambda val: loss_method.predict(val))
loss = loss_method.compute_loss(self.y, y_predict)
elif self.task_type == consts.REGRESSION:
if self.objective_param.objective in ["lse", "lae", "logcosh", "tweedie", "log_cosh", "huber"]:
loss_method = self.loss
loss = loss_method.compute_loss(self.y, self.F)
else:
loss_method = self.loss
y_predict = self.F.mapValues(lambda val: loss_method.predict(val))
loss = loss_method.compute_loss(self.y, y_predict)
return float(loss)
def get_grad_and_hess(self, tree_idx):
LOGGER.info("get grad and hess of tree {}".format(tree_idx))
grad_and_hess_subtree = self.grad_and_hess.mapValues(
lambda grad_and_hess: (grad_and_hess[0][tree_idx], grad_and_hess[1][tree_idx]))
return grad_and_hess_subtree
    def check_convergence(self, loss):
        """Return True when *loss* has converged under a "diff" criterion.

        The convergence checker is created lazily on first call.  The
        misspelled attribute name ``convegence`` is shared with
        ``__init__`` and therefore kept.
        """
        LOGGER.info("check convergence")
        if self.convegence is None:
            self.convegence = converge_func_factory("diff", self.tol)
        return self.convegence.is_converge(loss)
def sample_valid_features(self):
LOGGER.info("sample valid features")
if self.feature_num is None:
self.feature_num = self.bin_split_points.shape[0]
choose_feature = random.choice(range(0, self.feature_num), \
max(1, int(self.subsample_feature_rate * self.feature_num)), replace=False)
valid_features = [False for i in range(self.feature_num)]
for fid in choose_feature:
valid_features[fid] = True
return valid_features
    def sync_tree_dim(self):
        """Send the tree dimension (trees per boosting round) to all hosts."""
        LOGGER.info("sync tree dim to host")
        self.transfer_variable.tree_dim.remote(self.tree_dim,
                                               role=consts.HOST,
                                               idx=-1)
    def sync_stop_flag(self, stop_flag, num_round):
        """Tell all hosts whether boosting stops after round *num_round*."""
        LOGGER.info("sync stop flag to host, boost round is {}".format(num_round))
        self.transfer_variable.stop_flag.remote(stop_flag,
                                                role=consts.HOST,
                                                idx=-1,
                                                suffix=(num_round,))
    def sync_predict_start_round(self, num_round):
        """Tell all hosts from which boosting round prediction resumes (cache-aware)."""
        LOGGER.info("sync predict start round {}".format(num_round))
        self.transfer_variable.predict_start_round.remote(num_round,
                                                          role=consts.HOST,
                                                          idx=-1)
    def fit(self, data_inst, validate_data=None):
        """Train the boosted tree ensemble on the guest side.

        Pipeline: bin features, extract labels, init scores and the
        encrypter, then per boosting round compute (grad, hess), fit one
        tree per dimension, accumulate tree outputs into ``F``, track
        loss, and stop early on validation or loss convergence.

        :param data_inst: training data table.
        :param validate_data: optional validation data table.
        """
        LOGGER.info("begin to train secureboosting guest model")
        self.gen_feature_fid_mapping(data_inst.schema)
        self.validation_strategy = self.init_validation_strategy(data_inst, validate_data)
        data_inst = self.data_alignment(data_inst)
        self.convert_feature_to_bin(data_inst)
        self.set_y()
        # first call with no new_f: initializes F and init_score from the loss
        self.update_f_value()
        self.generate_encrypter()
        self.sync_tree_dim()
        self.callback_meta("loss",
                           "train",
                           MetricMeta(name="train",
                                      metric_type="LOSS",
                                      extra_metas={"unit_name": "iters"}))
        for i in range(self.num_trees):
            self.compute_grad_and_hess()
            # one tree per dimension (multinomial fits num_classes trees per round)
            for tidx in range(self.tree_dim):
                LOGGER.info("start to fit, boost round: {}, tree index: {}".format(i, tidx))
                tree_inst = HeteroDecisionTreeGuest(self.tree_param)
                tree_inst.set_inputinfo(self.data_bin, self.get_grad_and_hess(tidx), self.bin_split_points,
                                        self.bin_sparse_points)
                valid_features = self.sample_valid_features()
                tree_inst.set_valid_features(valid_features)
                tree_inst.set_encrypter(self.encrypter)
                tree_inst.set_encrypted_mode_calculator(self.encrypted_calculator)
                tree_inst.set_flowid(self.generate_flowid(i, tidx))
                tree_inst.set_host_party_idlist(self.component_properties.host_party_idlist)
                tree_inst.set_runtime_idx(self.component_properties.local_partyid)
                tree_inst.fit()
                tree_meta, tree_param = tree_inst.get_model()
                self.trees_.append(tree_param)
                # all trees share one meta; keep the first
                if self.tree_meta is None:
                    self.tree_meta = tree_meta
                self.update_f_value(new_f=tree_inst.predict_weights, tidx=tidx)
                self.update_feature_importance(tree_inst.get_feature_importance())
            loss = self.compute_loss()
            self.history_loss.append(loss)
            LOGGER.debug("boost round {} loss is {}".format(i, loss))
            self.callback_metric("loss",
                                 "train",
                                 [Metric(i, loss)])
            if self.validation_strategy:
                self.validation_strategy.validate(self, i)
                if self.validation_strategy.need_stop():
                    LOGGER.debug('early stopping triggered')
                    break
            # loss-convergence early stop; hosts must learn the decision too
            if self.n_iter_no_change is True:
                if self.check_convergence(loss):
                    self.sync_stop_flag(True, i)
                    LOGGER.debug("check loss convergence on boost round {}".format(i))
                    break
                else:
                    self.sync_stop_flag(False, i)
        LOGGER.debug("history loss is {}".format(self.history_loss))
        self.callback_meta("loss",
                           "train",
                           MetricMeta(name="train",
                                      metric_type="LOSS",
                                      extra_metas={"Best": min(self.history_loss)}))
        if self.validation_strategy and self.validation_strategy.has_saved_best_model():
            self.load_model(self.validation_strategy.cur_best_model)
        LOGGER.info("end to train secureboosting guest model")
    def predict_f_value(self, data_inst, cache_dataset_key):
        """Accumulate tree outputs into ``self.predict_F`` for *data_inst*.

        Uses the per-dataset score cache: prediction restarts from the
        last cached round (or from the init score when the dataset was
        never seen), runs only the remaining trees, and stores the
        updated scores back into the cache.
        """
        LOGGER.debug("predict tree f value, there are {} trees".format(len(self.trees_)))
        init_score = self.init_score
        # -1 means this dataset key has never been predicted before
        last_round = self.predict_data_cache.predict_data_last_round(cache_dataset_key)
        rounds = len(self.trees_) // self.tree_dim
        if last_round == -1:
            self.predict_F = data_inst.mapValues(lambda v: init_score)
        else:
            LOGGER.debug("hit cache, cached round is {}".format(last_round))
            if last_round >= rounds - 1:
                LOGGER.debug("predict data cached, rounds is {}, total cached round is {}".format(rounds, last_round))
            self.predict_F = self.predict_data_cache.predict_data_at(cache_dataset_key, min(rounds - 1, last_round))
        # hosts replay the same remaining rounds
        self.sync_predict_start_round(last_round + 1)
        for i in range(last_round + 1, rounds):
            for tidx in range(self.tree_dim):
                LOGGER.info("start to predict, boost round: {}, tree index: {}".format(i, tidx))
                tree_inst = HeteroDecisionTreeGuest(self.tree_param)
                tree_inst.load_model(self.tree_meta, self.trees_[i * self.tree_dim + tidx])
                # tree_inst.set_tree_model(self.trees_[i * self.tree_dim + tidx])
                tree_inst.set_flowid(self.generate_flowid(i, tidx))
                tree_inst.set_runtime_idx(self.component_properties.local_partyid)
                tree_inst.set_host_party_idlist(self.component_properties.host_party_idlist)
                predict_data = tree_inst.predict(data_inst)
                self.update_f_value(new_f=predict_data, tidx=tidx, mode="predict")
        self.predict_data_cache.add_data(cache_dataset_key, self.predict_F)
@assert_io_num_rows_equal
def predict(self, data_inst):
LOGGER.info("start predict")
cache_dataset_key = self.predict_data_cache.get_data_key(data_inst)
if cache_dataset_key in self.data_alignment_map:
data_inst = self.data_alignment_map[cache_dataset_key]
else:
data_inst = self.data_alignment(data_inst)
header = [None] * len(self.feature_name_fid_mapping)
for idx, col in self.feature_name_fid_mapping.items():
header[idx] = col
data_inst = data_overview.header_alignment(data_inst, header)
self.data_alignment_map[cache_dataset_key] = data_inst
self.predict_f_value(data_inst, cache_dataset_key)
if self.task_type == consts.CLASSIFICATION:
loss_method = self.loss
if self.num_classes == 2:
predicts = self.predict_F.mapValues(lambda f: float(loss_method.predict(f)))
else:
predicts = self.predict_F.mapValues(lambda f: loss_method.predict(f).tolist())
elif self.task_type == consts.REGRESSION:
if self.objective_param.objective in ["lse", "lae", "huber", "log_cosh", "fair", "tweedie"]:
predicts = self.predict_F
else:
raise NotImplementedError("objective {} not supported yet".format(self.objective_param.objective))
if self.task_type == consts.CLASSIFICATION:
classes_ = self.classes_
if self.num_classes == 2:
threshold = self.predict_param.threshold
predict_result = data_inst.join(predicts, lambda inst, pred: [inst.label,
classes_[1] if pred > threshold else
classes_[0], pred,
{"0": 1 - pred, "1": pred}])
else:
predict_label = predicts.mapValues(lambda preds: classes_[np.argmax(preds)])
predict_result = data_inst.join(predicts, lambda inst, preds: [inst.label, classes_[np.argmax(preds)],
np.max(preds),
dict(zip(map(str, classes_), preds))])
elif self.task_type == consts.REGRESSION:
predict_result = data_inst.join(predicts, lambda inst, pred: [inst.label, float(pred), float(pred),
{"label": float(pred)}])
else:
raise NotImplementedError("task type {} not supported yet".format(self.task_type))
LOGGER.info("end predict")
return predict_result
    def get_feature_importance(self):
        """Return the accumulated {fid: importance} mapping gathered during fit."""
        return self.feature_importances_
def get_model_meta(self):
model_meta | |
matrix=_create_matrices, name=name, deterministic=deterministic,
random_state=random_state)
class Emboss(Convolve):
    """Emboss images and alpha-blend the result with the original input.

    The embossed version pronounces highlights and shadows, making the
    image look as if it was recreated on a metal plate ("embossed").

    dtype support::

        See ``imgaug.augmenters.convolutional.Convolve``.

    Parameters
    ----------
    alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Blending factor of the embossed image: at ``0.0`` only the
        original image is visible, at ``1.0`` only its embossed version.
        A number is used as-is; a tuple ``(a, b)`` samples uniformly from
        ``[a, b]`` per image; a list samples one entry per image; a
        ``StochasticParameter`` is sampled per image.

    strength : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Strength of the embossing; sane values lie in ``[0.0, 2.0]``
        with ``1.0`` the standard effect.  Sampling rules as for
        *alpha*.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.bit_generator.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.Emboss(alpha=(0.0, 1.0), strength=(0.5, 1.5))

    Emboss with a strength sampled uniformly from ``[0.5, 1.5]`` and
    alpha-blend the result with the input using a random factor between
    ``0%`` and ``100%``.

    """

    def __init__(self, alpha=0, strength=1,
                 name=None, deterministic=False, random_state=None):
        alpha_param = iap.handle_continuous_param(
            alpha, "alpha",
            value_range=(0, 1.0), tuple_to_uniform=True, list_to_choice=True)
        strength_param = iap.handle_continuous_param(
            strength, "strength",
            value_range=(0, None), tuple_to_uniform=True, list_to_choice=True)

        def _generate_matrices(_image, nb_channels, random_state_func):
            alpha_sample = alpha_param.draw_sample(
                random_state=random_state_func)
            assert 0 <= alpha_sample <= 1.0, (
                "Expected 'alpha' to be in the interval [0.0, 1.0], "
                "got %.4f." % (alpha_sample,))
            s = strength_param.draw_sample(random_state=random_state_func)
            # identity kernel and emboss kernel, blended by alpha
            identity = np.array([
                [0, 0, 0],
                [0, 1, 0],
                [0, 0, 0]
            ], dtype=np.float32)
            emboss = np.array([
                [-1 - s, -s, 0],
                [-s, 1, s],
                [0, s, 1 + s]
            ], dtype=np.float32)
            blended = (1 - alpha_sample) * identity + alpha_sample * emboss
            return [blended] * nb_channels

        super(Emboss, self).__init__(
            matrix=_generate_matrices, name=name, deterministic=deterministic,
            random_state=random_state)
# TODO add tests
# TODO move this to edges.py?
class EdgeDetect(Convolve):
    """Generate a black & white edge image and alpha-blend it with the input.

    dtype support::

        See ``imgaug.augmenters.convolutional.Convolve``.

    Parameters
    ----------
    alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Blending factor of the edge image: at ``0.0`` only the original
        image is visible, at ``1.0`` only the edge image.  A number is
        used as-is; a tuple ``(a, b)`` samples uniformly from ``[a, b]``
        per image; a list samples one entry per image; a
        ``StochasticParameter`` is sampled per image.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.bit_generator.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.EdgeDetect(alpha=(0.0, 1.0))

    Detect edges, mark them as black (non-edge) and white (edge) and
    alpha-blend the result with the input using a random blending factor
    between ``0%`` and ``100%``.

    """

    def __init__(self, alpha=0, name=None, deterministic=False,
                 random_state=None):
        alpha_param = iap.handle_continuous_param(
            alpha, "alpha",
            value_range=(0, 1.0), tuple_to_uniform=True, list_to_choice=True)

        def _generate_matrices(_image, nb_channels, random_state_func):
            alpha_sample = alpha_param.draw_sample(
                random_state=random_state_func)
            assert 0 <= alpha_sample <= 1.0, (
                "Expected 'alpha' to be in the interval [0.0, 1.0], "
                "got %.4f." % (alpha_sample,))
            # identity kernel and Laplacian edge kernel, blended by alpha
            identity = np.array([
                [0, 0, 0],
                [0, 1, 0],
                [0, 0, 0]
            ], dtype=np.float32)
            laplacian = np.array([
                [0, 1, 0],
                [1, -4, 1],
                [0, 1, 0]
            ], dtype=np.float32)
            blended = (1 - alpha_sample) * identity + alpha_sample * laplacian
            return [blended] * nb_channels

        super(EdgeDetect, self).__init__(
            matrix=_generate_matrices, name=name, deterministic=deterministic,
            random_state=random_state)
# TODO add tests
# TODO merge EdgeDetect and DirectedEdgeDetect?
# TODO deprecate and rename to AngledEdgeDetect
# TODO rename arg "direction" to "angle"
# TODO change direction/angle value range to (0, 360)
# TODO move this to edges.py?
class DirectedEdgeDetect(Convolve):
"""
Detect edges from specified angles and alpha-blend with the input image.
This augmenter first detects edges along a certain angle.
Usually, edges are detected in x- or y-direction, while here the edge
detection kernel is rotated to match a specified angle.
The result of applying the kernel is a black (non-edges) and white (edges)
image. That image is alpha-blended with the input image.
dtype support::
See ``imgaug.augmenters.convolutional.Convolve``.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Blending factor of the edge image. At ``0.0``, only the original
image is visible, at ``1.0`` only the edge image is visible.
* If a number, exactly that value will always be used.
* If a tuple ``(a, b)``, a random value will be sampled from the
interval ``[a, b]`` per image.
* If a list, a random value will be sampled from that list
per image.
* If a ``StochasticParameter``, a value will be sampled from that
parameter per image.
direction : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Angle (in degrees) of edges to pronounce, where ``0`` represents
``0`` degrees and ``1.0`` represents 360 degrees (both clockwise,
starting at the top). Default value is ``(0.0, 1.0)``, i.e. pick a
random angle per image.
* If a number, exactly that value will always be used.
* If a tuple ``(a, b)``, a random value will be sampled from the
interval ``[a, b]`` will be sampled per image.
* If a list, then a random value will be sampled from that list
per image.
* If a ``StochasticParameter``, a value will be sampled from the
parameter per image.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.bit_generator.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.DirectedEdgeDetect(alpha=1.0, direction=0)
Turn input images into edge images in which edges are detected from
the top side of the image (i.e. the top sides of horizontal edges are
part of the edge image, while vertical edges are ignored).
>>> aug = iaa.DirectedEdgeDetect(alpha=1.0, direction=90/360)
Same as before, but edges are detected from the right. Horizontal edges
are now ignored.
>>> aug = iaa.DirectedEdgeDetect(alpha=1.0, direction=(0.0, 1.0))
Same as before, but edges are detected from a random angle sampled
uniformly from the interval ``[0deg, 360deg]``.
>>> aug = iaa.DirectedEdgeDetect(alpha=(0.0, 0.3), direction=0)
Similar to the previous examples, but here the edge image is alpha-blended
with the input image. The result is a mixture between the edge image and
the input image. The blending factor is randomly sampled between ``0%``
and ``30%``.
"""
def __init__(self, alpha=0, direction=(0.0, 1.0),
name=None, deterministic=False, random_state=None):
alpha_param = iap.handle_continuous_param(
alpha, "alpha",
value_range=(0, 1.0), tuple_to_uniform=True, list_to_choice=True)
direction_param = iap.handle_continuous_param(
direction, "direction",
value_range=None, tuple_to_uniform=True, list_to_choice=True)
def _create_matrices(_image, nb_channels, random_state_func):
alpha_sample = alpha_param.draw_sample(
random_state=random_state_func)
assert 0 <= alpha_sample <= 1.0, (
"Expected 'alpha' to be in the interval [0.0, 1.0], "
"got %.4f." % (alpha_sample,))
direction_sample = direction_param.draw_sample(
random_state=random_state_func)
deg = int(direction_sample * 360) % 360
rad = np.deg2rad(deg)
x = np.cos(rad - 0.5*np.pi)
y = np.sin(rad - 0.5*np.pi)
direction_vector | |
<filename>plasmasm/get_symbols.py
# Copyright (C) 2011-2020 Airbus, <EMAIL>
try:
from plasmasm.python.compatibility import reversed, sorted
except ImportError:
pass
from plasmasm.symbols import comm_symbol_section
def get_dynsyms(e):
    """Return dynamic/imported symbols of binary *e* as a list of dicts.

    ELF: one entry per relocation found in the .rel/.rela variants of
    .plt/.dyn sections.  PE: one entry per import thunk.  Any other
    container: empty list.
    """
    if hasattr(e, 'Ehdr'):
        # ELF: scan .rel.plt / .rel.dyn / .rela.plt / .rela.dyn
        wanted = [prefix + suffix
                  for prefix in ['.rel', '.rela']
                  for suffix in ['.plt', '.dyn']]
        result = []
        for sh in e.sh:
            if sh.sh.name not in wanted:
                continue
            for r in sh.reltab:
                result.append({
                    'name': sh.sh.name + '.' + r.sym,
                    'address': r.offset,
                    'section': e.getsectionbyvad(r.offset).sh.name,
                })
        return result
    elif hasattr(e, 'isPE') and e.isPE():
        imports = []
        for desc in e.DirImport.impdesc:
            libname = desc.dlldescname.name.lower()
            for ii, imp in enumerate(desc.impbynames):
                funcname = imp.name if hasattr(imp, 'name') else str(imp)
                imports.append({
                    'name': libname + '_' + funcname,
                    'address': e.rva2virt(desc.firstthunk + 4 * ii),
                })
        return imports
    return []
def get_exports(e):
    """Return exported symbols of PE binary *e*; empty list for non-PE."""
    if not (hasattr(e, 'isPE') and e.isPE()):
        return []
    exports = []
    for direxport in e.DirExport:
        for addr, name in direxport.exports.values():
            if hasattr(addr, 'name'):
                # Forwarder RVA: the export is defined by a name, not an
                # address, so no new symbol is created.
                continue
            rva = addr.rva
            if e.COFFhdr.machine == pe.IMAGE_FILE_MACHINE_ARMNT:
                # To have the same display as IDA on PE for ARM
                rva -= 1
            exports.append({
                'name': str(name),
                'address': e.rva2virt(rva),
                'section': e.getsectionbyrva(rva).name,
            })
    return exports
def get_symbols(e):
    """Return ``(symbol_dicts, field_spec)`` for an ELF/COFF/Mach-O binary.

    *field_spec* is a list of ``(field_name, printf_format, extractor)``
    tuples describing the container's symbol-table layout; each symbol
    dict maps ``field_name -> extractor(raw_symbol)``.  Unknown
    containers yield ``([], [])``.
    """
    def as_dict(raw, fields):
        return dict((fname, extract(raw)) for fname, _fmt, extract in fields)
    if hasattr(e, 'Ehdr'):
        # ELF symbol table
        fields = [
            ('name',    '%-10r', lambda s: s.name),
            ('size',    '%d',    lambda s: s.size),
            ('ad',      '%#x',   lambda s: s.value),
            ('section', '%#x',   lambda s: s.shndx),
            ('STT',     '%#x',   lambda s: s.info & 0xf),
            ('STB',     '%#x',   lambda s: s.info >> 4),
            ('STV',     '%#x',   lambda s: s.other),
            ]
        return [as_dict(s, fields) for s in e.symbols], fields
    elif hasattr(e, 'isPE'):
        # COFF symbol table (may be absent in stripped PE files)
        fields = [
            ('name',    '%-28r', lambda s: s.name),
            ('type',    '%d',    lambda s: s.type),
            ('scl',     '%d',    lambda s: s.storageclass),
            ('section', '%#x',   lambda s: s.sectionnumber),
            ('ad',      '%#x',   lambda s: s.value),
            ('aux',     '%r',    lambda s: s.aux),
            ]
        if not hasattr(e, 'Symbols'):
            return [], fields
        return [as_dict(s, fields) for s in e.Symbols.symbols], fields
    elif hasattr(e, 'Mhdr'):
        # Mach-O: symbols live in dedicated load commands
        fields = [
            ('name', '%-28r', lambda s: s.name),
            ('sect', '%d',    lambda s: s.sectionindex),
            ('ad',   '%#x',   lambda s: s.value),
            ('type', '%#x',   lambda s: s.type),
            ('desc', '%#x',   lambda s: s.description),
            ]
        symtabs = [sect for sect in e.sect.sect if hasattr(sect, 'symbols')]
        if len(symtabs) == 0:
            return [], fields
        return [as_dict(s, fields)
                for sect in symtabs for s in sect.symbols], fields
    return [], []
def translate_symbol(s, e):
    """Translate one raw symbol dict (from get_symbols) into the neutral
    plasmasm representation.

    Returns a dict with keys among 'name', 'type', 'bind', 'visibility',
    'section', 'address', 'align', 'size' — or an empty dict when the
    symbol must be dropped (section markers, ABS placeholders, ...).

    :param s: symbol dict keyed by the field names from get_symbols.
    :param e: the containing binary (ELF / PE / Mach-O object).
    :raises ValueError: on symbol attributes unknown to this translator.
    """
    r = {}
    if hasattr(e, 'Ehdr'):
        r['name'] = s['name']
        # ============
        # type, bind and visibility
        from elfesteem import elf
        if s['STT'] == elf.STT_NOTYPE: pass
        elif s['STT'] == elf.STT_OBJECT: r['type'] = 'object'
        elif s['STT'] == elf.STT_FUNC: r['type'] = 'function'
        elif s['STT'] == elf.STT_SECTION: pass
        elif s['STT'] == elf.STT_FILE: pass
        elif s['STT'] == elf.STT_TLS: r['type'] = 'tls_object'
        else: raise ValueError("Unknown type %d"%s['STT'])
        if s['STB'] == elf.STB_LOCAL: r['bind'] = 'local'
        elif s['STB'] == elf.STB_GLOBAL: r['bind'] = 'globl'
        elif s['STB'] == elf.STB_WEAK: r['bind'] = 'weak'
        elif s['STB'] == elf.STB_GNU_UNIQUE:
            # GNU unique objects are emitted as weak + dedicated type
            r['bind'] = 'weak'
            r['type'] = 'gnu_unique_object'
        else: raise ValueError("Unknown binding %d"%s['STB'])
        if s['STV'] == elf.STV_DEFAULT: pass
        elif s['STV'] == elf.STV_INTERNAL: r['visibility'] = 'internal'
        elif s['STV'] == elf.STV_HIDDEN: r['visibility'] = 'hidden'
        elif s['STV'] == elf.STV_PROTECTED: r['visibility'] = 'protected'
        else: raise ValueError("Unknown visibility %d"%s['STV'])
        # ============
        # Special cases
        if s['STT'] == elf.STT_NOTYPE and s['name'].endswith('\001'):
            return {}
        if s['section'] == elf.SHN_ABS and s['STT'] != elf.STT_FILE:
            return {}
        # only reachable for STT_FILE symbols in SHN_ABS (see test above)
        if s['section'] == elf.SHN_ABS and s['name'] in ['', 'crtstuff.c']:
            return {}
        if s['STT'] == elf.STT_SECTION:
            return {}
        # undefined but named: keep the (type/bind/visibility-only) entry
        if s['section'] == elf.SHN_UNDEF and s['name'] != '':
            return r
        if s['STT'] == elf.STT_FILE:
            return { 'name': s['name'], 'bind': 'file' }
        # ============
        # section, address, align and size
        from plasmasm.parse_bin import parse_section_from_ELF
        if s['section'] == elf.SHN_COMMON:
            # for COMMON symbols the value field holds the alignment
            r['section'] = comm_symbol_section
            r['align'] = s['ad']
        elif s['section'] < len(e.sh.shlist):
            section = e.sh[s['section']].sh
            r['address'] = s['ad']
            r['section'] = section.name
            if r['section'] == '.bss' and s['size'] >= section.addralign:
                # This symbol probably is the reason why the section
                # is aligned
                r['align'] = section.addralign
        else:
            raise ValueError("%s has SHNDX %#x"%(s['name'], s['section']))
        r['size'] = s['size']
        # ============
        if not parse_section_from_ELF(r['section']):
            return {}
        if s['STB'] == elf.STB_LOCAL and e.Ehdr.machine == elf.EM_SPARC:
            # The same local symbol may appear at multiple addresses
            r['name'] += "@0x%08x"%s['ad']
    elif hasattr(e, 'isPE'):
        r['name'] = s['name']
        # ============
        # type, bind and visibility
        from elfesteem import pe
        if s['type']==pe.IMAGE_SYM_TYPE_NULL: pass
        elif s['type']==pe.IMAGE_SYM_DTYPE_FUNCTION<<4: r['type'] = 'function'
        else: raise ValueError("Unknown type %d"%s['type'])
        if s['scl']==pe.IMAGE_SYM_CLASS_EXTERNAL: r['bind'] = 'globl'
        elif s['scl']==pe.IMAGE_SYM_CLASS_STATIC: r['bind'] = 'local'
        elif s['scl']==pe.IMAGE_SYM_CLASS_LABEL: pass
        elif s['scl']==pe.IMAGE_SYM_CLASS_FILE: pass
        else: raise ValueError("Unknown storageclass %d"%s['scl'])
        # ============
        # Special cases
        if s['scl'] == pe.IMAGE_SYM_CLASS_LABEL:
            return {}
        # 0xfffe (IMAGE_SYM_DEBUG) + CLASS_FILE: filename lives in aux record
        if s['section'] == 0xfffe and s['scl'] == pe.IMAGE_SYM_CLASS_FILE:
            name = s['aux'][0].name
            if name == 'crt1.c': return {}
            return { 'name': name, 'bind': 'file' }
        if s['section'] == 0xffff:
            return {}
        if s['section'] == 0:
            return {}
        # hand-maintained list of toolchain-internal symbols to drop
        if s['name'] in ( '__data_start__',
                'hname', 'fthunk',
                'p.0',
                'dw2_object_mutex.0',
                'dw2_once.1',
                'sjl_fc_key.2',
                'sjl_once.3',
                'eh_globals_static.4',
                'eh_globals_key.5',
                'eh_globals_once.6',
                ):
            return {}
        if s['name'].startswith('.'):
            return {}
        # ============
        # section, address, align and size
        # COFF section numbers are 1-based
        section = e.SHList[s['section']-1]
        r['section'] = section.name.strip('\0')
        r['address'] = s['ad'] + e.rva2virt(section.addr)
    elif hasattr(e, 'Mhdr'):
        r['name'] = s['name']
        # ============
        # type, bind and visibility
        from elfesteem import macho
        if s['type'] & macho.N_EXT: r['bind'] = 'globl'
        # ============
        # section, address, align and size
        if s['sect'] != 0:
            # Mach-O section indices are 1-based
            section = e.sect.sect[s['sect']-1].sh
            r['section'] = "%s,%s" % (section.segname, section.sectname)
            r['address'] = s['ad']
        elif s['ad'] != 0:
            # common symbol: value is the size, desc encodes the alignment
            r['section'] = comm_symbol_section
            r['align'] = 1 << (s['desc'] >> 8)
            r['size'] = s['ad']
    return r
def deduplicate_symbols(symbol_list):
    """Make local symbol names unique so an assembly file can be regenerated.

    An assembler cannot emit two distinct symbols with the same name, yet
    object files frequently contain repeated local symbol names.  For each
    duplicated name:
      - if every duplicate has bind 'file', leave them untouched;
      - if every duplicate has bind 'local', append '....<hex address>' to
        each name to disambiguate them;
      - otherwise raise ValueError, since the duplication cannot be resolved.

    The symbols are modified in place; the (same) list is returned.
    """
    from collections import Counter
    count = Counter(s['name'] for s in symbol_list if 'name' in s)
    for name, occurrences in count.items():
        if occurrences <= 1:
            continue
        dup = [s for s in symbol_list if s.get('name') == name]
        # If all these symbols are 'file', then leave as it is
        if occurrences == len([s for s in dup if s.get('bind') == 'file']):
            continue
        # If all these symbols are 'local', then rename the symbols
        if occurrences == len([s for s in dup if s.get('bind') == 'local']):
            for s in dup:
                s['name'] += '....' + hex(int(s['address']))
            continue
        raise ValueError("Duplicate symbols %r" % dup)
    return symbol_list
################################################################
# Relocation management
# reloc_suffixes maps (container format, machine id, relocation type)
# -> (bit width, assembler suffix or None).  It is populated below, one
# try-block per object-file format, and a block is skipped silently when
# that elfesteem parser cannot be imported.
reloc_suffixes = {}
try:
    from elfesteem import pe
    def add_coff(machine_type, table):
        # Register COFF relocation suffixes for one machine type; machine
        # types unknown to this elfesteem version are ignored.
        if not machine_type in pe.__dict__:
            return
        machine_type = pe.__dict__[machine_type]
        for R, r_type in table.items():
            reloc_suffixes[('COFF', machine_type, R)] = r_type
    add_coff('IMAGE_FILE_MACHINE_I386',{
        pe.IMAGE_REL_I386_DIR32: (32, None),
        pe.IMAGE_REL_I386_REL32: (32, None),
        })
except ImportError:
    pass
try:
    from elfesteem import macho
    def add_macho(machine_type, table):
        # Register Mach-O relocation suffixes; table keys are
        # (r_type, r_length, r_pcrel) triples.
        if not machine_type in macho.__dict__:
            return
        machine_type = macho.__dict__[machine_type]
        for R, r_type in table.items():
            reloc_suffixes[('MACHO', machine_type, R)] = r_type
    add_macho('CPU_TYPE_X86',{
        (macho.GENERIC_RELOC_VANILLA,2,0): (32, None),
        (macho.GENERIC_RELOC_VANILLA,2,1): (32, None),
        (macho.GENERIC_RELOC_PAIR,2,0): (32, None), # Special case
        (macho.GENERIC_RELOC_SECTDIFF,2,0): (32, None), # Special case
        # macho.GENERIC_RELOC_PB_LA_PTR
        (macho.GENERIC_RELOC_LOCAL_SECTDIFF,2,0): (32, None), # Special case
        # macho.GENERIC_RELOC_TLV
        })
    add_macho('CPU_TYPE_X86_64',{
        (macho.X86_64_RELOC_UNSIGNED,0,0): ( 8, None),
        (macho.X86_64_RELOC_UNSIGNED,1,0): (16, None),
        (macho.X86_64_RELOC_UNSIGNED,2,0): (32, None),
        (macho.X86_64_RELOC_UNSIGNED,3,0): (64, None),
        (macho.X86_64_RELOC_SIGNED,2,1): (32, None),
        (macho.X86_64_RELOC_BRANCH,2,1): (64, None),
        (macho.X86_64_RELOC_GOT_LOAD,2,1): (32, 'GOTPCREL'),
        (macho.X86_64_RELOC_GOT,2,1): (32, 'GOTPCREL'),
        # macho.X86_64_RELOC_SUBTRACTOR = 5 # must be followed by a X86_64_RELOC_UNSIGNED
        (macho.X86_64_RELOC_SIGNED_1,2,1): (32, None),
        (macho.X86_64_RELOC_SIGNED_2,2,1): (32, None),
        (macho.X86_64_RELOC_SIGNED_4,2,1): (32, None),
        # macho.X86_64_RELOC_TLV = 9 # for thread local variables
        })
except ImportError:
    pass
try:
from elfesteem import elf
def add_elf(machine_type, table):
if not machine_type in elf.__dict__:
return
machine_type = elf.__dict__[machine_type]
for R, r_type in table.items():
reloc_suffixes[('ELF', machine_type, R)] = r_type
add_elf('EM_386',{
elf.R_386_32: (32, None),
elf.R_386_PC32: (32, None),
elf.R_386_GOT32: (32, 'GOT'),
elf.R_386_GOT32X: (32, 'GOT'),
elf.R_386_PLT32: (32, 'PLT'),
# elf.R_386_COPY
# elf.R_386_GLOB_DAT
# elf.R_386_JMP_SLOT
# elf.R_386_RELATIVE
elf.R_386_GOTOFF: (32, 'GOTOFF'),
elf.R_386_GOTPC: (32, None),
# elf.R_386_32PLT
elf.R_386_TLS_GD_PLT: (32, 'tlsgdplt'),
elf.R_386_TLS_LDM_PLT: (32, 'tlsldmplt'),
elf.R_386_TLS_TPOFF: (32, 'ntpoff'), # duplicate
elf.R_386_TLS_IE: (32, 'indntpoff'),
elf.R_386_TLS_GOTIE: (32, 'gotntpoff'),
elf.R_386_TLS_LE: (32, 'ntpoff'), # duplicate
elf.R_386_TLS_GD: (32, 'tlsgd'),
elf.R_386_TLS_LDM: (32, 'tlsldm'),
# ...
elf.R_386_TLS_LDO_32: (32, 'dtpoff'), # duplicate
elf.R_386_TLS_IE_32: (32, 'gottpoff'),
elf.R_386_TLS_DTPMOD32: (32, 'dtpmod'),
elf.R_386_TLS_DTPOFF32: | |
sl[35] = 0, 1289
sl[36] = 65, 1326
sl[37] = 65, 1251
sl[38] = 65, 1176
sl[39] = 65, 1026
sl[40] = 65, 876
sl[41] = 65, 726
sl[42] = 65, 576
sl[43] = 65, 426
sl[44] = 65, 276
sl[45] = 65, 126
sl[46] = 65, 51
sl[47] = 65, 201
sl[48] = 65, 351
sl[49] = 65, 501
sl[50] = 65, 651
sl[51] = 65, 951
sl[52] = 65, 1101
sl[53] = 65, 801
self.SiteLoc = sl
self.check()
class uMap54_2a(Probe):
    """uMap54_2a, 65 um spacing, 2 column, staggered"""
    def __init__(self):
        self.layout = '2a'
        self.name = 'uMap54_2a'
        self.nchans = 54
        self.ncols = 2
        # Channel -> (x, y) site position in um: chans 0-26 on the left
        # column (x = -28), chans 27-53 on the right column (x = +28).
        self.SiteLoc = {
            0: (-28, 1235),
            1: (-28, 1170),
            2: (-28, 1105),
            3: (-28, 1040),
            4: (-28, 975),
            5: (-28, 910),
            6: (-28, 845),
            7: (-28, 780),
            8: (-28, 715),
            9: (-28, 650),
            10: (-28, 585),
            11: (-28, 520),
            12: (-28, 455),
            13: (-28, 390),
            14: (-28, 325),
            15: (-28, 260),
            16: (-28, 195),
            17: (-28, 130),
            18: (-28, 65),
            19: (-28, 1300),
            20: (-28, 1365),
            21: (-28, 1430),
            22: (-28, 1495),
            23: (-28, 1560),
            24: (-28, 1690),
            25: (-28, 1755),
            26: (-28, 1625),
            27: (28, 1722),
            28: (28, 1657),
            29: (28, 1592),
            30: (28, 1527),
            31: (28, 1462),
            32: (28, 1397),
            33: (28, 1332),
            34: (28, 32),
            35: (28, 97),
            36: (28, 162),
            37: (28, 227),
            38: (28, 292),
            39: (28, 357),
            40: (28, 422),
            41: (28, 487),
            42: (28, 552),
            43: (28, 617),
            44: (28, 682),
            45: (28, 747),
            46: (28, 812),
            47: (28, 877),
            48: (28, 942),
            49: (28, 1007),
            50: (28, 1072),
            51: (28, 1202),
            52: (28, 1267),
            53: (28, 1137),
        }
        self.check()
class uMap54_2b(Probe):
    """uMap54_2b, 50 um spacing, 2 column, staggered"""
    def __init__(self):
        self.layout = '2b'
        self.name = 'uMap54_2b'
        self.nchans = 54
        self.ncols = 2
        # Channel -> (x, y) site position in um: chans 0-26 on the left
        # column (x = -25), chans 27-53 on the right column (x = +25).
        self.SiteLoc = {
            0: (-25, 1275),
            1: (-25, 1175),
            2: (-25, 1075),
            3: (-25, 975),
            4: (-25, 875),
            5: (-25, 775),
            6: (-25, 725),
            7: (-25, 675),
            8: (-25, 625),
            9: (-25, 575),
            10: (-25, 525),
            11: (-25, 475),
            12: (-25, 425),
            13: (-25, 375),
            14: (-25, 325),
            15: (-25, 275),
            16: (-25, 225),
            17: (-25, 175),
            18: (-25, 125),
            19: (-25, 75),
            20: (-25, 25),
            21: (-25, 825),
            22: (-25, 925),
            23: (-25, 1025),
            24: (-25, 1225),
            25: (-25, 1325),
            26: (-25, 1125),
            27: (25, 1300),
            28: (25, 1200),
            29: (25, 1100),
            30: (25, 1000),
            31: (25, 900),
            32: (25, 0),
            33: (25, 50),
            34: (25, 100),
            35: (25, 150),
            36: (25, 200),
            37: (25, 250),
            38: (25, 300),
            39: (25, 350),
            40: (25, 400),
            41: (25, 450),
            42: (25, 500),
            43: (25, 550),
            44: (25, 600),
            45: (25, 650),
            46: (25, 700),
            47: (25, 750),
            48: (25, 800),
            49: (25, 850),
            50: (25, 950),
            51: (25, 1150),
            52: (25, 1250),
            53: (25, 1050),
        }
        self.check()
class pt16a_HS27(Probe):
    """pt16a in DIP-16 to HS-27 adapter"""
    def __init__(self):
        self.layout = 'pt16a_HS27'
        self.name = 'pt16a_HS27'
        self.nchans = 20
        self.ncols = 2
        # Channel -> (x, y) site position in um.  Chans 8-11 are grounded
        # in the adapter, so they are assigned dummy sites below the probe.
        self.SiteLoc = {
            0: (-27, 279),
            1: (-27, 217),
            2: (-27, 155),
            3: (-27, 93),
            4: (-27, 31),
            5: (-27, 341),
            6: (-27, 403),
            7: (-27, 465),
            # Gap of 4 (grounded) chans in the adapter, sites below the probe:
            8: (-27, 650),
            9: (-27, 700),
            10: (27, 650),
            11: (27, 700),
            # Back to actual polytrode sites:
            12: (27, 434),
            13: (27, 372),
            14: (27, 310),
            15: (27, 0),
            16: (27, 62),
            17: (27, 124),
            18: (27, 186),
            19: (27, 248),
        }
        self.check()
class pt16b_HS27(Probe):
    """pt16b in DIP-16 to HS-27 adapter"""
    def __init__(self):
        self.layout = 'pt16b_HS27'
        self.name = 'pt16b_HS27'
        self.nchans = 20
        self.ncols = 2
        # Channel -> (x, y) site position in um.  Chans 8-11 are grounded
        # in the adapter, so they are assigned dummy sites below the probe.
        self.SiteLoc = {
            0: (-27, 155),
            1: (-27, 93),
            2: (-27, 217),
            3: (-27, 341),
            4: (-27, 31),
            5: (-27, 279),
            6: (-27, 403),
            7: (-27, 465),
            # Gap of 4 (grounded) chans in the adapter, sites below the probe:
            8: (-27, 650),
            9: (-27, 700),
            10: (27, 650),
            11: (27, 700),
            # Back to actual polytrode sites:
            12: (27, 434),
            13: (27, 372),
            14: (27, 248),
            15: (27, 0),
            16: (27, 310),
            17: (27, 186),
            18: (27, 62),
            19: (27, 124),
        }
        self.check()
class single(Probe):
    """Single channel"""
    def __init__(self):
        self.layout = 'single'
        self.name = 'single'
        self.nchans = 1
        self.ncols = 1
        # One site, at the origin.
        self.SiteLoc = {0: (0, 0)}
        self.check()
class IMEC30(Probe):
    """30 chan IMEC probe snippet, 2 column, 22 um rectangular spacing"""
    def __init__(self):
        self.layout = 'IMEC30'
        self.name = 'IMEC30'
        self.nchans = 30
        self.ncols = 2
        # Sites form a regular 2 x 15 grid: even chans at x=0, odd chans
        # at x=22, one row per chan pair every 22 um starting at y=1050.
        self.SiteLoc = {chan: (22 * (chan % 2), 1050 + 22 * (chan // 2))
                        for chan in range(self.nchans)}
        self.check()
class A1x32(Probe):
    """A1x32, 25 um spacing, single column, 1-based channel IDs"""
    def __init__(self):
        self.layout = 'A1x32-5mm-25-177'
        self.name = 'A1x32'
        self.nchans = 32
        self.ncols = 1
        # Single column at x=0.  Chans 1-16 descend from y=775 in 50 um
        # steps; chans 17-32 ascend from y=0 in 50 um steps, interleaving
        # with the first group for a net 25 um site pitch.
        sites = {}
        for chan in range(1, 17):
            sites[chan] = 0, 775 - 50 * (chan - 1)
        for chan in range(17, 33):
            sites[chan] = 0, 50 * (chan - 17)
        self.SiteLoc = sites
        self.check()
class A1x32_edge(Probe):
"""A1x32 edge, 20 um spacing, single column, 1-based channel IDs"""
def __init__(self):
self.layout = 'A1x32-Edge-5mm-20-177-A32'
self.name = 'A1x32_edge'
self.nchans = 32
self.ncols = 1
sl = {}
sl[1] = 0, 620
sl[2] = 0, 600
sl[3] = 0, 580
sl[4] = 0, 560
sl[5] = 0, 540
sl[6] = 0, 520
sl[7] = 0, 500
sl[8] = 0, 480
sl[9] = 0, 460
sl[10] = 0, 440
sl[11] = 0, 420
sl[12] = 0, 400
sl[13] = 0, 380
sl[14] = 0, 360
sl[15] = 0, 340
sl[16] = 0, 320
sl[17] = 0, 300
sl[18] = 0, 280
sl[19] = 0, 260
sl[20] = 0, 240
sl[21] = 0, 220
| |
3-D array
transformers = reshaped_temp + pstar # [2, grow, gcol]
# Correct the points where pTwp is singular
if flag:
blidx = det == np.inf # bool index
transformers[0][blidx] = vx[blidx] + qstar[0][blidx] - pstar[0][blidx]
transformers[1][blidx] = vy[blidx] + qstar[1][blidx] - pstar[1][blidx]
# Removed the points outside the border
transformers[transformers < 0] = 0
transformers[0][transformers[0] > height - 1] = 0
transformers[1][transformers[1] > width - 1] = 0
# Mapping original image
transformed_image = image[tuple(transformers.astype(np.int16))] # [grow, gcol]
# Rescale image
transformed_image = rescale(transformed_image, scale=1.0 / density, mode='reflect')
return transformed_image
def mls_similarity_deformation(image, p, q, alpha=1.0, density=1.0):
    ''' Similarity deformation (forward mapping): moves pixels at the grid
    points to their deformed positions, per Schaefer et al.,
    "Image Deformation Using Moving Least Squares".
    ### Params:
        * image - ndarray: original image
        * p - ndarray: an array with size [n, 2], original control points
        * q - ndarray: an array with size [n, 2], final control points
        * alpha - float: parameter used by weights
        * density - float: density of the grids
    ### Return:
        A deformed image.
    '''
    height = image.shape[0]
    width = image.shape[1]
    # Change (x, y) to (row, col)
    q = q[:, [1, 0]]
    p = p[:, [1, 0]]
    # Make grids on the original image
    gridX = np.linspace(0, width, num=int(width*density), endpoint=False)
    gridY = np.linspace(0, height, num=int(height*density), endpoint=False)
    vy, vx = np.meshgrid(gridX, gridY)
    grow = vx.shape[0]  # grid rows
    gcol = vx.shape[1]  # grid cols
    ctrls = p.shape[0]  # control points
    # Compute
    reshaped_p = p.reshape(ctrls, 2, 1, 1)                                      # [ctrls, 2, 1, 1]
    reshaped_v = np.vstack((vx.reshape(1, grow, gcol), vy.reshape(1, grow, gcol)))      # [2, grow, gcol]
    # Inverse-distance weights w_i = 1 / |p_i - v|^(2*alpha).
    # NOTE(review): unlike the *_inv variant, w is not clamped here, so a
    # grid point coinciding exactly with a control point yields inf — confirm
    # callers avoid integer-aligned control points, or clamp as in *_inv.
    w = 1.0 / np.sum((reshaped_p - reshaped_v) ** 2, axis=1)**alpha             # [ctrls, grow, gcol]
    sum_w = np.sum(w, axis=0)                                                   # [grow, gcol]
    pstar = np.sum(w * reshaped_p.transpose(1, 0, 2, 3), axis=1) / sum_w        # [2, grow, gcol]
    phat = reshaped_p - pstar                                                   # [ctrls, 2, grow, gcol]
    reshaped_phat1 = phat.reshape(ctrls, 1, 2, grow, gcol)                      # [ctrls, 1, 2, grow, gcol]
    reshaped_phat2 = phat.reshape(ctrls, 2, 1, grow, gcol)                      # [ctrls, 2, 1, grow, gcol]
    reshaped_w = w.reshape(ctrls, 1, 1, grow, gcol)                             # [ctrls, 1, 1, grow, gcol]
    # mu = sum_i w_i * phat_i^T * phat_i (scalar per grid point)
    mu = np.sum(np.matmul(reshaped_w.transpose(0, 3, 4, 1, 2) *
                reshaped_phat1.transpose(0, 3, 4, 1, 2),
                reshaped_phat2.transpose(0, 3, 4, 1, 2)), axis=0)               # [grow, gcol, 1, 1]
    reshaped_mu = mu.reshape(1, grow, gcol)                                     # [1, grow, gcol]
    # phat rotated by 90 degrees (fancy indexing copies, so phat is untouched)
    neg_phat_verti = phat[:, [1, 0],...]                                        # [ctrls, 2, grow, gcol]
    neg_phat_verti[:, 1,...] = -neg_phat_verti[:, 1,...]
    reshaped_neg_phat_verti = neg_phat_verti.reshape(ctrls, 1, 2, grow, gcol)   # [ctrls, 1, 2, grow, gcol]
    mul_left = np.concatenate((reshaped_phat1, reshaped_neg_phat_verti), axis=1)    # [ctrls, 2, 2, grow, gcol]
    vpstar = reshaped_v - pstar                                                 # [2, grow, gcol]
    reshaped_vpstar = vpstar.reshape(2, 1, grow, gcol)                          # [2, 1, grow, gcol]
    # (v - pstar) rotated by 90 degrees
    neg_vpstar_verti = vpstar[[1, 0],...]                                       # [2, grow, gcol]
    neg_vpstar_verti[1,...] = -neg_vpstar_verti[1,...]
    reshaped_neg_vpstar_verti = neg_vpstar_verti.reshape(2, 1, grow, gcol)      # [2, 1, grow, gcol]
    mul_right = np.concatenate((reshaped_vpstar, reshaped_neg_vpstar_verti), axis=1)    # [2, 2, grow, gcol]
    reshaped_mul_right = mul_right.reshape(1, 2, 2, grow, gcol)                 # [1, 2, 2, grow, gcol]
    A = np.matmul((reshaped_w * mul_left).transpose(0, 3, 4, 1, 2),
                   reshaped_mul_right.transpose(0, 3, 4, 1, 2))                 # [ctrls, grow, gcol, 2, 2]
    # Calculate q
    reshaped_q = q.reshape((ctrls, 2, 1, 1))                                    # [ctrls, 2, 1, 1]
    qstar = np.sum(w * reshaped_q.transpose(1, 0, 2, 3), axis=1) / np.sum(w, axis=0)    # [2, grow, gcol]
    qhat = reshaped_q - qstar                                                   # [ctrls, 2, grow, gcol]
    reshaped_qhat = qhat.reshape(ctrls, 1, 2, grow, gcol).transpose(0, 3, 4, 1, 2)      # [ctrls, grow, gcol, 1, 2]
    # Get final image transfomer -- 3-D array
    temp = np.sum(np.matmul(reshaped_qhat, A), axis=0).transpose(2, 3, 0, 1)    # [1, 2, grow, gcol]
    reshaped_temp = temp.reshape(2, grow, gcol)                                 # [2, grow, gcol]
    transformers = reshaped_temp / reshaped_mu + qstar                          # [2, grow, gcol]
    # Removed the points outside the border (clamped to pixel 0,0)
    transformers[transformers < 0] = 0
    transformers[0][transformers[0] > height - 1] = 0
    transformers[1][transformers[1] > width - 1] = 0
    # Mapping original image: forward splat — destination pixels receive
    # source pixels; unhit destinations stay white (255)
    transformed_image = np.ones_like(image) * 255
    new_gridY, new_gridX = np.meshgrid((np.arange(gcol) / density).astype(np.int16),
                                       (np.arange(grow) / density).astype(np.int16))
    transformed_image[tuple(transformers.astype(np.int16))] = image[new_gridX, new_gridY]    # [grow, gcol]
    return transformed_image
def mls_similarity_deformation_inv(image, p, q, alpha=1.0, density=1.0):
    ''' Similarity inverse deformation (backward mapping): for every output
    grid point, finds the source pixel, per Schaefer et al.,
    "Image Deformation Using Moving Least Squares".
    ### Params:
        * image - ndarray: original image
        * p - ndarray: an array with size [n, 2], original control points
        * q - ndarray: an array with size [n, 2], final control points
        * alpha - float: parameter used by weights
        * density - float: density of the grids
    ### Return:
        A deformed image.
    '''
    height = image.shape[0]
    width = image.shape[1]
    # Change (x, y) to (row, col)
    q = q[:, [1, 0]]
    p = p[:, [1, 0]]
    # Make grids on the original image
    gridX = np.linspace(0, width, num=int(width*density), endpoint=False)
    gridY = np.linspace(0, height, num=int(height*density), endpoint=False)
    vy, vx = np.meshgrid(gridX, gridY)
    grow = vx.shape[0]  # grid rows
    gcol = vx.shape[1]  # grid cols
    ctrls = p.shape[0]  # control points
    # Compute
    reshaped_p = p.reshape(ctrls, 2, 1, 1)                                      # [ctrls, 2, 1, 1]
    reshaped_q = q.reshape((ctrls, 2, 1, 1))                                    # [ctrls, 2, 1, 1]
    reshaped_v = np.vstack((vx.reshape(1, grow, gcol), vy.reshape(1, grow, gcol)))      # [2, grow, gcol]
    w = 1.0 / np.sum((reshaped_p - reshaped_v) ** 2, axis=1)**alpha             # [ctrls, grow, gcol]
    # Clamp the inf weights produced where a grid point coincides with a
    # control point, so the weighted averages below stay finite.
    w[w == np.inf] = 2**31 - 1
    pstar = np.sum(w * reshaped_p.transpose(1, 0, 2, 3), axis=1) / np.sum(w, axis=0)    # [2, grow, gcol]
    phat = reshaped_p - pstar                                                   # [ctrls, 2, grow, gcol]
    qstar = np.sum(w * reshaped_q.transpose(1, 0, 2, 3), axis=1) / np.sum(w, axis=0)    # [2, grow, gcol]
    qhat = reshaped_q - qstar                                                   # [ctrls, 2, grow, gcol]
    reshaped_phat1 = phat.reshape(ctrls, 1, 2, grow, gcol)                      # [ctrls, 1, 2, grow, gcol]
    reshaped_phat2 = phat.reshape(ctrls, 2, 1, grow, gcol)                      # [ctrls, 2, 1, grow, gcol]
    reshaped_qhat = qhat.reshape(ctrls, 1, 2, grow, gcol)                       # [ctrls, 1, 2, grow, gcol]
    reshaped_w = w.reshape(ctrls, 1, 1, grow, gcol)                             # [ctrls, 1, 1, grow, gcol]
    # mu = sum_i w_i * phat_i^T * phat_i (scalar per grid point)
    mu = np.sum(np.matmul(reshaped_w.transpose(0, 3, 4, 1, 2) *
                reshaped_phat1.transpose(0, 3, 4, 1, 2),
                reshaped_phat2.transpose(0, 3, 4, 1, 2)), axis=0)               # [grow, gcol, 1, 1]
    reshaped_mu = mu.reshape(1, grow, gcol)                                     # [1, grow, gcol]
    # phat rotated by 90 degrees (fancy indexing copies, so phat is untouched)
    neg_phat_verti = phat[:, [1, 0],...]                                        # [ctrls, 2, grow, gcol]
    neg_phat_verti[:, 1,...] = -neg_phat_verti[:, 1,...]
    reshaped_neg_phat_verti = neg_phat_verti.reshape(ctrls, 1, 2, grow, gcol)   # [ctrls, 1, 2, grow, gcol]
    mul_right = np.concatenate((reshaped_phat1, reshaped_neg_phat_verti), axis=1)   # [ctrls, 2, 2, grow, gcol]
    mul_left = reshaped_qhat * reshaped_w                                       # [ctrls, 1, 2, grow, gcol]
    Delta = np.sum(np.matmul(mul_left.transpose(0, 3, 4, 1, 2),
                             mul_right.transpose(0, 3, 4, 1, 2)),
                   axis=0).transpose(0, 1, 3, 2)                                # [grow, gcol, 2, 1]
    Delta_verti = Delta[...,[1, 0],:]                                           # [grow, gcol, 2, 1]
    Delta_verti[...,0,:] = -Delta_verti[...,0,:]
    B = np.concatenate((Delta, Delta_verti), axis=3)                            # [grow, gcol, 2, 2]
    try:
        inv_B = np.linalg.inv(B)                                                # [grow, gcol, 2, 2]
        flag = False
    # BUG FIX: was `np.linalg.linalg.LinAlgError` — the private
    # `np.linalg.linalg` alias was removed in NumPy 2.0; the public name
    # `np.linalg.LinAlgError` works on all NumPy versions.
    except np.linalg.LinAlgError:
        flag = True
        # Fall back to an explicit adjugate/determinant inverse, marking
        # singular points with det = inf so they can be corrected below.
        # NOTE(review): the det threshold is not applied to abs(det) —
        # confirm large-negative determinants cannot occur here.
        det = np.linalg.det(B)                                                  # [grow, gcol]
        det[det < 1e-8] = np.inf
        reshaped_det = det.reshape(grow, gcol, 1, 1)                            # [grow, gcol, 1, 1]
        adjoint = B[:,:,[[1, 0], [1, 0]], [[1, 1], [0, 0]]]                     # [grow, gcol, 2, 2]
        adjoint[:,:,[0, 1], [1, 0]] = -adjoint[:,:,[0, 1], [1, 0]]              # [grow, gcol, 2, 2]
        inv_B = (adjoint / reshaped_det).transpose(2, 3, 0, 1)                  # [2, 2, grow, gcol]
    v_minus_qstar_mul_mu = (reshaped_v - qstar) * reshaped_mu                   # [2, grow, gcol]
    # Get final image transfomer -- 3-D array
    reshaped_v_minus_qstar_mul_mu = v_minus_qstar_mul_mu.reshape(1, 2, grow, gcol)  # [1, 2, grow, gcol]
    transformers = np.matmul(reshaped_v_minus_qstar_mul_mu.transpose(2, 3, 0, 1),
                             inv_B).reshape(grow, gcol, 2).transpose(2, 0, 1) + pstar   # [2, grow, gcol]
    # Correct the points where pTwp is singular
    if flag:
        blidx = det == np.inf  # bool index
        transformers[0][blidx] = vx[blidx] + qstar[0][blidx] - pstar[0][blidx]
        transformers[1][blidx] = vy[blidx] + qstar[1][blidx] - pstar[1][blidx]
    # Removed the points outside the border (clamped to pixel 0,0)
    transformers[transformers < 0] = 0
    transformers[0][transformers[0] > height - 1] = 0
    transformers[1][transformers[1] > width - 1] = 0
    # Mapping original image (backward gather)
    transformed_image = image[tuple(transformers.astype(np.int16))]             # [grow, gcol]
    # Rescale image back to the input resolution
    transformed_image = rescale(transformed_image, scale=1.0 / density, mode='reflect')
    return transformed_image
def mls_rigid_deformation(image, p, q, alpha=1.0, density=1.0):
''' Rigid deformation
### Params:
* image - ndarray: original image
* | |
#!/usr/bin/env python
# Copyright 2016 <NAME>
# Apache 2.0.
from __future__ import print_function
from __future__ import division
from collections import defaultdict
import argparse
import sys
import math
def GetArgs():
    """Build the argument parser, echo the command line to stderr, and
    return the parsed arguments after validation by CheckArgs()."""
    parser = argparse.ArgumentParser(
        description="Use a Bayesian framework to select"
        "pronunciation candidates from three sources: reference lexicon"
        ", G2P lexicon and phonetic-decoding lexicon. The inputs are a word-stats file,"
        "a pron-stats file, and three source lexicons (ref/G2P/phonetic-decoding)."
        "We assume the pronunciations for each word follow a Categorical distribution"
        "with Dirichlet priors. Thus, with user-specified prior counts (parameterized by"
        "prior-mean and prior-counts-tot) and observed counts from the pron-stats file, "
        "we can compute posterior for each pron, and select candidates with highest"
        "posteriors, until we hit user-specified variants-prob-mass/counts thresholds."
        "The outputs are: a file specifying posteriors of all candidate (pron_posteriors),"
        "a learned lexicon for words out of the ref. vocab (learned_lexicon_oov),"
        "and a lexicon_edits file containing suggested modifications of prons, for"
        "words within the ref. vocab (ref_lexicon_edits).",
        epilog="See steps/dict/learn_lexicon_bayesian.sh for example.",
    )
    parser.add_argument(
        "--prior-mean",
        type=str,
        default="0,0,0",
        # Typo fixes in help text: "exclusive n" / "pronunciatio" were a
        # line-break artifact in the concatenated string.
        help="Mean of priors (summing up to 1) assigned to three exclusive "
        "pronunciation sources: reference lexicon, g2p, and phonetic decoding. We "
        "recommend setting a larger prior mean for the reference lexicon, e.g. '0.6,0.2,0.2'",
    )
    parser.add_argument(
        "--prior-counts-tot",
        type=float,
        default=15.0,
        help="Total amount of prior counts we add to all pronunciation candidates of"
        "each word. By timing it with the prior mean of a source, and then dividing"
        "by the number of candidates (for a word) from this source, we get the"
        "prior counts we actually add to each candidate.",
    )
    parser.add_argument(
        "--variants-prob-mass",
        type=float,
        default=0.7,
        help="For each word, we pick up candidates (from all three sources)"
        "with highest posteriors until the total prob mass hit this amount.",
    )
    parser.add_argument(
        "--variants-prob-mass-ref",
        type=float,
        default=0.9,
        help="For each word, after the total prob mass of selected candidates "
        "hit variants-prob-mass, we continue to pick up reference candidates"
        "with highest posteriors until the total prob mass hit this amount (must >= variants-prob-mass).",
    )
    parser.add_argument(
        "--variants-counts",
        type=int,
        default=1,
        help="Generate up to this many variants of prons for each word out"
        "of the ref. lexicon.",
    )
    parser.add_argument(
        "silence_file",
        metavar="<silphonetic-file>",
        type=str,
        help="File containing a list of silence phones.",
    )
    parser.add_argument(
        "pron_stats_file",
        metavar="<stats-file>",
        type=str,
        help="File containing pronunciation statistics from lattice alignment; "
        "each line must be <count> <word> <phones>.",
    )
    parser.add_argument(
        "word_counts_file",
        metavar="<counts-file>",
        type=str,
        help="File containing word counts in acoustic training data; "
        "each line must be <word> <count>.",
    )
    parser.add_argument(
        "ref_lexicon",
        metavar="<reference-lexicon>",
        type=str,
        help="The reference lexicon (most probably hand-derived)."
        "Each line must be <word> <phones>",
    )
    parser.add_argument(
        "g2p_lexicon",
        metavar="<g2p-expanded-lexicon>",
        type=str,
        help="Candidate pronunciations from G2P results."
        "Each line must be <word> <phones>",
    )
    parser.add_argument(
        "phonetic_decoding_lexicon",
        metavar="<prons-in-acoustic-evidence>",
        type=str,
        help="Candidate pronunciations from phonetic decoding results."
        "Each line must be <word> <phones>",
    )
    parser.add_argument(
        "pron_posteriors",
        metavar="<pron-posteriors>",
        type=str,
        help="Output file containing posteriors of all candidate prons for each word,"
        "based on which we select prons to construct the learned lexicon."
        "each line is <word> <pronunciation-source: one of R(ef)/G(2P)/P(hone-decoding)> <posterior> <pronunciation> ",
    )
    parser.add_argument(
        "learned_lexicon_oov",
        metavar="<learned-lexicon-oov>",
        type=str,
        help="Output file which is the learned lexicon for words out of the ref. vocab.",
    )
    parser.add_argument(
        "ref_lexicon_edits",
        metavar="<lexicon-edits>",
        type=str,
        help="Output file containing human-readable & editable pronunciation info (and the"
        "accept/reject decision made by our algorithm) for those words in ref. vocab,"
        "to which any change has been recommended. The info for each word is like:"
        "------------ an 4086.0 --------------"
        "R | Y | 2401.6 | AH N"
        "R | Y | 640.8 | AE N"
        "P | Y | 1035.5 | IH N"
        "R(ef), P(hone-decoding) represents the pronunciation source"
        "Y/N means the recommended decision of including this pron or not"
        "and the numbers are soft counts accumulated from lattice-align-word outputs. "
        "See the function WriteEditsAndSummary for more details.",
    )
    # Log the exact command line for reproducibility.
    print(" ".join(sys.argv), file=sys.stderr)
    args = parser.parse_args()
    args = CheckArgs(args)
    return args
def CheckArgs(args):
    """Open all input/output files named in ``args`` (attaching the handles
    to the namespace) and convert --prior-mean from "a,b,c" into a list of
    three floats, raising on malformed or out-of-range values.

    Returns the same ``args`` namespace, augmented and validated.
    """
    args.silence_file_handle = open(args.silence_file)
    # "-" means: read pron stats from stdin.
    if args.pron_stats_file == "-":
        args.pron_stats_file_handle = sys.stdin
    else:
        args.pron_stats_file_handle = open(args.pron_stats_file)
    args.word_counts_file_handle = open(args.word_counts_file)
    args.ref_lexicon_handle = open(args.ref_lexicon)
    args.g2p_lexicon_handle = open(args.g2p_lexicon)
    args.phonetic_decoding_lexicon_handle = open(args.phonetic_decoding_lexicon)
    args.pron_posteriors_handle = open(args.pron_posteriors, "w")
    args.learned_lexicon_oov_handle = open(args.learned_lexicon_oov, "w")
    args.ref_lexicon_edits_handle = open(args.ref_lexicon_edits, "w")
    prior_mean = args.prior_mean.strip().split(",")
    # BUG FIX: was `len(prior_mean) is not 3`, which tests object identity
    # against an int literal (implementation-dependent, SyntaxWarning on
    # CPython >= 3.8); value inequality is what is meant.
    if len(prior_mean) != 3:
        raise Exception("Invalid Dirichlet prior mean ", args.prior_mean)
    for i in range(0, 3):
        # Each component of the prior mean must lie strictly in (0, 1).
        if float(prior_mean[i]) <= 0 or float(prior_mean[i]) >= 1:
            raise Exception(
                "Dirichlet prior mean",
                prior_mean[i],
                "is invalid, it must be between 0 and 1.",
            )
    args.prior_mean = [float(prior_mean[0]), float(prior_mean[1]), float(prior_mean[2])]
    return args
def ReadPronStats(pron_stats_file_handle):
    """Parse a pron-stats file (lines of "<count> <word> <phones...>").

    Blank lines are skipped; a line with fewer than two fields raises.
    Returns a dict mapping (word, phones-string) -> soft count (float).
    """
    stats = {}
    for line in pron_stats_file_handle.readlines():
        fields = line.strip().split()
        if not fields:
            continue
        if len(fields) < 2:
            raise Exception("Invalid format of line " + line + " in stats file.")
        word, phones = fields[1], " ".join(fields[2:])
        stats[(word, phones)] = float(fields[0])
    return stats
def ReadWordCounts(word_counts_file_handle):
    """Parse a word-counts file (lines of "<word> <count>").

    Any line with fewer than two fields (including blank lines) raises.
    Returns a dict mapping word -> count (int).
    """
    counts = {}
    for line in word_counts_file_handle.readlines():
        fields = line.strip().split()
        if len(fields) < 2:
            raise Exception("Invalid format of line " + line + " in counts file.")
        counts[fields[0]] = int(fields[1])
    return counts
def ReadLexicon(args, lexicon_file_handle, counts):
    """Parse a lexicon file (lines of "<word> <phones...>").

    Words absent from ``counts`` (i.e. never seen in training data) are
    skipped, since prons are only learned for words with acoustic examples.
    Returns a defaultdict mapping word -> set of phones-strings.
    """
    lexicon = defaultdict(set)
    for line in lexicon_file_handle.readlines():
        fields = line.strip().split()
        if not fields:
            continue
        if len(fields) < 2:
            raise Exception("Invalid format of line " + line + " in lexicon file.")
        word = fields[0]
        if word in counts:
            lexicon[word].add(" ".join(fields[1:]))
    return lexicon
def FilterPhoneticDecodingLexicon(args, phonetic_decoding_lexicon, stats):
    """Drop phonetic-decoding candidates that contain any silence phone.

    Silence phones are read from ``args.silence_file_handle`` (one per
    line).  Each rejected (word, pron) pair is removed from both the
    lexicon and the stats dict, with a warning printed to stderr.
    Returns the (modified in place) lexicon and stats.
    """
    silphones = {line.strip() for line in args.silence_file_handle}
    rejected_candidates = set()
    for word, prons in phonetic_decoding_lexicon.items():
        for pron in prons:
            if not any(phone in silphones for phone in pron.split()):
                continue
            # Pull the soft count out of stats (0 if unseen) and reject.
            count = stats.pop((word, pron), 0)
            rejected_candidates.add((word, pron))
            print(
                "WARNING: removing the candidate pronunciation from phonetic-decoding: {0}: "
                '"{1}" whose soft-count from lattice-alignment is {2}, cause it contains at'
                " least one silence phone.".format(word, pron, count),
                file=sys.stderr,
            )
    # Deferred removal so the pron sets are not mutated while iterating.
    for word, pron in rejected_candidates:
        phonetic_decoding_lexicon[word].remove(pron)
    return phonetic_decoding_lexicon, stats
def ComputePriorCounts(
    args, counts, ref_lexicon, g2p_lexicon, phonetic_decoding_lexicon
):
    """Distribute the total prior count over the three pron sources per word.

    For each word seen in training data, the configured prior mean is
    zeroed for any source that offers no candidate for that word, then
    re-normalized to sum to one, and finally scaled by
    ``args.prior_counts_tot``.  A word found in no lexicon at all keeps
    the (all-zero) un-normalized mean, yielding zero prior counts, and a
    warning is printed to stderr.
    Returns a defaultdict mapping word -> [ref, g2p, phonetic] prior counts.
    """
    prior_counts = defaultdict(list)
    for word in counts:
        mean = [args.prior_mean[0], args.prior_mean[1], args.prior_mean[2]]
        if word not in ref_lexicon:
            mean[0] = 0
        if word not in g2p_lexicon:
            mean[1] = 0
        if word not in phonetic_decoding_lexicon:
            mean[2] = 0
        total = sum(mean)
        try:
            mean = [float(m) / total for m in mean]
        except ZeroDivisionError:
            print(
                "WARNING: word {} appears in train_counts but not in any lexicon.".format(
                    word
                ),
                file=sys.stderr,
            )
        prior_counts[word] = [m * args.prior_counts_tot for m in mean]
    return prior_counts
def ComputePosteriors(
args, stats, ref_lexicon, g2p_lexicon, phonetic_decoding_lexicon, prior_counts
):
posteriors = defaultdict(
list
) # This dict stores a list of (pronunciation, posterior)
# pairs for each word, where the posteriors are normalized soft counts. Before normalization,
# The soft-counts were augmented by a user-specified prior count, according the source
# (ref/G2P/phonetic-decoding) of this pronunciation.
for word, prons in ref_lexicon.items():
for pron in prons:
# c is the augmented soft count (observed count + prior count)
c = float(prior_counts[word][0]) / len(ref_lexicon[word]) + stats.get(
(word, pron), 0
)
posteriors[word].append((pron, c))
for word, prons in g2p_lexicon.items():
for pron in prons:
c = float(prior_counts[word][1]) / len(g2p_lexicon[word]) + stats.get(
(word, pron), 0
)
posteriors[word].append((pron, c))
for word, prons in phonetic_decoding_lexicon.items():
for pron in prons:
c = float(prior_counts[word][2]) / len(
phonetic_decoding_lexicon[word]
) + stats.get((word, pron), 0)
posteriors[word].append((pron, c))
num_prons_from_ref = sum(len(ref_lexicon[i]) for i in ref_lexicon)
num_prons_from_g2p = sum(len(g2p_lexicon[i]) for i in g2p_lexicon)
num_prons_from_phonetic_decoding = sum(
len(phonetic_decoding_lexicon[i]) for i in phonetic_decoding_lexicon
)
print(
"---------------------------------------------------------------------------------------------------",
file=sys.stderr,
)
print("Total num. words is {}:".format(len(posteriors)), file=sys.stderr)
print(
"{0} candidate prons came from the reference lexicon; {1} came from G2P;{2} came from"
"phonetic_decoding".format(
num_prons_from_ref, num_prons_from_g2p, num_prons_from_phonetic_decoding
),
file=sys.stderr,
)
print(
"---------------------------------------------------------------------------------------------------",
file=sys.stderr,
)
# Normalize the augmented soft counts to get posteriors.
count_sum = defaultdict(float) # This dict stores the | |
from enum import Enum
from typing import List, Literal, Optional
from pydantic import BaseModel
from .bytes_to_base64url import bytes_to_base64url
from .cose import COSEAlgorithmIdentifier
from .json_loads_base64url_to_bytes import json_loads_base64url_to_bytes
from .snake_case_to_camel_case import snake_case_to_camel_case
class WebAuthnBaseModel(BaseModel):
    """
    A subclass of Pydantic's BaseModel that includes convenient defaults
    when working with WebAuthn data structures
    `modelInstance.json()` (to JSON):
    - Encodes bytes to Base64URL
    - Converts snake_case properties to camelCase
    `Model.parse_raw()` (from JSON):
    - Decodes Base64URL to bytes
    - Converts camelCase properties to snake_case
    """
    # NOTE(review): inner-class Config, json_encoders/json_loads and
    # allow_population_by_field_name are Pydantic v1 APIs — confirm the
    # project pins pydantic<2 (v2 would require ConfigDict instead).
    class Config:
        json_encoders = {bytes: bytes_to_base64url}  # bytes -> base64url on .json()
        json_loads = json_loads_base64url_to_bytes  # base64url -> bytes on .parse_raw()
        alias_generator = snake_case_to_camel_case  # snake_case field -> camelCase alias
        allow_population_by_field_name = True  # accept field name or its alias on input
################
#
# Fundamental data structures
#
################
class AuthenticatorTransport(str, Enum):
    """How an authenticator communicates to the client/browser.
    Members:
        `USB`: USB wired connection
        `NFC`: Near Field Communication
        `BLE`: Bluetooth Low Energy
        `INTERNAL`: Direct connection (read: a platform authenticator)
    https://www.w3.org/TR/webauthn-2/#enum-transport
    """
    # str-subclassed Enum: each member compares equal to, and JSON-serializes
    # as, its spec-defined lowercase string.
    USB = "usb"
    NFC = "nfc"
    BLE = "ble"
    INTERNAL = "internal"
class AuthenticatorAttachment(str, Enum):
    """How an authenticator is connected to the client/browser.
    Members:
        `PLATFORM`: A non-removable authenticator, like TouchID or Windows Hello
        `CROSS_PLATFORM`: A "roaming" authenticator, like a YubiKey
    https://www.w3.org/TR/webauthn-2/#enumdef-authenticatorattachment
    """
    # Member values are the spec-defined strings sent over the wire.
    PLATFORM = "platform"
    CROSS_PLATFORM = "cross-platform"
class ResidentKeyRequirement(str, Enum):
    """The Relying Party's preference for the authenticator to create a dedicated "client-side" credential for it. Requiring an authenticator to store a dedicated credential should not be done lightly due to the limited storage capacity of some types of authenticators.
    Members:
        `DISCOURAGED`: The authenticator should not create a dedicated credential
        `PREFERRED`: The authenticator can create and store a dedicated credential, but if it doesn't that's alright too
        `REQUIRED`: The authenticator MUST create a dedicated credential. If it cannot, the RP is prepared for an error to occur.
    https://www.w3.org/TR/webauthn-2/#enum-residentKeyRequirement
    """
    # Member values are the spec-defined strings sent over the wire.
    DISCOURAGED = "discouraged"
    PREFERRED = "preferred"
    REQUIRED = "required"
class UserVerificationRequirement(str, Enum):
    """The degree to which the Relying Party wishes to verify a user's identity
    (e.g. via PIN or biometric check on the authenticator).
    Members:
        `REQUIRED`: User verification must occur
        `PREFERRED`: User verification would be great, but if not that's okay too
        `DISCOURAGED`: User verification should not occur, but it's okay if it does
    https://www.w3.org/TR/webauthn-2/#enumdef-userverificationrequirement
    """
    REQUIRED = "required"
    PREFERRED = "preferred"
    DISCOURAGED = "discouraged"
class AttestationConveyancePreference(str, Enum):
    """The Relying Party's interest in receiving an attestation statement.
    Members:
        `NONE`: The Relying Party isn't interested in receiving an attestation statement
        `INDIRECT`: The Relying Party is interested in an attestation statement, but the client is free to generate it as it sees fit
        `DIRECT`: The Relying Party is interested in an attestation statement generated directly by the authenticator
        `ENTERPRISE`: The Relying Party is interested in a statement with identifying information. Typically used within organizations
    https://www.w3.org/TR/webauthn-2/#enum-attestation-convey
    """
    NONE = "none"
    INDIRECT = "indirect"
    DIRECT = "direct"
    ENTERPRISE = "enterprise"
class PublicKeyCredentialType(str, Enum):
    """The type of credential that should be returned by an authenticator. There's but
    a single member because this is a specific subclass of a higher-level
    `CredentialType` that can be of other types.
    Members:
        `PUBLIC_KEY`: The literal string `"public-key"`
    https://www.w3.org/TR/webauthn-2/#enumdef-publickeycredentialtype
    """
    PUBLIC_KEY = "public-key"
class AttestationFormat(str, Enum):
    """The "syntax" of an attestation statement. Formats should be registered with
    the IANA and include documented signature verification steps. Member values are
    the registered format identifiers.
    Members:
        `PACKED`
        `TPM`
        `ANDROID_KEY`
        `ANDROID_SAFETYNET`
        `FIDO_U2F`
        `APPLE`
        `NONE`
    https://www.iana.org/assignments/webauthn/webauthn.xhtml
    """
    PACKED = "packed"
    TPM = "tpm"
    ANDROID_KEY = "android-key"
    ANDROID_SAFETYNET = "android-safetynet"
    FIDO_U2F = "fido-u2f"
    APPLE = "apple"
    NONE = "none"
class ClientDataType(str, Enum):
    """Specific values included in authenticator registration and authentication
    responses to help avoid certain types of "signature confusion attacks".
    Members:
        `WEBAUTHN_CREATE`: The string "webauthn.create". Synonymous with `navigator.credentials.create()` in the browser
        `WEBAUTHN_GET`: The string "webauthn.get". Synonymous with `navigator.credentials.get()` in the browser
    https://www.w3.org/TR/webauthn-2/#dom-collectedclientdata-type
    """
    WEBAUTHN_CREATE = "webauthn.create"
    WEBAUTHN_GET = "webauthn.get"
class TokenBindingStatus(str, Enum):
    """Status values for the Token Binding protocol on the communication channel.
    Member semantics are defined by the spec:
    https://www.w3.org/TR/webauthn-2/#dom-tokenbinding-status
    """
    PRESENT = "present"
    SUPPORTED = "supported"
class TokenBinding(BaseModel):
    """State of the Token Binding protocol for the communication channel.
    Attributes:
        `status`: A `TokenBindingStatus` value
        `id`: The token binding ID, when applicable (see spec)
    NOTE(review): unlike most sibling models this extends plain `BaseModel`,
    so it does not get the Base64URL/camelCase (de)serialization behavior of
    `WebAuthnBaseModel` -- confirm this is intentional.
    https://www.w3.org/TR/webauthn-2/#dictdef-tokenbinding
    """
    status: TokenBindingStatus
    id: Optional[str]
class PublicKeyCredentialRpEntity(WebAuthnBaseModel):
    """Information about the Relying Party.
    Serialization follows `WebAuthnBaseModel`: property names are camelCased
    in JSON.
    Attributes:
        `name`: A user-readable name for the Relying Party
        `id`: A unique, constant value assigned to the Relying Party. Authenticators use this value to associate a credential with a particular Relying Party user
    https://www.w3.org/TR/webauthn-2/#dictdef-publickeycredentialrpentity
    """
    name: str
    id: Optional[str]
class PublicKeyCredentialUserEntity(WebAuthnBaseModel):
    """Information about a user of a Relying Party.
    Serialization follows `WebAuthnBaseModel`: `bytes` become Base64URL strings
    in JSON and property names are camelCased (e.g. `displayName`).
    Attributes:
        `id`: An "opaque byte sequence" that uniquely identifies a user. Typically something like a UUID, but never user-identifying like an email address. Cannot exceed 64 bytes.
        `name`: A value which a user can see to determine which account this credential is associated with. A username or email address is fine here.
        `display_name`: A user-friendly representation of a user, like a full name.
    https://www.w3.org/TR/webauthn-2/#dictdef-publickeycredentialuserentity
    """
    id: bytes
    name: str
    display_name: str
class PublicKeyCredentialParameters(WebAuthnBaseModel):
    """Information about a cryptographic algorithm that may be used when creating
    a credential.
    Attributes:
        `type`: The literal string `"public-key"`
        `alg`: A numeric COSE identifier of a particular algorithm
    https://www.w3.org/TR/webauthn-2/#dictdef-publickeycredentialparameters
    """
    type: Literal["public-key"]
    alg: COSEAlgorithmIdentifier
class PublicKeyCredentialDescriptor(WebAuthnBaseModel):
    """Information about a generated credential.
    Serialization follows `WebAuthnBaseModel`: `id` is a Base64URL string in
    JSON and property names are camelCased.
    Attributes:
        `type`: The literal string `"public-key"`
        `id`: The sequence of bytes representing the credential's ID
        (optional) `transports`: The types of connections to the client/browser the authenticator supports
    https://www.w3.org/TR/webauthn-2/#dictdef-publickeycredentialdescriptor
    """
    type: Literal[
        PublicKeyCredentialType.PUBLIC_KEY
    ] = PublicKeyCredentialType.PUBLIC_KEY
    id: bytes
    transports: Optional[List[AuthenticatorTransport]] = None
class AuthenticatorSelectionCriteria(WebAuthnBaseModel):
    """A Relying Party's requirements for the types of authenticators that may
    interact with the client/browser.
    Attributes:
        (optional) `authenticator_attachment`: How the authenticator can be connected to the client/browser
        (optional) `resident_key`: Whether the authenticator should be able to store a credential on itself
        (optional) `require_resident_key`: DEPRECATED, set a value for `resident_key` instead
        (optional) `user_verification`: How the authenticator should be capable of determining user identity. Defaults to `PREFERRED`.
    https://www.w3.org/TR/webauthn-2/#dictdef-authenticatorselectioncriteria
    """
    authenticator_attachment: Optional[AuthenticatorAttachment]
    resident_key: Optional[ResidentKeyRequirement]
    require_resident_key: Optional[bool] = False
    user_verification: Optional[
        UserVerificationRequirement
    ] = UserVerificationRequirement.PREFERRED
class CollectedClientData(BaseModel):
    """Decoded ClientDataJSON
    Attributes:
        `type`: Either `"webauthn.create"` or `"webauthn.get"`, for registration and authentication ceremonies respectively
        `challenge`: The challenge passed to the authenticator within the options
        `origin`: The base domain with protocol on which the registration or authentication ceremony took place (e.g. "https://foo.bar")
        (optional) `cross_origin`: Whether or not the registration or authentication ceremony took place on a different origin (think within an <iframe>)
        (optional) `token_binding`: Information on the state of the Token Binding protocol
    NOTE(review): extends plain `BaseModel`, so it does not get the
    Base64URL/camelCase (de)serialization of `WebAuthnBaseModel` -- confirm
    this is intentional for a structure parsed from client data.
    https://www.w3.org/TR/webauthn-2/#dictdef-collectedclientdata
    """
    type: ClientDataType
    challenge: bytes
    origin: str
    cross_origin: Optional[bool]
    token_binding: Optional[TokenBinding]
################
#
# Registration
#
################
class PublicKeyCredentialCreationOptions(WebAuthnBaseModel):
    """Registration Options.
    Serialization follows `WebAuthnBaseModel`: `challenge` and other `bytes`
    values become Base64URL strings in JSON and property names are camelCased.
    Attributes:
        `rp`: Information about the Relying Party
        `user`: Information about the user
        `challenge`: A unique byte sequence to be returned by the authenticator. Helps prevent replay attacks
        `pub_key_cred_params`: Cryptographic algorithms supported by the Relying Party when verifying signatures
        (optional) `timeout`: How long the client/browser should give the user to interact with an authenticator
        (optional) `exclude_credentials`: A list of credentials associated with the user to prevent them from re-enrolling one of them
        (optional) `authenticator_selection`: Additional qualities about the authenticators the user can use to complete registration
        (optional) `attestation`: The Relying Party's desire for a declaration of an authenticator's provenance via attestation statement. Defaults to `NONE`.
    https://www.w3.org/TR/webauthn-2/#dictdef-publickeycredentialcreationoptions
    """
    rp: PublicKeyCredentialRpEntity
    user: PublicKeyCredentialUserEntity
    challenge: bytes
    pub_key_cred_params: List[PublicKeyCredentialParameters]
    timeout: Optional[int]
    exclude_credentials: Optional[List[PublicKeyCredentialDescriptor]]
    authenticator_selection: Optional[AuthenticatorSelectionCriteria]
    attestation: AttestationConveyancePreference = AttestationConveyancePreference.NONE
class AuthenticatorAttestationResponse(WebAuthnBaseModel):
    """The `response` property on a registration credential.
    Both attributes are raw byte sequences (Base64URL strings in JSON).
    Attributes:
        `client_data_json`: Information the authenticator collects about the client/browser it communicates with
        `attestation_object`: Encoded information about an attestation
    https://www.w3.org/TR/webauthn-2/#authenticatorattestationresponse
    """
    client_data_json: bytes
    attestation_object: bytes
class RegistrationCredential(WebAuthnBaseModel):
    """A registration-specific subclass of PublicKeyCredential returned from
    `navigator.credentials.create()`
    Attributes:
        `id`: The Base64URL-encoded representation of raw_id
        `raw_id`: A byte sequence representing the credential's unique identifier
        `response`: The authenticator's attestation data
        `type`: The literal string `"public-key"`
        `transports`: The authenticator's supported methods of communication with a client/browser
    https://www.w3.org/TR/webauthn-2/#publickeycredential
    """
    id: str
    raw_id: bytes
    response: AuthenticatorAttestationResponse
    type: Literal[
        PublicKeyCredentialType.PUBLIC_KEY
    ] = PublicKeyCredentialType.PUBLIC_KEY
    transports: Optional[List[AuthenticatorTransport]]
class AttestationStatement(BaseModel):
    """A collection of all possible fields that may exist in an attestation
    statement. Combinations of these fields are specific to a particular
    attestation format (see `AttestationFormat`); every field is therefore
    optional here.
    https://www.w3.org/TR/webauthn-2/#sctn-defined-attestation-formats
    TODO: Decide if this is acceptable, or if we want to split this up into multiple
    format-specific classes that define only the fields that are present for a given
    attestation format.
    """
    sig: Optional[bytes]
    x5c: Optional[List[bytes]]
    response: Optional[bytes]
    alg: Optional[COSEAlgorithmIdentifier]
    ver: Optional[str]
    cert_info: Optional[bytes]
    pub_area: Optional[bytes]
class AuthenticatorDataFlags(BaseModel):
    """Flags the authenticator will set about information contained within the
    `attestationObject.authData` property.
    Attributes:
        `up`: [U]ser was [P]resent
        `uv`: [U]ser was [V]erified
        `at`: [AT]tested credential is included
        `ed`: [E]xtension [D]ata is included
    https://www.w3.org/TR/webauthn-2/#flags
    """
    up: bool
    uv: bool
    at: bool
    ed: bool
class AttestedCredentialData(BaseModel):
| |
<reponame>bcov77/npose<filename>voxel_array.py
#!/usr/bin/env python
import os
import sys
import itertools
import numpy as np
import random
if ( hasattr( os, "FAKE_NUMBA" ) ):
    # Fallback when numba should not be used: a no-op stand-in for numba.njit
    # that accepts (and ignores) decorator keyword arguments like
    # fastmath/cache, returning the wrapped function unchanged.
    # NOTE(review): this checks for an attribute *set on the os module object*
    # by the caller, not an environment variable -- confirm that is intended
    # (``"FAKE_NUMBA" in os.environ`` may have been meant).
    def njit(**k):
        def outer_wrapper(func):
            def wrapper(*args, **kwargs):
                return func(*args, **kwargs)
            return wrapper
        return outer_wrapper
else:
    from numba import njit
# OOB gets clipped to the edges. Be careful to leave them at 0
class VoxelArray:
    """Dense n-dimensional voxel grid spanning ``[lb, ub]`` with cell size ``cs``.

    Out-of-bounds lookups elsewhere in this module clip to the edge voxels,
    so the edge voxels should normally be left at 0 (see :meth:`oob_is_zero`).

    Args:
        lbs: per-dimension lower bounds of the region
        ubs: per-dimension upper bounds of the region
        cbs: per-dimension voxel (cell) sizes
        dtype: numpy dtype for the backing array (ignored when ``arr`` given)
        arr: optional pre-built backing array (used by :meth:`copy`/:meth:`load`)
    """

    def __init__(self, lbs, ubs, cbs, dtype="f8", arr=None):
        self.dim = len(lbs)
        self.lb = lbs
        self.ub = ubs
        self.cs = cbs
        if arr is None:
            # Size the array so that ub falls inside the last voxel
            extents = self.floats_to_indices_no_clip(np.array([self.ub]))[0]
            extents += 1
            self.arr = np.zeros(extents, dtype=dtype)
        else:
            self.arr = arr

    def copy(self):
        """Return a deep copy of this VoxelArray."""
        return VoxelArray(self.lb, self.ub, self.cs, self.arr.dtype, self.arr.copy())

    def save(self, fname):
        """Serialize bounds, cell sizes, and voxel data to an ``.npy`` file."""
        save_dict = {
            "lb": self.lb,
            "ub": self.ub,
            "cs": self.cs,
            "arr": self.arr,
        }
        np.save(fname, save_dict)

    @classmethod
    def load(cls, fname):
        """Load a VoxelArray previously written by :meth:`save`."""
        save_dict = np.load(fname, allow_pickle=True).item()
        return cls(save_dict["lb"], save_dict["ub"], save_dict["cs"],
                   arr=save_dict["arr"])

    # only used in __init__
    def floats_to_indices_no_clip(self, pts):
        """Convert (N, dim) points to integer voxel indices without clipping."""
        # NOTE: np.int was removed in numpy >= 1.24; builtin int is equivalent
        inds = np.zeros((len(pts), self.dim), dtype=int)
        for i in range(self.dim):
            # Assignment into an int array truncates toward zero
            inds[:, i] = (pts[:, i] - self.lb[i]) / self.cs[i]
        return inds

    def floats_to_indices(self, pts, out=None):
        """Convert (N, dim) points to voxel indices, clipped to the array edges."""
        if out is None:
            inds = np.zeros((len(pts), self.dim), dtype=int)
            out = inds
        return xform_vectors_w_out(pts, self.lb, self.cs, self.arr.shape, out)

    def indices_to_centers(self, inds):
        """Return the center coordinates of the voxels at integer indices ``inds``."""
        return numba_indices_to_centers(inds, self.lb, self.cs)

    def all_indices(self):
        """Return an (N, dim) array containing every index in the grid."""
        ranges = []
        for i in range(self.dim):
            ranges.append(list(range(self.arr.shape[i])))
        return np.array(list(itertools.product(*ranges)))

    def all_centers(self):
        """Return the center coordinates of every voxel in the grid."""
        return self.indices_to_centers(self.all_indices())

    # One would usually type assert(voxel.oob_is_zero())
    def oob_is_zero(self):
        """Return True when every voxel on the outer boundary of the grid is zero.

        OOB lookups clip to the boundary voxels, so a non-zero boundary voxel
        would silently leak its value to out-of-bounds queries.

        Fixes vs the previous implementation: the edge mask is now a bool
        array (``|=`` on a float array raises TypeError), the edge voxels are
        looked up with a proper index tuple, and the return value matches the
        documented sense (True == boundary is all zero).
        """
        # This could certainly be made more efficient
        all_indices = self.all_indices()
        on_edge = np.zeros(len(all_indices), dtype=bool)
        for i in range(self.dim):
            on_edge |= (all_indices[:, i] == 0) | (all_indices[:, i] == self.arr.shape[i] - 1)
        edge_indices = all_indices[on_edge]
        return not np.any(self.arr[tuple(edge_indices.T)])

    # This uses the centers as measurement
    def indices_within_x_of(self, _x, pt):
        """Return the indices of all voxels whose centers lie within ``_x`` of ``pt``.

        The search box must stay at least one cell inside the grid bounds;
        the asserts catch queries that would silently clip.
        """
        low = pt - _x
        high = pt + _x
        # If you hit these, you are about to make a mistake
        assert not np.any(low <= self.lb + self.cs)
        assert not np.any(high >= self.ub - self.cs)
        bounds = self.floats_to_indices(np.array([low, high]))
        ranges = []
        for i in range(self.dim):
            ranges.append(np.arange(bounds[0, i], bounds[1, i] + 1))
        # in numba version, this whole bottom part is tested for loops
        indices = np.array(np.meshgrid(*ranges)).T.reshape(-1, len(ranges))
        centers = self.indices_to_centers(indices)
        return indices[np.sum(np.square(centers - pt), axis=-1) < _x * _x]

    def dump_mask_true(self, fname, mask, resname="VOX", atname="VOXL", z=None, fraction=1):
        """Write a PDB-format point cloud of every voxel where ``mask`` is True.

        Args:
            fname: output path
            mask: boolean array matching ``self.arr.shape``
            resname: residue name for the HETATM records
            atname: atom name for the HETATM records
            z: z coordinate to use when the grid is 2d
            fraction: randomly keep only this fraction of the points
        """
        indices = np.array(list(np.where(mask))).T
        centers = self.indices_to_centers(indices)
        if self.dim == 2:
            # NOTE: np.float was removed in numpy >= 1.24; builtin float works
            centers_ = np.zeros((len(centers), 3), float)
            centers_[:, :2] = centers
            centers_[:, 2] = z
            centers = centers_
        if fraction < 1:
            keep = np.random.random(len(indices)) < fraction
            centers = centers[keep]
        with open(fname, "w") as f:
            anum = 1
            rnum = 1
            for xyz in centers:
                f.write("%s%5i %4s %3s %s%4i %8.3f%8.3f%8.3f%6.2f%6.2f %11s\n"%(
                    "HETATM",
                    anum,
                    atname,
                    resname,
                    "A",
                    rnum,
                    xyz[0],xyz[1],xyz[2],
                    1.0,
                    1.0,
                    "HB"
                    ))
                anum += 1
                rnum += 1
                # Keep the serial fields inside their fixed PDB column widths
                anum %= 100000
                rnum %= 10000

    def dump_grids_true(self, fname, func, resname="VOX", atname="VOXL", jitter=False, z=None):
        """Write a PDB-format point cloud of every voxel whose value satisfies ``func``.

        Args:
            fname: output path
            func: predicate applied to each voxel's stored value
            resname: residue name for the HETATM records
            atname: atom name for the HETATM records
            jitter: add a tiny random offset so coincident points render apart
            z: z coordinate to use when the grid is 2d
        """
        centers = self.all_centers()
        vals = self.arr[tuple(self.floats_to_indices(centers).T)]
        if self.dim == 2:
            # NOTE: np.float was removed in numpy >= 1.24; builtin float works
            centers_ = np.zeros((len(centers), 3), float)
            centers_[:, :2] = centers
            centers_[:, 2] = z
            centers = centers_
        with open(fname, "w") as f:
            anum = 1
            rnum = 1
            for ind, xyz in enumerate(centers):
                if jitter:
                    xyz[0] += 0.01*2*(1 - 0.5*random.random())
                    xyz[1] += 0.01*2*(1 - 0.5*random.random())
                    xyz[2] += 0.01*2*(1 - 0.5*random.random())
                if not func(vals[ind]):
                    continue
                f.write("%s%5i %4s %3s %s%4i %8.3f%8.3f%8.3f%6.2f%6.2f %11s\n"%(
                    "HETATM",
                    anum,
                    atname,
                    resname,
                    "A",
                    rnum,
                    xyz[0],xyz[1],xyz[2],
                    1.0,
                    1.0,
                    "HB"
                    ))
                anum += 1
                rnum += 1
                anum %= 100000
                rnum %= 10000

    def clash_check(self, pts, max_clashes):
        """Delegate to the numba kernel: test ``pts`` against the grid (3d only)."""
        assert self.dim == 3
        return numba_clash_check(pts, max_clashes, self.arr, self.lb, self.cs)

    def ray_trace(self, start, end, max_clashes, debug=False):
        """Delegate to the numba kernel: trace a single ray through the grid (3d only)."""
        assert self.dim == 3
        return numba_ray_trace(start, end, max_clashes, self.arr, self.lb, self.cs, debug)

    def ray_trace_many(self, starts, ends, max_clashes, debug=False):
        """Delegate to the numba kernel: trace many rays through the grid (3d only)."""
        assert self.dim == 3
        return numba_ray_trace_many(starts, ends, max_clashes, self.arr, self.lb, self.cs, debug)

    def ray_trace_report_end(self, start, end, debug=False):
        """Delegate to the numba kernel: trace a ray and report where it stopped (3d only)."""
        assert self.dim == 3
        return numba_ray_trace_report_end(start, end, self.arr, self.lb, self.cs)

    def ray_trace_report_end_many(self, starts, ends, debug=False):
        """Delegate to the numba kernel: trace many rays and report endpoints (3d only)."""
        assert self.dim == 3
        return numba_ray_trace_report_end_many(starts, ends, self.arr, self.lb, self.cs)

    def add_to_clashgrid(self, pts, atom_radius, store_val=True):
        """Mark every voxel within reach of each point with ``store_val``.

        ``atom_radius`` may be a list with one radius per point.
        """
        if isinstance(atom_radius, list):
            assert len(pts) == len(atom_radius)
            numba_make_clashgrid_var_atom_radius(pts, atom_radius, self.arr, self.lb, self.ub, self.cs, self.arr.shape, store_val)
        else:
            numba_make_clashgrid(pts, atom_radius, self.arr, self.lb, self.ub, self.cs, self.arr.shape, store_val)

    def add_to_sum_grid(self, pts, atom_radius, store_val=1):
        """Accumulate ``store_val`` into every voxel within reach of each point."""
        numba_make_sum_grid(pts, atom_radius, self.arr, self.lb, self.ub, self.cs, self.arr.shape, store_val)

    # fill the voxel array with ipt for all voxels closest to ipt.
    # initialize self to -1 and dist_grid to +100000
    def add_to_near_grid(self, pts, atom_radius, dist_grid, store_vals=None):
        """Claim voxels for the nearest point; ``dist_grid`` must share this grid's geometry."""
        assert (self.lb == dist_grid.lb).all()
        assert (self.ub == dist_grid.ub).all()
        assert (self.cs == dist_grid.cs).all()
        assert self.arr.shape == dist_grid.arr.shape
        if store_vals is None:
            store_vals = np.arange(len(pts))
        numba_add_to_near_grid(pts, store_vals, atom_radius, self.arr, dist_grid.arr, self.lb, self.ub, self.cs, self.arr.shape)

    # fill voxels with -1 if below surface, 1 if above
    def do_surface_crawl(self, start, normal, direction, distance):
        """Delegate to the numba kernel: walk along the stored surface."""
        return numba_do_surface_crawl(start, normal, direction, distance, self.arr, self.lb, self.ub, self.cs, self.arr.shape)

    def flood_fill(self, fill_val, overwrite_val):
        """Flood-fill ``overwrite_val`` voxels with ``fill_val`` (2d and 3d only)."""
        if self.dim == 2:
            return numba_flood_fill_2d(fill_val, overwrite_val, self.arr, self.lb, self.ub, self.cs, self.arr.shape)
        if self.dim == 3:
            return numba_flood_fill_3d(fill_val, overwrite_val, self.arr, self.lb, self.ub, self.cs, self.arr.shape)
        assert False  # unsupported dimensionality

    def flood_fill_from_here(self, fill_val, overwrite_val, start_idx):
        """Flood-fill starting from ``start_idx`` (3d only)."""
        return numba_flood_fill_3d_from_here(fill_val, overwrite_val, start_idx, self.arr, self.lb, self.ub, self.cs, self.arr.shape)
@njit(fastmath=True,cache=True)
def numba_seek_to_surface(pt, normal_step, up_down_steps, fail, arr, lb, ub, cs, shape):
    # Starting from pt, probe symmetrically along +/- normal_step looking for
    # the first voxel whose stored value is the opposite sign of the voxel at
    # pt (the grid holds -1 below the surface and 1 above; see flood fill).
    # `fail` is a 1-element bool array used as an out-parameter (numba-friendly).
    initial_pt = lookup_vec(pt, arr, lb, cs, shape)
    if ( initial_pt == 0 ):
        # pt sits in an unfilled voxel: no surface to seek from here
        fail[0] = True
        return pt
    look_for = 1 if initial_pt == -1 else -1
    up_vec = pt.copy()
    down_vec = pt.copy()
    for i in range(up_down_steps):
        # One step further "up" and one step further "down" per iteration
        up_vec += normal_step
        if ( lookup_vec(up_vec, arr, lb, cs, shape) == look_for ):
            return up_vec
        down_vec -= normal_step
        if ( lookup_vec(down_vec, arr, lb, cs, shape) == look_for ):
            return down_vec
    # No sign crossing found within up_down_steps in either direction
    fail[0] = True
    return up_vec
@njit(fastmath=True,cache=True)
def distance_two_pts(pt1, pt2):
    # Euclidean distance between two 3d points.
    dx = pt1[0] - pt2[0]
    dy = pt1[1] - pt2[1]
    dz = pt1[2] - pt2[2]
    return np.sqrt(dx*dx + dy*dy + dz*dz)
# Walk along the stored surface; returns (visited points, distance covered).
@njit(fastmath=True,cache=True)
def numba_do_surface_crawl(start, normal, direction, distance, arr, lb, ub, cs, shape):
    # Repeatedly step forward along `direction` and re-snap to the surface
    # along `normal`, until `distance` from start is covered or the surface
    # is lost (numba_seek_to_surface sets fail[0]).
    up_down_steps = 20                # max probe steps when re-snapping
    up_down_step = cs[0]*0.3          # probe resolution: ~1/3 of a voxel
    normal_step = normal*up_down_step
    forward_step_size = cs[0]
    forward_step = forward_step_size * direction
    fail = np.array([0], np.bool_)    # out-parameter for numba_seek_to_surface
    traversed = []
    traveled = 0
    prev = start
    current = start
    while ( traveled < distance ):
        surf = numba_seek_to_surface(current, normal_step, up_down_steps, fail, arr, lb, ub, cs, shape)
        if ( fail[0] ):
            # Fell off the surface: report what we have so far
            return traversed, traveled
        traversed.append(surf)
        # traveled += distance_two_pts( surf, prev )
        # NOTE: straight-line distance from start, not path length along the crawl
        traveled = distance_two_pts( surf, start )
        prev = surf
        current = prev + forward_step
    return traversed, traveled
@njit(fastmath=True,cache=True)
def numba_add_to_near_grid(pts, store_vals, atom_radius, near_grid, dist_grid, lb, ub, cs, shape):
    # For each point, claim every voxel within 2*atom_radius that this point
    # is closer to than any previously processed point (numba_store_near_grid
    # keeps the running nearest-distance in dist_grid).
    reach = atom_radius*2
    for ipt in range(len(pts)):
        numba_store_near_grid(near_grid, dist_grid, reach, pts[ipt], store_vals[ipt], lb, ub, cs, shape)
@njit(fastmath=True,cache=True)
def numba_store_near_grid(near_grid, dist_grid, _x, pt, idx, lb, ub, cs, shape):
    # Store `idx` into every voxel within `_x` of `pt` that is closer to `pt`
    # than to any previously stored point; `dist_grid` holds the running
    # nearest squared distance and must be pre-initialized high (see
    # VoxelArray.add_to_near_grid).
    # Guard against queries that would clip at the grid edges.
    assert(len(pt) == 3)
    # NOTE: np.float_ was removed in NumPy 2.0; np.float64 is the same type.
    low_high = np.array([[0, 0, 0], [0, 0, 0]], dtype=np.float64)
    for i in range(3):
        low_high[0, i] = pt[i] - _x
        low_high[1, i] = pt[i] + _x
    for i in range(3):
        assert( low_high[0, i] > lb[i] + cs[i] )
        assert( low_high[1, i] < ub[i] - cs[i] )
    # transform bounds into upper and lower corners in voxel array indices
    bounds = xform_vectors( low_high, lb, cs, shape )
    # translate voxel array indices back to 3d coords and do distance check,
    # hoisting the per-axis squared offsets out of the inner loops
    _x2 = _x*_x
    for i in range(bounds[0, 0], bounds[1, 0] + 1):
        x = numba_ind_index_to_center(i, lb[0], cs[0]) - pt[0]
        x2 = x*x
        for j in range(bounds[0, 1], bounds[1, 1] + 1):
            y = numba_ind_index_to_center(j, lb[1], cs[1]) - pt[1]
            y2 = y*y
            for k in range(bounds[0, 2], bounds[1, 2] + 1):
                z = numba_ind_index_to_center(k, lb[2], cs[2]) - pt[2]
                z2 = z*z
                dist2 = x2 + y2 + z2
                if ( dist2 < _x2 ):
                    if ( dist2 < dist_grid[i, j, k] ):
                        # This voxel is closer to pt than to any prior point
                        near_grid[i, j, k] = idx
                        dist_grid[i, j, k] = dist2
@njit(fastmath=True,cache=True)
def numba_make_sum_grid(pts, atom_radius, arr, lb, ub, cs, shape, store_val):
    # Accumulate store_val into every voxel within 2*atom_radius of each point.
    reach = atom_radius*2
    for ipt in range(len(pts)):
        numba_indices_add_within_x_of(arr, store_val, reach, pts[ipt], lb, ub, cs, shape)
@njit(fastmath=True,cache=True)
def numba_indices_add_within_x_of(arr, to_store, _x, pt, lb, ub, cs, shape):
# | |
<gh_stars>1-10
# Copyright 2020 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Validators for ``hw`` namespaced extra specs."""
from nova.api.validation.extra_specs import base
# Validators for the ``hw:cpu_realtime*`` extra specs.
realtime_validators = [
    base.ExtraSpecValidator(
        name='hw:cpu_realtime',
        description=(
            'Determine whether realtime mode should be enabled for the '
            'instance or not. '
            'Only supported by the libvirt virt driver.'
        ),
        value={
            'type': bool,
            'description': 'Whether to enable realtime priority.',
        },
    ),
    base.ExtraSpecValidator(
        name='hw:cpu_realtime_mask',
        description=(
            # Fixed grammar: "A exclusion mask" -> "An exclusion mask"
            'An exclusion mask of CPUs that should not be enabled for '
            'realtime. '
            'Only supported by the libvirt virt driver.'
        ),
        value={
            'type': str,
            'pattern': r'(\^)?\d+((-\d+)?(,\^?\d+(-\d+)?)?)*',
        },
    ),
]
# Validator for the standalone ``hw:hide_hypervisor_id`` extra spec.
hide_hypervisor_id_validator = [
    base.ExtraSpecValidator(
        name='hw:hide_hypervisor_id',
        description=(
            'Determine whether the hypervisor ID should be hidden from the '
            'guest. '
            'Only supported by the libvirt virt driver.'
        ),
        value={
            'type': bool,
            'description': 'Whether to hide the hypervisor ID.',
        },
    )
]
# Validators for the CPU pinning / thread placement extra specs.
cpu_policy_validators = [
    base.ExtraSpecValidator(
        name='hw:cpu_policy',
        description=(
            'The policy to apply when determining what host CPUs the guest '
            'CPUs can run on. '
            'If ``shared`` (default), guest CPUs can be overallocated but '
            'cannot float across host cores. '
            'If ``dedicated``, guest CPUs cannot be overallocated but are '
            'individually pinned to their own host core. '
            'If ``mixed``, the policy for each instance CPU can be specified '
            'using the ``hw:cpu_dedicated_mask`` or ``hw:cpu_realtime_mask`` '
            # Fixed missing separator space: previously rendered as
            # "extra specs.Only supported ..."
            'extra specs. '
            'Only supported by the libvirt virt driver.'
        ),
        value={
            'type': str,
            'description': 'The CPU policy.',
            'enum': [
                'dedicated',
                'shared',
                'mixed',
            ],
        },
    ),
    base.ExtraSpecValidator(
        name='hw:cpu_thread_policy',
        description=(
            'The policy to apply when determining whether the destination '
            'host can have hardware threads enabled or not. '
            'If ``prefer`` (default), hosts with hardware threads will be '
            'preferred. '
            'If ``require``, hosts with hardware threads will be required. '
            'If ``isolate``, hosts with hardware threads will be forbidden. '
            'Only supported by the libvirt virt driver.'
        ),
        value={
            'type': str,
            'description': 'The CPU thread policy.',
            'enum': [
                'prefer',
                'isolate',
                'require',
            ],
        },
    ),
    base.ExtraSpecValidator(
        name='hw:emulator_threads_policy',
        description=(
            'The policy to apply when determining whether emulator threads '
            'should be offloaded to a separate isolated core or to a pool '
            'of shared cores. '
            'If ``share``, emulator overhead threads will be offloaded to a '
            'pool of shared cores. '
            'If ``isolate``, emulator overhead threads will be offloaded to '
            'their own core. '
            'Only supported by the libvirt virt driver.'
        ),
        value={
            'type': str,
            'description': 'The emulator thread policy.',
            'enum': [
                'isolate',
                'share',
            ],
        },
    ),
    base.ExtraSpecValidator(
        name='hw:cpu_dedicated_mask',
        description=(
            'A mapping of **guest** (instance) CPUs to be pinned to **host** '
            'CPUs for an instance with a ``mixed`` CPU policy. '
            'Any **guest** CPUs which are not in this mapping will float '
            'across host cores. '
            'Only supported by the libvirt virt driver.'
        ),
        value={
            'type': str,
            'description': (
                'The **guest** CPU mapping to be pinned to **host** CPUs for '
                'an instance with a ``mixed`` CPU policy.'
            ),
            # This pattern is identical to 'hw:cpu_realtime_mask' pattern.
            'pattern': r'\^?\d+((-\d+)?(,\^?\d+(-\d+)?)?)*',
        },
    ),
]
# Validator for the ``hw:mem_page_size`` extra spec.
hugepage_validators = [
    base.ExtraSpecValidator(
        name='hw:mem_page_size',
        description=(
            'The size of memory pages to allocate to the guest with. '
            # Fixed grammar/punctuation: "one of the three alias - ... ``any``, -"
            'Can be one of the three aliases - ``large``, ``small`` or '
            '``any`` - or an actual size. '
            'Only supported by the libvirt virt driver.'
        ),
        value={
            'type': str,
            'description': 'The size of memory page to allocate',
            'pattern': r'(large|small|any|\d+([kKMGT]i?)?(b|bit|B)?)',
        },
    ),
]
# Validators for the NUMA topology extra specs.
numa_validators = [
    base.ExtraSpecValidator(
        name='hw:numa_nodes',
        description=(
            'The number of virtual NUMA nodes to allocate to configure the '
            'guest with. '
            'Each virtual NUMA node will be mapped to a unique host NUMA '
            'node. '
            'Only supported by the libvirt virt driver.'
        ),
        value={
            'type': int,
            'description': 'The number of virtual NUMA nodes to allocate',
            'min': 1,
        },
    ),
    base.ExtraSpecValidator(
        name='hw:numa_cpus.{num}',
        description=(
            'A mapping of **guest** (instance) CPUs to the **guest** (not '
            'host!) NUMA node identified by ``{num}``. '
            'This can be used to provide asymmetric CPU-NUMA allocation and '
            'is necessary where the number of guest NUMA nodes is not a '
            'factor of the number of guest CPUs. '
            'Only supported by the libvirt virt driver.'
        ),
        parameters=[
            {
                'name': 'num',
                'pattern': r'\d+',  # positive integers
                'description': 'The ID of the **guest** NUMA node.',
            },
        ],
        value={
            'type': str,
            'description': (
                'The guest CPUs, in the form of a CPU map, to allocate to the '
                'guest NUMA node identified by ``{num}``.'
            ),
            'pattern': r'\^?\d+((-\d+)?(,\^?\d+(-\d+)?)?)*',
        },
    ),
    base.ExtraSpecValidator(
        name='hw:numa_mem.{num}',
        description=(
            'A mapping of **guest** memory to the **guest** (not host!) NUMA '
            'node identified by ``{num}``. '
            'This can be used to provide asymmetric memory-NUMA allocation '
            'and is necessary where the number of guest NUMA nodes is not a '
            'factor of the total guest memory. '
            'Only supported by the libvirt virt driver.'
        ),
        parameters=[
            {
                'name': 'num',
                'pattern': r'\d+',  # positive integers
                'description': 'The ID of the **guest** NUMA node.',
            },
        ],
        value={
            'type': int,
            'description': (
                'The guest memory, in MB, to allocate to the guest NUMA node '
                'identified by ``{num}``.'
            ),
            'min': 1,
        },
    ),
    base.ExtraSpecValidator(
        name='hw:pci_numa_affinity_policy',
        description=(
            'The NUMA affinity policy of any PCI passthrough devices or '
            'SR-IOV network interfaces attached to the instance. '
            # Fixed RST markup: ``required`` was missing a closing backtick
            'If ``required``, only PCI devices from one of the host NUMA '
            'nodes the instance VCPUs are allocated from can be used by said '
            'instance. '
            'If ``preferred``, any PCI device can be used, though preference '
            'will be given to those from the same NUMA node as the instance '
            'VCPUs. '
            'If ``legacy`` (default), behavior is as with ``required`` unless '
            # Fixed grammar: "does not support provide" -> "does not provide"
            'the PCI device does not provide NUMA affinity '
            'information, in which case affinity is ignored. '
            'Only supported by the libvirt virt driver.'
        ),
        value={
            'type': str,
            'description': 'The PCI NUMA affinity policy',
            'enum': [
                'required',
                'preferred',
                'legacy',
                # NOTE(review): 'socket' is accepted here but not described in
                # the text above -- confirm wording against current Nova docs.
                'socket',
            ],
        },
    ),
]
cpu_topology_validators = [
base.ExtraSpecValidator(
name='hw:cpu_sockets',
description=(
'The number of virtual CPU threads to emulate in the guest '
'CPU topology. '
'Defaults to the number of vCPUs requested. '
'Only supported by the libvirt virt driver.'
),
value={
'type': int,
'description': 'A number of virtual CPU sockets',
'min': 1,
},
),
base.ExtraSpecValidator(
name='hw:cpu_cores',
description=(
'The number of virtual CPU cores to emulate per socket in the '
'guest CPU topology. '
'Defaults to ``1``.'
'Only supported by the libvirt virt driver. '
),
value={
'type': int,
'description': 'A number of virtual CPU cores',
'min': 1,
},
),
base.ExtraSpecValidator(
name='hw:cpu_threads',
description=(
'The number of virtual CPU threads to emulate per core in the '
'guest CPU topology.'
'Defaults to ``1``. '
'Only supported by the libvirt virt driver. '
),
value={
'type': int,
'description': 'A number of virtual CPU threads',
'min': 1,
},
),
base.ExtraSpecValidator(
name='hw:max_cpu_sockets',
description=(
'The max number of virtual CPU threads to emulate in the '
'guest CPU topology. '
'This is used to limit the topologies that can be requested by '
'an image and will be used to validate the ``hw_cpu_sockets`` '
'image metadata property. '
'Only supported by the libvirt virt driver. '
),
value={
'type': int,
'description': 'A number of virtual CPU sockets',
'min': 1,
},
),
base.ExtraSpecValidator(
name='hw:max_cpu_cores',
description=(
'The max number of virtual CPU cores to emulate per socket in the '
'guest CPU topology. '
'This is used to limit the topologies that can be requested by an '
'image and will be used to validate the ``hw_cpu_cores`` image '
'metadata property. '
'Only supported by the libvirt virt driver. | |
kwlabels = {
'top': self._cparams.get('toplabel', None),
'bot': self._cparams.get('botlabel', None),
'lft': self._cparams.get('lftlabel', None),
'rgt': self._cparams.get('rgtlabel', None),
'center': self._cparams.get('clabel', None)
}
if 'label' in self._cparams:
kwlabels[lblloc] = self._cparams.get('label')
# Add labels defined in **kwargs to the _userlabels list
for loc, label in kwlabels.items():
if label is not None:
rotate = (theta if lblrotate else 0)
self.label(label, loc, fontsize=lblsize, rotate=rotate, color=lblcolor)
for label in self._userlabels:
if not label.rotate:
rotate = 0
elif label.rotate is True:
rotate = theta
else:
rotate = label.rotate
self._place_label(label.label, loc=label.loc, ofst=label.ofst,
align=label.align, rotation=rotate,
font=label.font, fontsize=label.fontsize,
color=label.color)
# Add element-specific anchors
for name, pos in self.anchors.items():
self.absanchors[name] = self.transform.transform(pos)
self.absanchors['xy'] = self.transform.transform((0, 0))
drop = self._cparams.get('drop', None)
if drop is None or not self._cparams.get('move_cur', True):
self.absdrop = Point(dwgxy), dwgtheta
elif self.params.get('theta', None) == 0:
# Element def specified theta = 0, don't change
self.absdrop = self.transform.transform(drop), dwgtheta
else:
self.absdrop = self.transform.transform(drop), theta
return self.absdrop
def get_bbox(self, transform=False, includetext=True):
''' Get element bounding box
Args:
transform: Apply the element transform to the bbox
to get bounds in Drawing coordinates
includetext: Consider text when calculating bounding
box. Text width and height can vary by font,
so this produces an estimate of bounds.
Returns:
Corners of the bounding box, (xmin, ymin, xmax, ymax)
'''
xmin = ymin = math.inf
xmax = ymax = -math.inf
for segment in self.segments:
if not includetext and isinstance(segment, SegmentText): continue
if transform:
segment = segment.xform(self.transform)
segxmin, segymin, segxmax, segymax = segment.get_bbox()
xmin = min(xmin, segxmin)
xmax = max(xmax, segxmax)
ymin = min(ymin, segymin)
ymax = max(ymax, segymax)
return BBox(xmin, ymin, xmax, ymax)
def add_label(self, label, loc='top', ofst=None, align=None,
rotation=0, fontsize=None, size=None, font=None, color=None):
''' Add a label to the element, after element placement
Args:
label: Text to add. If list, list items will be evenly spaced
along the element.
loc: Location for text relative to element, either
['top', 'bot', 'lft', 'rgt'] or name of an anchor
ofst: Offset between text and element. Defaults to
Element.lblofst. Can be list of [x, y] offets.
align: Tuple of (horizontal, vertical) alignment where
horizontal is ['center', 'left', 'right'] and vertical
is ['center', 'top', 'bottom']
rotation: Rotation angle (degrees)
fontsize: Font size
font: Font family
color: Label text color
'''
warnings.warn('`add_label` is deprecated. Use `label` instead.', DeprecationWarning)
if align is None:
align = (None, None)
fontsize = fontsize if fontsize else size
self._place_label(label, loc, ofst, align=align, rotation=rotation,
fontsize=fontsize, font=font, color=color)
def _place_label(self, label: str, loc: LabelLoc=None,
ofst: XY | float | None=None, align: Align=(None, None),
rotation: float=0, fontsize: float=None,
font: str=None, color: str=None) -> None:
''' Adds the label Segment to the element, AFTER element placement
Args:
label: Text to add. If list, list items will be evenly spaced
along the element.
loc: Location for text relative to element, either
['top', 'bot', 'lft', 'rgt'] or name of an anchor
ofst: Offset between text and element. Defaults to Element.lblofst.
Can be list of [x, y] offets.
align: Tuple of (horizontal, vertical) alignment where horizontal
is ['center', 'left', 'right'] and vertical is ['center',
'top', 'bottom']
rotation: Rotation angle (degrees)
fontsize: Font size
font: Font family
color: Label text color
'''
rotation = (rotation + 360) % 360
if rotation > 90 and rotation < 270:
rotation -= 180 # Keep the label from going upside down
if loc is None:
loc = self._cparams.get('lblloc', 'top')
loc = {'bottom': 'bot', 'left': 'lft', 'right': 'rgt'}.get(loc, loc) # type: ignore
# This ensures a 'top' label is always on top, regardless of rotation
theta = self.transform.theta
if (theta % 360) > 90 and (theta % 360) <= 270:
if loc == 'top':
loc = 'bot'
elif loc == 'bot':
loc = 'top'
elif loc == 'lft':
loc = 'rgt'
elif loc == 'rgt':
loc = 'lft'
if align is None and loc == 'center' and isinstance(label, (list, tuple)):
align = ('center', 'center')
elif align is None:
align = (None, None)
if None in align: # Determine best alignment for label based on angle
th = theta - rotation
# Below alignment divisions work for label on top. Rotate angle for other sides.
if loc == 'lft':
th = th + 90
elif loc == 'bot':
th = th + 180
elif loc == 'rgt':
th = th + 270
th = (th+360) % 360 # Normalize angle so it's positive, clockwise
rotalign: list[Align] = [('center', 'bottom'), # label on top
('right', 'bottom'),
('right', 'center'), # label on right
('right', 'top'),
('center', 'top'), # label on bottom
('left', 'top'),
('left', 'center'), # label on left
('left', 'bottom')]
# Index into rotalign for a "top" label that's been rotated
rotalignidx = int(round((th/360)*8) % 8)
if loc and loc in self.anchors:
x1, y1, x2, y2 = self.get_bbox(includetext=False)
if (math.isclose(self.anchors[loc][0], x1, abs_tol=.15) or
math.isclose(self.anchors[loc][0], x2, abs_tol=.15) or
math.isclose(self.anchors[loc][1], y1, abs_tol=.15) or
math.isclose(self.anchors[loc][1], y2, abs_tol=.15)):
# Anchor is on an edge
dofst = self._cparams.get('lblofst', .1)
alignH: Halign
alignV: Valign
if math.isclose(self.anchors[loc][0], x1, abs_tol=.15):
alignH = 'right'
ofstx = -dofst
elif math.isclose(self.anchors[loc][0], x2, abs_tol=.15):
alignH = 'left'
ofstx = dofst
else:
alignH = 'center'
ofstx = 0
if math.isclose(self.anchors[loc][1], y1, abs_tol=.15):
alignV = 'top'
ofsty = -dofst
elif math.isclose(self.anchors[loc][1], y2, abs_tol=.15):
alignV = 'bottom'
ofsty = dofst
else:
alignV = 'center'
ofsty = 0
align = (align[0] or alignH, align[1] or alignV)
rotalignidx = (rotalign.index(align) + round((th/360)*8)) % 8
if ofst is None and not isinstance(label, (tuple, list)):
ofst = [ofstx, ofsty]
if loc == 'center':
align = (align[0] or 'center', align[1] or 'center')
else:
ralign = rotalign[rotalignidx]
align = (align[0] or ralign[0], align[1] or ralign[1])
xmax = self.bbox.xmax
xmin = self.bbox.xmin
ymax = self.bbox.ymax
ymin = self.bbox.ymin
if not math.isfinite(xmax+xmin+ymax+ymin):
xmax = xmin = ymax = ymin = .1
args: MutableMapping[str, Any] = {}
if fontsize is not None:
args['fontsize'] = fontsize
if font is not None:
args['font'] = font
if color is not None:
args['color'] = color
lblparams = dict(ChainMap(args, self._cparams))
lblparams = {'color': lblparams.get('color'),
'font': lblparams.get('font'),
'fontsize': lblparams.get('fontsize', 14),
'align': align,
'rotation': rotation}
if ofst is None:
ofst = self._cparams.get('lblofst', .1)
if isinstance(label, (list, tuple)):
# Divide list along length
if loc == 'top':
xdiv = (xmax-xmin)/(len(label)+1)
ofst = Point((0, ofst)) if not isinstance(ofst, (list, tuple)) else Point(ofst)
for i, lbltxt in enumerate(label):
xy = Point((xmin+xdiv*(i+1), ymax))
self.segments.append(SegmentText(xy+ofst, lbltxt, **lblparams))
elif loc == 'bot':
xdiv = (xmax-xmin)/(len(label)+1)
ofst = Point((0, -ofst)) if not isinstance(ofst, (list, tuple)) else Point(ofst)
for i, lbltxt in enumerate(label):
xy = Point((xmin+xdiv*(i+1), ymin))
self.segments.append(SegmentText(xy+ofst, lbltxt, **lblparams))
elif loc == 'lft':
ydiv = (ymax-ymin)/(len(label)+1)
ofst = Point((-ofst, 0)) if not isinstance(ofst, (list, tuple)) else Point(ofst)
for i, lbltxt in enumerate(label):
xy = Point((xmin, ymin+ydiv*(i+1)))
self.segments.append(SegmentText(xy+ofst, lbltxt, **lblparams))
elif loc == 'rgt':
ydiv = (ymax-ymin)/(len(label)+1)
ofst = Point((ofst, 0)) if not isinstance(ofst, (list, tuple)) else Point(ofst)
for i, lbltxt in enumerate(label):
xy = Point((xmax, ymin+ydiv*(i+1)))
self.segments.append(SegmentText(xy+ofst, lbltxt, **lblparams))
elif loc == 'center':
xdiv = (xmax-xmin)/(len(label)+1)
ofst = Point((0, ofst)) if not isinstance(ofst, (list, tuple)) else Point(ofst)
for i, lbltxt in enumerate(label):
xy = Point((xmin+xdiv*(i+1), 0))
self.segments.append(SegmentText(xy+ofst, lbltxt, **lblparams))
elif isinstance(label, str):
# Place in center
if loc == 'top':
ofst = Point((0, ofst)) if not isinstance(ofst, (list, tuple)) else Point(ofst)
xy = Point(((xmax+xmin)/2, ymax))
elif loc == 'bot':
ofst = Point((0, -ofst)) if not isinstance(ofst, (list, tuple)) else Point(ofst) # type: ignore
xy = Point(((xmax+xmin)/2, ymin))
elif loc == 'lft':
ofst = Point((-ofst, 0)) if not isinstance(ofst, (list, tuple)) else Point(ofst) # type: ignore
xy = Point((xmin, (ymax+ymin)/2))
elif loc == 'rgt':
ofst = Point((ofst, 0)) if not isinstance(ofst, (list, tuple)) else Point(ofst)
xy = Point((xmax, (ymax+ymin)/2))
elif loc == 'center':
ofst = Point((0, ofst)) if not isinstance(ofst, (list, tuple)) else Point(ofst)
xy = Point(((xmax+xmin)/2, (ymax+ymin)/2))
elif loc in self.anchors:
xy = Point(self.anchors[loc]) # type: ignore
ofst = Point((0, ofst)) if not isinstance(ofst, (list, tuple)) else Point(ofst)
xy = Point(xy)
else:
raise ValueError('Undefined location {}'.format(loc))
xy = xy + ofst
| |
from lib.base_controller import CommandHelp
from lib.utils import common, util, version, constants
from lib.get_controller import (
GetConfigController,
GetDistributionController,
GetPmapController,
GetRolesController,
GetSIndexController,
GetStatisticsController,
GetUdfController,
GetUsersController,
GetLatenciesController,
)
from .client.info import ASProtocolError
from .live_cluster_command_controller import LiveClusterCommandController
@CommandHelp('"show" is used to display Aerospike Statistics configuration.')
class ShowController(LiveClusterCommandController):
    """Top-level dispatcher for every "show" subcommand."""

    def __init__(self):
        # Ordered mapping of subcommand token -> controller implementing it.
        # (Insertion order is preserved and may affect help listing order.)
        self.controller_map = dict(
            [
                ("pmap", ShowPmapController),
                ("distribution", ShowDistributionController),
                ("mapping", ShowMappingController),
                ("best-practices", ShowBestPracticesController),
                ("udfs", ShowUdfsController),
                ("sindex", ShowSIndexController),
                ("config", ShowConfigController),
                ("latencies", ShowLatenciesController),
                ("statistics", ShowStatisticsController),
                ("roles", ShowRolesController),
                ("users", ShowUsersController),
                # TODO
                # ("rosters", ShowRosterController),
                # ("racks", ShowRacksController),
                # ("jobs", ShowJobsController),
            ]
        )
        self.modifiers = set()

    def _do_default(self, line):
        # No subcommand given: fall back to printing "show" help.
        self.execute_help(line)
@CommandHelp(
    '"show distribution" is used to show the distribution of object sizes',
    "and time to live for node and a namespace.",
)
class ShowDistributionController(LiveClusterCommandController):
    """Handles "show distribution": TTL and object-size histograms."""

    def __init__(self):
        # "with"/"for" modifiers select nodes and filter namespaces.
        self.modifiers = set(["with", "for"])
        self.getter = GetDistributionController(self.cluster)

    @CommandHelp("Shows the distributions of Time to Live and Object Size")
    def _do_default(self, line):
        # Run both sub-reports concurrently. Each handler receives its own
        # copy of `line` because handlers consume (delete) the flags they
        # parse from it.
        actions = (
            util.Future(self.do_time_to_live, line[:]).start(),
            util.Future(self.do_object_size, line[:]).start(),
        )
        # Block until both renders are scheduled and collect their results.
        return [action.result() for action in actions]

    @CommandHelp("Shows the distribution of TTLs for namespaces")
    def do_time_to_live(self, line):
        # Fetch the per-namespace TTL histogram and return a Future that
        # renders it; the caller decides when to resolve it.
        histogram = self.getter.do_distribution("ttl", nodes=self.nodes)
        return util.Future(
            self.view.show_distribution,
            "TTL Distribution",
            histogram,
            "Seconds",
            "ttl",
            self.cluster,
            like=self.mods["for"],
        )

    @CommandHelp(
        "Shows the distribution of namespace Eviction TTLs for server version 3.7.5 and below"
    )
    def do_eviction(self, line):
        # Same flow as do_time_to_live but for the (legacy) eviction histogram.
        histogram = self.getter.do_distribution("evict", nodes=self.nodes)
        return util.Future(
            self.view.show_distribution,
            "Eviction Distribution",
            histogram,
            "Seconds",
            "evict",
            self.cluster,
            like=self.mods["for"],
        )

    @CommandHelp(
        "Shows the distribution of Object sizes for namespaces",
        " Options:",
        " -b - Force to show byte wise distribution of Object Sizes.",
        " Default is rblock wise distribution in percentage",
        " -k <buckets> - Maximum number of buckets to show if -b is set.",
        " It distributes objects in same size k buckets and ",
        " displays only buckets that have objects in them. ",
        " [default is 5].",
    )
    def do_object_size(self, line):
        # `-b` toggles byte-wise output; `-k` sets the bucket count used
        # only in byte-wise mode. Both flags are removed from `line`.
        byte_distribution = util.check_arg_and_delete_from_mods(
            line=line, arg="-b", default=False, modifiers=self.modifiers, mods=self.mods
        )
        bucket_count = util.get_arg_and_delete_from_mods(
            line=line,
            arg="-k",
            return_type=int,
            default=5,
            modifiers=self.modifiers,
            mods=self.mods,
        )
        if not byte_distribution:
            # Default mode: rblock-wise distribution in percentage.
            histogram = self.getter.do_object_size(nodes=self.nodes)
            units = None
            try:
                units = common.get_histogram_units(histogram)
                if units is None:
                    units = "Record Blocks"
            except Exception as e:
                # Unparsable histogram data: report the error and render nothing.
                self.logger.error(e)
                return
            return util.Future(
                self.view.show_distribution,
                "Object Size Distribution",
                histogram,
                units,
                "objsz",
                self.cluster,
                like=self.mods["for"],
            )
        # Byte-wise mode: re-query with explicit bucketing.
        histogram = self.getter.do_object_size(
            byte_distribution=True, bucket_count=bucket_count, nodes=self.nodes
        )
        histogram_name = "objsz"
        title = "Object Size Distribution"
        unit = "Bytes"
        set_bucket_count = True
        return util.Future(
            self.view.show_object_distribution,
            title,
            histogram,
            unit,
            histogram_name,
            bucket_count,
            set_bucket_count,
            self.cluster,
            like=self.mods["for"],
        )
@CommandHelp('"show latencies" is used to show the server latency histograms')
class ShowLatenciesController(LiveClusterCommandController):
    """Handles "show latencies": per-histogram server latency tables."""

    def __init__(self):
        self.modifiers = set(["with", "like", "for"])
        self.latency_getter = GetLatenciesController(self.cluster)

    def get_namespace_set(self):
        # Namespaces are only resolved when the user supplied a "for" filter.
        if not self.mods["for"]:
            return set()
        namespaces = self.latency_getter.get_namespace_set(self.nodes)
        return set(util.filter_list(namespaces, self.mods["for"]))

    def sort_data_by_histogram_name(self, latency_data):
        # Regroup {node: {hist: data}} into {hist: {node: data}},
        # dropping nodes whose fetch raised an exception.
        hist_latency = {}
        for node_id, hist_data in latency_data.items():
            if isinstance(hist_data, Exception):
                continue
            for hist_name, data in hist_data.items():
                hist_latency.setdefault(hist_name, {})[node_id] = data
        return hist_latency

    # It would be nice if the 'show latencies' help section could be completely removed for servers prior to 5.1
    @CommandHelp(
        "Displays latency information for the Aerospike cluster.",
        " Options:",
        " -e - Exponential increment of latency buckets, i.e. 2^0 2^(e) ... 2^(e * i)",
        " [default: 3]",
        " -b - Number of latency buckets to display.",
        " [default: 3]",
        " -v - Set to display verbose output of optionally configured histograms.",
    )
    def _do_default(self, line):
        increment = util.get_arg_and_delete_from_mods(
            line=line,
            arg="-e",
            return_type=int,
            default=3,
            modifiers=self.modifiers,
            mods=self.mods,
        )
        buckets = util.get_arg_and_delete_from_mods(
            line=line,
            arg="-b",
            return_type=int,
            default=3,
            modifiers=self.modifiers,
            mods=self.mods,
        )
        verbose = util.check_arg_and_delete_from_mods(
            line=line, arg="-v", default=False, modifiers=self.modifiers, mods=self.mods
        )
        namespaces = self.get_namespace_set()
        (
            latencies_nodes,
            latency_nodes,
        ) = self.latency_getter.get_latencies_and_latency_nodes(self.nodes)
        latencies = self.latency_getter.get_all(
            self.nodes, buckets, increment, verbose, namespaces
        )
        # Warn once whenever any connected node predates the "latencies"
        # info command: either no node supports it, or only some do.
        if len(latencies_nodes) == 0 or len(latency_nodes) != 0:
            self.logger.warning(
                "'show latencies' is not fully supported on aerospike versions <= 5.0"
            )
        # TODO: This format should probably be returned from get controller
        latencies = self.sort_data_by_histogram_name(latencies)
        self.view.show_latency(
            latencies,
            self.cluster,
            show_ns_details=bool(namespaces),
            **self.mods,
        )
@CommandHelp('"show config" is used to display Aerospike configuration settings')
class ShowConfigController(LiveClusterCommandController):
    """Handles "show config" and its subcommands.

    Each ``do_*`` handler fetches one configuration domain (service,
    network, namespace, XDR, DC, cluster) and returns one or more
    :class:`util.Future` objects that render the data when resolved.
    """

    def __init__(self):
        self.modifiers = set(["with", "like", "diff", "for"])
        self.getter = GetConfigController(self.cluster)

    def _table_format_args(self, line):
        """Consume the table-layout flags shared by every subcommand.

        Removes ``-r`` (repeat title/header every <terminal width> columns)
        and ``-flip`` (swap table axes) from ``line`` and returns the tuple
        ``(title_every_nth, flip_output)``.  Extracted because every
        ``do_*`` handler previously duplicated this parsing verbatim.
        """
        title_every_nth = util.get_arg_and_delete_from_mods(
            line=line,
            arg="-r",
            return_type=int,
            default=0,
            modifiers=self.modifiers,
            mods=self.mods,
        )
        flip_output = util.check_arg_and_delete_from_mods(
            line=line,
            arg="-flip",
            default=False,
            modifiers=self.modifiers,
            mods=self.mods,
        )
        return title_every_nth, flip_output

    @CommandHelp(
        "Displays service, network, and namespace configuration",
        " Options:",
        " -r - Repeat output table title and row header after every <terminal width> columns.",
        " [default: False, no repetition]",
        " -flip - Flip output table to show Nodes on Y axis and config on X axis.",
    )
    def _do_default(self, line):
        # Run the three default reports concurrently; each handler gets its
        # own copy of `line` because it strips the flags it consumes.
        actions = (
            util.Future(self.do_service, line[:]).start(),
            util.Future(self.do_network, line[:]).start(),
            util.Future(self.do_namespace, line[:]).start(),
        )
        return [action.result() for action in actions]

    @CommandHelp("Displays service configuration")
    def do_service(self, line):
        title_every_nth, flip_output = self._table_format_args(line)
        service_configs = self.getter.get_service(nodes=self.nodes)
        return util.Future(
            self.view.show_config,
            "Service Configuration",
            service_configs,
            self.cluster,
            title_every_nth=title_every_nth,
            flip_output=flip_output,
            **self.mods,
        )

    @CommandHelp("Displays network configuration")
    def do_network(self, line):
        title_every_nth, flip_output = self._table_format_args(line)
        network_configs = self.getter.get_network(nodes=self.nodes)
        return util.Future(
            self.view.show_config,
            "Network Configuration",
            network_configs,
            self.cluster,
            title_every_nth=title_every_nth,
            flip_output=flip_output,
            **self.mods,
        )

    @CommandHelp("Displays namespace configuration")
    def do_namespace(self, line):
        title_every_nth, flip_output = self._table_format_args(line)
        ns_configs = self.getter.get_namespace(
            nodes=self.nodes, for_mods=self.mods["for"]
        )
        # One render future per namespace.
        return [
            util.Future(
                self.view.show_config,
                "%s Namespace Configuration" % (ns),
                configs,
                self.cluster,
                title_every_nth=title_every_nth,
                flip_output=flip_output,
                **self.mods,
            )
            for ns, configs in list(ns_configs.items())
        ]

    @CommandHelp("Displays XDR configuration")
    def do_xdr(self, line):
        title_every_nth, flip_output = self._table_format_args(line)
        xdr5_configs = self.getter.get_xdr5(nodes=self.nodes)
        old_xdr_configs = self.getter.get_old_xdr(nodes=self.nodes)
        futures = []
        # XDR 5+ and pre-5 servers report configuration in different shapes,
        # so each group (possibly both in a mixed cluster) gets its own view.
        if xdr5_configs:
            formatted_configs = common.format_xdr5_configs(
                xdr5_configs, self.mods.get("for", [])
            )
            futures.append(
                util.Future(
                    self.view.show_xdr5_config,
                    "XDR Configuration",
                    formatted_configs,
                    self.cluster,
                    title_every_nth=title_every_nth,
                    flip_output=flip_output,
                    **self.mods,
                )
            )
        if old_xdr_configs:
            futures.append(
                util.Future(
                    self.view.show_config,
                    "XDR Configuration",
                    old_xdr_configs,
                    self.cluster,
                    title_every_nth=title_every_nth,
                    flip_output=flip_output,
                    **self.mods,
                )
            )
        return futures

    # pre 5.0
    @CommandHelp(
        "Displays datacenter configuration.",
        'Replaced by "show config xdr" for server >= 5.0.',
    )
    def do_dc(self, line):
        # Start the build-version fetch first so it overlaps with argument
        # parsing and the DC-config fetch below.
        builds = util.Future(self.cluster.info_build, nodes=self.nodes).start()
        title_every_nth, flip_output = self._table_format_args(line)
        dc_configs = self.getter.get_dc(nodes=self.nodes)
        nodes_running_v5_or_higher = False
        nodes_running_v49_or_lower = False
        builds = builds.result()
        node_xdr_build_major_version = 4
        for build in builds.values():
            try:
                # BUG FIX: parse the full major-version component instead of
                # only the first character (`int(build[0])`), which would
                # misreport any server with a major version >= 10.
                node_xdr_build_major_version = int(build.split(".")[0])
            except Exception:
                continue
            if node_xdr_build_major_version >= 5:
                nodes_running_v5_or_higher = True
            else:
                nodes_running_v49_or_lower = True
        futures = []
        if nodes_running_v49_or_lower:
            futures = [
                util.Future(
                    self.view.show_config,
                    "%s DC Configuration" % (dc),
                    configs,
                    self.cluster,
                    title_every_nth=title_every_nth,
                    flip_output=flip_output,
                    **self.mods,
                )
                for dc, configs in dc_configs.items()
            ]
        if nodes_running_v5_or_higher:
            futures.append(
                util.Future(
                    self.logger.warning,
                    "Detected nodes running aerospike version >= 5.0. "
                    + "Please use 'asadm -e \"show config xdr\"' for versions 5.0 and up.",
                )
            )
        return futures

    @CommandHelp("Displays Cluster configuration")
    def do_cluster(self, line):
        title_every_nth, flip_output = self._table_format_args(line)
        cl_configs = self.getter.get_cluster(nodes=self.nodes)
        return util.Future(
            self.view.show_config,
            "Cluster Configuration",
            cl_configs,
            self.cluster,
            title_every_nth=title_every_nth,
            flip_output=flip_output,
            **self.mods,
        )
@CommandHelp(
    '"show mapping" is used to display Aerospike mapping from IP to Node_id and Node_id to IPs'
)
class ShowMappingController(LiveClusterCommandController):
    """Handles "show mapping": address <-> node-id lookup tables."""

    def __init__(self):
        self.modifiers = set(["like"])

    @CommandHelp("Displays mapping IPs to Node_id and Node_id to IPs")
    def _do_default(self, line):
        # Render both directions of the mapping concurrently.
        futures = [
            util.Future(self.do_ip, line).start(),
            util.Future(self.do_node, line).start(),
        ]
        return [future.result() for future in futures]

    @CommandHelp("Displays IP to Node_id mapping")
    def do_ip(self, line):
        return util.Future(
            self.view.show_mapping,
            "IP",
            "NODE-ID",
            self.cluster.get_IP_to_node_map(),
            **self.mods,
        )

    @CommandHelp("Displays Node_id to IPs mapping")
    def do_node(self, line):
        return util.Future(
            self.view.show_mapping,
            "NODE-ID",
            "IPs",
            self.cluster.get_node_to_IP_map(),
            **self.mods,
        )
@CommandHelp(
'"show statistics" is used to display statistics for Aerospike components.'
)
class ShowStatisticsController(LiveClusterCommandController):
def __init__(self):
self.modifiers = set(["with", "like", "for"])
self.getter = GetStatisticsController(self.cluster)
@CommandHelp(
"Displays bin, set, service, and namespace statistics",
" Options:",
" -t - Set to show total column at the end. It contains node wise sum for statistics.",
" -r - Repeat output table title and row header after every <terminal width> columns.",
" [default: False, no repetition]",
" -flip - Flip output table to show Nodes on Y axis and stats on X axis.",
)
def _do_default(self, line):
actions = (
util.Future(self.do_bins, line[:]).start(),
util.Future(self.do_sets, line[:]).start(),
util.Future(self.do_service, line[:]).start(),
util.Future(self.do_namespace, line[:]).start(),
)
| |
"""Connects to TAXII servers via cabby and formats the data received for dispatching to a Carbon Black feed."""
import argparse
import logging
import traceback
import urllib3
import copy
import yaml
import os
from cabby.exceptions import NoURIProvidedError, ClientException
from requests.exceptions import ConnectionError
from cbc_sdk.errors import ApiError
from cabby import create_client
from dataclasses import dataclass
from datetime import datetime
from itertools import chain
try:
    from threatintel import ThreatIntel
    from stix_parse import parse_stix, parse_stix_from_file, BINDING_CHOICES
    from feed_helper import FeedHelper
    from results import AnalysisResult
# allow for using stix_taxii on its own
except ImportError:
    from .threatintel import ThreatIntel
    # BUG FIX: the package-relative fallback previously omitted
    # parse_stix_from_file, causing a NameError in
    # collect_and_send_reports() when stix_taxii is imported as a package.
    from .stix_parse import parse_stix, parse_stix_from_file, BINDING_CHOICES
    from .feed_helper import FeedHelper
    from .results import AnalysisResult
# logging.basicConfig(filename='stix.log', filemode='w', level=logging.DEBUG)
# Root-logger configuration: write DEBUG-level records to stix.log,
# truncating the file on every run (filemode='w').
logging.basicConfig(filename='stix.log', filemode='w',
                    format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
                    datefmt='%Y-%m-%d:%H:%M:%S',
                    level=logging.DEBUG)
# Network/TAXII errors considered "expected" during normal operation:
# they are logged and handled rather than allowed to propagate.
handled_exceptions = (NoURIProvidedError, ClientException, ConnectionError)
def load_config_from_file():
    """Loads YAML formatted configuration from config.yml in working directory."""
    logging.debug("loading config from file")
    config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "config.yml")
    with open(config_path, "r") as config_file:
        raw_config = yaml.load(config_file, Loader=yaml.SafeLoader)
    # Strip out site options the user left blank so that dataclass defaults
    # apply instead of explicit None values.
    cleaned = copy.deepcopy(raw_config)
    for site_name, site_cfg in raw_config['sites'].items():
        for option, value in site_cfg.items():
            if value is None:
                del cleaned['sites'][site_name][option]
    logging.info(f"loaded config data: {cleaned}")
    return cleaned
@dataclass(eq=True, frozen=True)
class TaxiiSiteConfig:
    """Contains information needed to interface with a TAXII server.
    These values are loaded in from config.yml for each entry in the configuration file.
    Each TaxiiSiteConnector has its own TaxiiSiteConfig.
    """
    # NOTE(review): several str-annotated fields default to None and are
    # effectively Optional[str]; kept as-is to avoid changing runtime types.
    # Carbon Black feed to which this site's reports are dispatched.
    feed_id: str = ''
    # TAXII server hostname (scheme chosen via use_https in create_uri()).
    site: str = ''
    # Service paths appended to the site host when building URIs.
    discovery_path: str = ''
    collection_management_path: str = ''
    poll_path: str = ''
    use_https: bool = True
    # TLS verification and client-certificate material.
    ssl_verify: bool = True
    cert_file: str = None
    key_file: str = None
    default_score: int = 5  # [1,10]
    # Credentials passed to cabby's set_auth().
    username: str = None
    password: str = None
    # Comma-separated collection names to poll; '*' means all available.
    collections: str = '*'
    # Where polling begins; required (create_taxii_client() refuses to run
    # without it).
    start_date: str = None
    # Size of each polling window.
    size_of_request_in_minutes: int = 1440
    ca_cert: str = None
    # Optional proxies handed to cabby's set_proxies().
    http_proxy_url: str = None
    https_proxy_url: str = None
    # Stop polling a collection after this many reports (None = unlimited).
    reports_limit: int = None
    fail_limit: int = 10  # num attempts per collection for polling & parsing
class TaxiiSiteConnector():
    """Connects to and pulls data from a single TAXII server.

    One connector wraps one TaxiiSiteConfig entry from config.yml and owns
    the cabby client used to discover, list, and poll collections.
    """

    def __init__(self, site_conf):
        """
        Initialize the TaxiiSiteConnector.

        Args:
            site_conf (dict): Site configuration information.
        """
        self.config = TaxiiSiteConfig(**site_conf)
        self.client = None  # populated by create_taxii_client()

    def create_taxii_client(self):
        """Connects to a TAXII server using cabby and configuration entries."""
        conf = self.config
        if not conf.start_date:
            # Without a start date there is no sensible polling window.
            logging.error(f"A start_date is required for site {conf.site}. Exiting.")
            return
        if not conf.ssl_verify:
            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        try:
            client = create_client(conf.site,
                                   use_https=conf.use_https,
                                   discovery_path=conf.discovery_path)
            # BUG FIX: this call previously contained a redaction
            # placeholder (`password=<PASSWORD>`), which is a syntax error;
            # authenticate with the configured password.
            client.set_auth(username=conf.username,
                            password=conf.password,
                            verify_ssl=conf.ssl_verify,
                            ca_cert=conf.ca_cert,
                            cert_file=conf.cert_file,
                            key_file=conf.key_file)
            proxy_dict = dict()
            if conf.http_proxy_url:
                proxy_dict['http'] = conf.http_proxy_url
            if conf.https_proxy_url:
                proxy_dict['https'] = conf.https_proxy_url
            if proxy_dict:
                client.set_proxies(proxy_dict)
            self.client = client
        except handled_exceptions as e:
            logging.error(f"Error creating client: {e}")

    def create_uri(self, config_path):
        """Formats a URI for discovery, collection, or polling of a TAXII server.
        Args:
            config_path: A URI path to a TAXII server's discovery, collection, or polling service.
                Defined in config.yml configuration file.
        Returns:
            A full URI to one of a TAXII server's service paths, or None if
            either the site or the path is not configured (cabby then
            autodetects the service).
        """
        uri = None
        if self.config.site and config_path:
            if self.config.use_https:
                uri = 'https://'
            else:
                uri = 'http://'
            uri = uri + self.config.site + config_path
        return uri

    def query_collections(self):
        """Returns a list of STIX collections available to the user to poll."""
        collections = []
        try:
            uri = self.create_uri(self.config.collection_management_path)
            collections = self.client.get_collections(
                uri=uri)  # autodetect if uri=None
            for collection in collections:
                logging.info(f"Collection: {collection.name}, {collection.type}")
        except handled_exceptions as e:
            logging.warning(
                "Problem fetching collections from TAXII server. Check your TAXII Provider URL and username/password "
                f"(if required to access TAXII server): {e}")
        return collections

    def poll_server(self, collection, feed_helper):
        """
        Returns a STIX content block for a specific TAXII collection.
        Args:
            collection: Name of a TAXII collection to poll.
            feed_helper: FeedHelper object.
        """
        content_blocks = []
        uri = self.create_uri(self.config.poll_path)
        try:
            logging.info(f"Polling Collection: {collection.name}")
            content_blocks = self.client.poll(
                uri=uri,
                collection_name=collection.name,
                begin_date=feed_helper.start_date,
                end_date=feed_helper.end_date,
                content_bindings=BINDING_CHOICES)
        except handled_exceptions as e:
            logging.warning(f"problem polling taxii server: {e}")
        return content_blocks

    def parse_collection_content(self, content_blocks, default_score=None):
        """
        Yields a formatted report dictionary for each STIX content_block.
        Args:
            content_blocks: A chunk of STIX data from the TAXII collection being polled.
            default_score: The default score for the data, or None.
        """
        if default_score:
            score = default_score
        else:
            score = self.config.default_score
        try:
            for block in content_blocks:
                yield from parse_stix(block.content, score)
        except Exception as e:
            # A content block failed to download or parse; log it and keep
            # the reports yielded so far rather than aborting the poll.
            # (Was a silent bare `except:`, which also swallowed SystemExit
            # and KeyboardInterrupt.)
            logging.warning(f"Problem parsing collection content: {e}")
            yield from ()

    def import_collection(self, collection):
        """
        Polls a single TAXII server collection.
        Starting at the start_date set in config.yml, a FeedHelper object will continue to grab chunks
        of data from a collection until the report limit is reached or we reach the current datetime.
        Args:
            collection: Name of a TAXII collection to poll.
        Yields:
            Formatted report dictionaries from parse_collection_content(content_blocks)
            for each content_block pulled from a single TAXII collection.
        """
        num_times_empty_content_blocks = 0
        advance = True
        reports_limit = self.config.reports_limit
        if not self.config.size_of_request_in_minutes:
            size_of_request_in_minutes = 1440
        else:
            size_of_request_in_minutes = self.config.size_of_request_in_minutes
        feed_helper = FeedHelper(self.config.start_date,
                                 size_of_request_in_minutes)
        # config parameters `start_date` and `size_of_request_in_minutes` tell this Feed Helper
        # where to start polling in the collection, and then will advance polling in chunks of
        # `size_of_request_in_minutes` until we hit the most current `content_block`,
        # or reports_limit is reached.
        while feed_helper.advance():
            num_reports = 0
            # BUG FIX: the empty-chunk counter was previously reset to 0 at
            # the top of every iteration, so it could never exceed 1 and
            # fail_limit was unreachable (an empty feed was polled forever).
            # It is now only reset after a chunk that produced reports.
            content_blocks = self.poll_server(collection, feed_helper)
            reports = self.parse_collection_content(content_blocks)
            for report in reports:
                yield report
                num_reports += 1
                if reports_limit is not None and num_reports >= reports_limit:
                    logging.info(f"Reports limit of {self.config.reports_limit} reached")
                    advance = False
                    break
            if not advance:
                break
            if collection.type == 'DATA_SET':  # data is unordered, not a feed
                logging.info(f"collection:{collection}; type data_set; breaking")
                break
            if num_reports == 0:
                num_times_empty_content_blocks += 1
                if num_times_empty_content_blocks > self.config.fail_limit:
                    logging.error('Max fail limit reached; Exiting.')
                    break
            else:
                num_times_empty_content_blocks = 0
            if reports_limit is not None:
                reports_limit -= num_reports

    def import_collections(self, available_collections):
        """
        Polls each desired collection specified in config.yml.
        Args:
            available_collections: list of collections available to a TAXII server user.
        Yields:
            From import_collection(self, collection) for each desired collection.
        """
        if not self.config.collections:
            desired_collections = '*'
        else:
            desired_collections = self.config.collections
        desired_collections = [x.strip()
                               for x in desired_collections.lower().split(',')]
        want_all = True if '*' in desired_collections else False
        for collection in available_collections:
            if collection.type != 'DATA_FEED' and collection.type != 'DATA_SET':
                logging.debug(f"collection:{collection}; type not feed or data")
                continue
            if not collection.available:
                logging.debug(f"collection:{collection} not available")
                continue
            if want_all or collection.name.lower() in desired_collections:
                yield from self.import_collection(collection)

    def generate_reports(self):
        """Returns a list of report dictionaries for each desired collection specified in config.yml."""
        reports = []
        self.create_taxii_client()
        if not self.client:
            logging.error('Unable to create taxii client.')
            return reports
        available_collections = self.query_collections()
        if not available_collections:
            logging.warning('Unable to find any collections.')
            return reports
        reports = self.import_collections(available_collections)
        if not reports:
            logging.warning('Unable to import collections.')
            return reports
        return reports
class StixTaxii():
"""Allows for interfacing with multiple TAXII servers.
Instantiates separate TaxiiSiteConnector objects for each site specified in config.yml.
    Formats report dictionaries into AnalysisResult objects with formatted IOC_v2 attributes.
Sends AnalysisResult objects to ThreatIntel.push_to_cb for dispatching to a feed.
"""
def __init__(self, site_confs):
"""
Initialize the StixTaxii object.
Args:
site_confs (dict): Site configuration information.
"""
self.config = site_confs
self.client = None
def result(self, **kwargs):
"""Returns a new AnalysisResult with the given fields populated."""
result = AnalysisResult(**kwargs).normalize()
return result
def configure_sites(self):
"""Creates a TaxiiSiteConnector for each site in config.yml"""
self.sites = {}
try:
for site_name, site_conf in self.config['sites'].items():
self.sites[site_name] = TaxiiSiteConnector(site_conf)
logging.info(f"loaded site {site_name}")
except handled_exceptions as e:
logging.error(f"Error in parsing config file: {e}")
def format_report(self, reports):
"""
Converts a dictionary into an AnalysisResult.
Args:
reports: list of report dictionaries containing an id, title, description, timestamp, score, link,
and iocs_v2.
Yields:
An AnalysisResult for each report dictionary.
"""
for report in reports:
try:
analysis_name = report['id']
title = report['title']
description = report['description']
scan_time = datetime.fromtimestamp(report['timestamp'])
score = report['score']
link = report['link']
ioc_dict = report['iocs_v2']
result = self.result(
analysis_name=analysis_name,
scan_time=scan_time,
score=score,
title=title,
description=description)
for ioc_key, ioc_val in ioc_dict.items():
result.attach_ioc_v2(values=ioc_val, field=ioc_key, link=link)
except handled_exceptions as e:
logging.warning(f"Problem in report formatting: {e}")
result = self.result(
analysis_name="exception_format_report", error=True)
yield result
def collect_and_send_reports(self, file_names=None):
"""Collects and sends formatted reports to ThreatIntel.push_to_cb for validation and dispatching to a feed."""
self.configure_sites()
ti = ThreatIntel()
for site_name, site_conn in self.sites.items():
logging.debug(f"Verifying Feed {site_conn.config.feed_id} exists")
try:
ti.verify_feed_exists(site_conn.config.feed_id)
except ApiError as e:
logging.error(
f"Couldn't find Enterprise EDR Feed {site_conn.config.feed_id}. Skipping {site_name}: {e}")
continue
if file_names:
reports = []
try:
report_generators = []
# generate Reports from STIX XML files
for file in file_names:
report_generators.append(parse_stix_from_file(file, site_conn.config.default_score))
| |
== 'alpha' and version[1] == 0)
)
)
)
) and 2 or 1 # NOQA
return self._SKIP_VERSION
if get_skip_version() == 1:
try:
# Django trunk since r7722 uses CollectedObjects instead of dict
from django.db.models.query import CollectedObjects
sub_objects = CollectedObjects()
except ImportError:
# previous versions don't have CollectedObjects
sub_objects = {}
self.instance._collect_sub_objects(sub_objects)
sub_objects = sub_objects.keys()
elif get_skip_version() == 2:
from django.db.models.deletion import Collector
from django.db import router
cls = self.instance.__class__
using = router.db_for_write(cls, instance=self.instance)
collector = Collector(using=using)
collector.collect([self.instance], collect_related=False)
# collector stores its instances in two places. I *think* we
# only need collector.data, but using the batches is needed
# to perfectly emulate the old behaviour
# TODO: check if batches are really needed. If not, remove them.
sub_objects = sum([list(i) for i in collector.data.values()], [])
if hasattr(collector, 'batches'):
# Django 1.6 removed batches for being dead code
# https://github.com/django/django/commit/a170c3f755351beb35f8166ec3c7e9d524d9602
for batch in collector.batches.values():
# batch.values can be sets, which must be converted to lists
sub_objects += sum([list(i) for i in batch.values()], [])
sub_objects_parents = [so._meta.parents for so in sub_objects]
if [self.model in p for p in sub_objects_parents].count(True) == 1:
# since this instance isn't explicitly created, it's variable name
# can't be referenced in the script, so record None in context dict
pk_name = self.instance._meta.pk.name
key = '%s_%s' % (self.model.__name__, getattr(self.instance, pk_name))
self.context[key] = None
self.skip_me = True
else:
self.skip_me = False
return self.skip_me
def instantiate(self):
    """Return the code line that creates this instance (first call only).

    Subsequent calls return an empty list; the generated variable name is
    recorded in the shared context so later foreign-key references can
    find it.
    """
    if self.instantiated:
        return []
    model_name = self.model.__name__
    lines = ["%s = %s()" % (self.variable_name, model_name)]
    self.instantiated = True
    # Register the variable under "<Model>_<pk>" for future FK lookups.
    pk_value = getattr(self.instance, self.instance._meta.pk.name)
    self.context['%s_%s' % (model_name, pk_value)] = self.variable_name
    return lines
def get_waiting_list(self, force=False):
    """Emit assignment lines for waiting fields whose values are available.

    Fields that resolve are removed from the waiting list; fields raising
    SkipValue are dropped without emitting a line; fields raising DoLater
    stay queued for a later pass.
    """
    lines = []
    skip_autofield = self.options.get('skip_autofield', True)
    # Iterate over a copy: the list is mutated while we walk it.
    for field in list(self.waiting_list):
        try:
            value = get_attribute_value(self.instance, field, self.context,
                                        force=force,
                                        skip_autofield=skip_autofield)
        except SkipValue:
            # Deliberately unexported value: drop it silently.
            self.waiting_list.remove(field)
        except DoLater:
            # Dependency not ready yet; retry on a later pass.
            pass
        else:
            lines.append('%s.%s = %s' % (self.variable_name, field.name, value))
            self.waiting_list.remove(field)
    return lines
def get_many_to_many_lines(self, force=False):
""" Generates lines that define many to many relations for this instance. """
lines = []
for field, rel_items in self.many_to_many_waiting_list.items():
for rel_item in list(rel_items):
try:
pk_name = rel_item._meta.pk.name
key = '%s_%s' % (rel_item.__class__.__name__, getattr(rel_item, pk_name))
value = "%s" % self.context[key]
lines.append('%s.%s.add(%s)' % (self.variable_name, field.name, value))
self.many_to_many_waiting_list[field].remove(rel_item)
except KeyError:
if force:
item_locator = orm_item_locator(rel_item)
self.context["__extra_imports"][rel_item._meta.object_name] = rel_item.__module__
lines.append('%s.%s.add( %s )' % (self.variable_name, field.name, item_locator))
self.many_to_many_waiting_list[field].remove(rel_item)
if lines:
lines.append("")
return lines
class Script(Code):
" Produces a complete python script that can recreate data for the given apps. "
def __init__(self, models, context=None, stdout=None, stderr=None, options=None):
    """Collect the models to export and the shared generation context.

    ``context`` maps "<Model>_<pk>" keys to generated variable names and
    carries the bookkeeping sets used during ordering.
    """
    super(Script, self).__init__(stdout=stdout, stderr=stderr)
    self.imports = {}
    self.models = models
    self.context = {} if context is None else context
    # NOTE: "__avaliable_models" is misspelled but is the key other code
    # reads, so it must stay as-is.
    self.context["__avaliable_models"] = set(models)
    self.context["__extra_imports"] = {}
    self.options = options
def _queue_models(self, models, context):
    """Work out an ordering for the models.

    Not strictly essential, but the script looks nicer because more
    instances can be defined on their first try.
    """
    # Iterations with no progress allowed before declaring a cycle.
    MAX_CYCLES = 5
    queue = []
    remaining = len(models)
    cycles_left = MAX_CYCLES
    while remaining > 0:
        previous_remaining = remaining
        model = models.pop(0)
        if check_dependencies(model, queue, context["__avaliable_models"]):
            # Dependencies satisfied: wrap the model and queue it.
            queue.append(ModelCode(model=model, context=context,
                                   stdout=self.stdout, stderr=self.stderr,
                                   options=self.options))
        else:
            # Not ready yet: push it to the back and retry later.
            models.append(model)
        remaining = len(models)
        if remaining != previous_remaining:
            # Progress was made; reset the cycle detector.
            cycles_left = MAX_CYCLES
            continue
        cycles_left -= 1
        if cycles_left <= 0:
            # Cyclic foreign keys that re-ordering cannot resolve: queue
            # the leftovers, but also keep them in ``models`` (in place)
            # so their dangling references can be re-processed later.
            leftovers = [ModelCode(model=m, context=context,
                                   stdout=self.stdout, stderr=self.stderr,
                                   options=self.options)
                         for m in models]
            queue += leftovers
            models[:] = leftovers
            break
    return queue
def get_lines(self):
    """Return the code body as a list of blocks (lists) and statements
    (strings)."""
    code = [self.FILE_HEADER.strip()]
    # First pass: emit each model in dependency order.
    for model_code in self._queue_models(self.models, context=self.context):
        msg = 'Processing model: %s\n' % model_code.model.__name__
        self.stderr.write(msg)
        code.append(" # " + msg)
        code.append(model_code.import_lines)
        code.append("")
        code.append(model_code.lines)
    # Second pass: foreign keys left dangling by cyclic dependencies.
    for model_code in self.models:
        msg = 'Re-processing model: %s\n' % model_code.model.__name__
        self.stderr.write(msg)
        code.append(" # " + msg)
        for instance in model_code.instances:
            if instance.waiting_list or instance.many_to_many_waiting_list:
                code.append(instance.get_lines(force=True))
    code.insert(1, " # Initial Imports")
    code.insert(2, "")
    # Repeated insertion at index 2 places the extra imports directly
    # under the header, in reverse iteration order.
    for name, module in self.context["__extra_imports"].items():
        code.insert(2, " from %s import %s" % (module, name))
    return code
lines = property(get_lines)  # read-only alias so callers can use ``.lines``
# A user-friendly file header
FILE_HEADER = """
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file has been automatically generated.
# Instead of changing it, create a file called import_helper.py
# and put there a class called ImportHelper(object) in it.
#
# This class will be specially casted so that instead of extending object,
# it will actually extend the class BasicImportHelper()
#
# That means you just have to overload the methods you want to
# change, leaving the other ones inteact.
#
# Something that you might want to do is use transactions, for example.
#
# Also, don't forget to add the necessary Django imports.
#
# This file was generated with the following command:
# %s
#
# to restore it, run
# manage.py runscript module_name.this_script_name
#
# example: if manage.py is at ./manage.py
# and the script is at ./some_folder/some_script.py
# you must make sure ./some_folder/__init__.py exists
# and run ./manage.py runscript some_folder.some_script
from django.db import transaction
class BasicImportHelper(object):
def pre_import(self):
pass
# You probably want to uncomment on of these two lines
# @transaction.atomic # Django 1.6
# @transaction.commit_on_success # Django <1.6
def run_import(self, import_data):
import_data()
def post_import(self):
pass
def locate_similar(self, current_object, search_data):
# You will probably want to call this method from save_or_locate()
# Example:
# new_obj = self.locate_similar(the_obj, {"national_id": the_obj.national_id } )
the_obj = current_object.__class__.objects.get(**search_data)
return the_obj
def locate_object(self, original_class, original_pk_name, the_class, pk_name, pk_value, obj_content):
# You may change this function to do specific lookup for specific objects
#
# original_class class of the django orm's object that needs to be located
# original_pk_name the primary key of original_class
# the_class parent class of original_class which contains obj_content
# pk_name the primary key of original_class
# pk_value value of the primary_key
# obj_content content of the object which was not exported.
#
# You should use obj_content to locate the object on the target db
#
# An example where original_class and the_class are different is
# when original_class is Farmer and the_class is Person. The table
# may refer to a Farmer but you will actually need to locate Person
# in order to instantiate that Farmer
#
# Example:
# if the_class == SurveyResultFormat or the_class == SurveyType or the_class == SurveyState:
# pk_name="name"
# pk_value=obj_content[pk_name]
# if the_class == StaffGroup:
# pk_value=8
search_data = { pk_name: pk_value }
the_obj = the_class.objects.get(**search_data)
#print(the_obj)
return the_obj
def save_or_locate(self, the_obj):
# Change this if you want to locate the object in the database
try:
the_obj.save()
except:
print("---------------")
print("Error saving the following object:")
print(the_obj.__class__)
print(" ")
print(the_obj.__dict__)
print(" ")
print(the_obj)
print(" ")
print("---------------")
raise
return the_obj
importer = None
try:
import import_helper
# We need this so ImportHelper can extend BasicImportHelper, although import_helper.py
# has no knowlodge of this class
importer = type("DynamicImportHelper", (import_helper.ImportHelper, BasicImportHelper ) , {} )()
except ImportError as e:
# From Python 3.3 we can check e.name - string match is for backward compatibility.
if 'import_helper' in str(e):
importer = BasicImportHelper()
else:
raise
import datetime
from decimal import Decimal
from django.contrib.contenttypes.models import ContentType
try:
import dateutil.parser
except ImportError:
| |
for p in root.self_and_parents(copy=False):
if p.isCloned():
clones.append(p.v)
if clones:
for p in c.all_positions(copy=False):
if predicate(p):
# Match if any node in p's tree matches any clone.
for p2 in p.self_and_subtree():
if p2.v in clones:
return [p.copy()]
return []
#@+node:tbrown.20140311095634.15188: *3* g.recursiveUNLSearch & helper
def recursiveUNLSearch(unlList, c, depth=0, p=None, maxdepth=0, maxp=None,
                       soft_idx=False, hard_idx=False):
    """Try and move to unl in the commander c.

    All parameters are passed on to recursiveUNLFind(); see that for docs.

    NOTE: maxdepth is max depth seen in recursion so far, not a limit on
    how far we will recurse. So it should default to 0 (zero).
    """
    if g.unitTesting:
        # Unit tests only record that the search was attempted.
        g.app.unitTestDict['g.recursiveUNLSearch'] = True
        return True, maxdepth, maxp

    def moveToP(c, p, unlList):
        # Select the found position on an idle-time timer so the redraw
        # happens after the current event finishes.
        def focus_callback(timer, c=c, p=p.copy(), unlList=unlList):
            '''Idle-time handler for g.recursiveUNLSearch'''
            c.expandAllAncestors(p)
            c.selectPosition(p)
            # The last UNL component may carry :sib,same,line,col parts.
            nth_sib, nth_same, nth_line_no, nth_col_no = recursiveUNLParts(unlList[-1])
            if nth_line_no:
                if nth_line_no < 0:
                    # Negative line number: a "global" line in the file.
                    c.goToLineNumber(-nth_line_no)
                    if nth_col_no:
                        pos = c.frame.body.wrapper.getInsertPoint() + nth_col_no
                        c.frame.body.wrapper.setInsertPoint(pos)
                else:
                    # Positive: a line inside the node body; compute the
                    # character offset of that line's start.
                    pos = sum(len(i)+1 for i in p.b.split('\n')[:nth_line_no-1])
                    if nth_col_no:
                        pos += nth_col_no
                    c.frame.body.wrapper.setInsertPoint(pos)
            if p.hasChildren():
                p.expand()
            c.redraw()
            c.frame.bringToFront()
            c.bodyWantsFocusNow()
            # One-shot timer: stop after the first firing.
            timer.stop()
        timer = g.IdleTime(focus_callback, delay=0.1, tag='g.recursiveUNLSearch')
        if timer: timer.start()

    found, maxdepth, maxp = recursiveUNLFind(
        unlList, c, depth, p, maxdepth, maxp,
        soft_idx=soft_idx, hard_idx=hard_idx
    )
    if maxp:
        # Move even on a partial match: maxp is the deepest node reached.
        moveToP(c, maxp, unlList)
    return found, maxdepth, maxp
#@+node:ekr.20140711071454.17654: *4* g.recursiveUNLFind
def recursiveUNLFind(unlList, c, depth=0, p=None, maxdepth=0, maxp=None,
                     soft_idx=False, hard_idx=False):
    """
    Internal part of recursiveUNLSearch which doesn't change the
    selected position or call c.frame.bringToFront().

    Returns found, depth, p, where:

        - found is True if a full match was found
        - depth is the depth of the best match
        - p is the position of the best match

    NOTE: maxdepth is max depth seen in recursion so far, not a limit on
    how far we will recurse. So it should default to 0 (zero).

    - `unlList`: list of 'headline', 'headline:N', or 'headline:N,M'
      elements, where N is the node's position index and M the zero based
      count of like named nodes, eg. 'foo:2', 'foo:4,1', 'foo:12,3'
    - `c`: outline
    - `soft_idx`: use index when matching name not found
    - `hard_idx`: use only indexes, ignore node names
    - `depth`: part of recursion, don't set explicitly
    - `p`: part of recursion, don't set explicitly
    - `maxdepth`: part of recursion, don't set explicitly
    - `maxp`: part of recursion, don't set explicitly
    """
    if depth == 0:
        # Top level: candidates are the root position and its siblings.
        nds = list(c.rootPosition().self_and_siblings())
        unlList = [i.replace('--%3E', '-->') for i in unlList if i.strip()]
        # drop empty parts so "-->node name" works
    else:
        nds = list(p.children())
    heads = [i.h for i in nds]
    # work out order in which to try nodes
    order = []
    nth_sib = nth_same = nth_line_no = nth_col_no = None
    # NOTE(review): this first try/except is immediately superseded by the
    # next one, which re-reads unlList[depth]; it only matters if the
    # second assignment raises something other than IndexError.
    try:
        target = unlList[depth]
    except IndexError:
        target = ''
    try:
        # Strip the :N,M,... position suffix from the headline text.
        target = pos_pattern.sub('', unlList[depth])
        nth_sib, nth_same, nth_line_no, nth_col_no = recursiveUNLParts(unlList[depth])
        pos = nth_sib is not None
    except IndexError:
        # #36.
        pos = False
    if pos:
        use_idx_mode = True  # ok to use hard/soft_idx
        target = re.sub(pos_pattern, "", target).replace('--%3E', '-->')
        if hard_idx:
            if nth_sib < len(heads):
                order.append(nth_sib)
        else:
            # First we try the nth node with same header
            if nth_same:
                nths = [n for n, i in enumerate(heads) if i == target]
                if nth_same < len(nths) and heads[nths[nth_same]] == target:
                    order.append(nths[nth_same])
            # Then we try *all* other nodes with same header
            order += [n for n, s in enumerate(heads)
                      if n not in order and s == target]
            # Then position based, if requested
            if soft_idx and nth_sib < len(heads):
                order.append(nth_sib)
    elif hard_idx:
        pass  # hard_idx mode with no idx in unl, go with empty order list
    else:
        order = range(len(nds))
        target = target.replace('--%3E', '-->')
        use_idx_mode = False  # not ok to use hard/soft_idx
        # note, the above also fixes calling with soft_idx=True and an old UNL
    for ndi in order:
        nd = nds[ndi]
        if (
            target == nd.h or
            (use_idx_mode and (soft_idx or hard_idx) and ndi == nth_sib)
        ):
            if depth + 1 == len(unlList):  # found it
                return True, maxdepth, nd
            if maxdepth < depth + 1:
                # Record the deepest partial match seen so far.
                maxdepth = depth + 1
                maxp = nd.copy()
            found, maxdepth, maxp = g.recursiveUNLFind(
                unlList, c, depth + 1, nd,
                maxdepth, maxp, soft_idx=soft_idx, hard_idx=hard_idx)
            if found:
                return found, maxdepth, maxp
            # else keep looking through nds
    if depth == 0 and maxp:  # inexact match
        g.es('Partial UNL match')
        if soft_idx and depth + 2 < len(unlList):
            # Whole-outline fallback: look for any position whose headline
            # appears in the UNL, and score it by how many trailing UNL
            # components its own UNL matches.
            aList = []
            for p in c.all_unique_positions():
                if any([p.h.replace('--%3E', '-->') in unl for unl in unlList]):
                    aList.append((p.copy(), p.get_UNL(False, False, True)))
            maxcount = 0
            singleMatch = True
            # NOTE(review): maxcount is never updated inside this loop, so
            # `count > maxcount` compares against 0 every time and the final
            # `if maxcount and singleMatch` can never be True — the fallback
            # appears to be dead code; probably a missing `maxcount = count`.
            for iter_unl in aList:
                count = 0
                compare_list = unlList[:]
                for header in reversed(iter_unl[1].split('-->')):
                    if (re.sub(pos_pattern, "", header).replace('--%3E', '-->') ==
                        compare_list[-1]
                    ):
                        count = count + 1
                        compare_list.pop(-1)
                    else:
                        break
                if count > maxcount:
                    p = iter_unl[0]
                    singleMatch = True
                elif count == maxcount:
                    singleMatch = False
            if maxcount and singleMatch:
                maxp = p
                maxdepth = p.level()
    return False, maxdepth, maxp
#@+node:tbrown.20171221094755.1: *4* g.recursiveUNLParts
# Matches the trailing ":sib[,same[,line[,col]]]" suffix of a UNL fragment.
pos_pattern = re.compile(r':(\d+),?(\d+)?,?([-\d]+)?,?(\d+)?$')

def recursiveUNLParts(text):
    """Extract (index, occurrence, line_number, col_number) from a UNL
    fragment.

    line_number is allowed to be negative to indicate a "global" line
    number within the file.

    :param str text: the fragment, e.g. ``foo``, ``foo:2`` or ``foo:2,0,4,10``
    :return: (index, occurrence, line_number, col_number), or four ``None``
        values when the fragment carries no position suffix
    :rtype: (int, int, int, int) or (None, None, None, None)
    """
    match = pos_pattern.search(text)
    if match is None:
        return (None, None, None, None)
    # Groups that did not participate are falsy -> default them to 0.
    return tuple(int(part) if part else 0 for part in match.groups())
#@+node:ekr.20031218072017.3156: *3* g.scanError
# It is dubious to bump the Tangle error count here, but it really doesn't hurt.
def scanError(s):
    '''Report a scan error: bump the global error count and display *s*.'''
    # New in Leo 4.4b1: just set this global.
    g.app.scanErrors = g.app.scanErrors + 1
    g.es('', s)
#@+node:ekr.20031218072017.3157: *3* g.scanf
# A quick and dirty sscanf. Understands only %s and %d.
def scanf(s, pat):
    """A quick and dirty sscanf: understands only %s and %d.

    Builds a regex from *pat* (%s -> a run of non-whitespace, %d -> a run
    of digits), splits *s* on it, and returns the captured fields — at
    most as many as there are format specifiers in *pat*.
    """
    count = pat.count("%s") + pat.count("%d")
    # Raw strings here: "\S"/"\d" inside plain literals are invalid escape
    # sequences (DeprecationWarning today, a syntax error in future Pythons).
    pat = pat.replace("%s", r"(\S+)")
    pat = pat.replace("%d", r"(\d+)")
    parts = re.split(pat, s)
    result = []
    for part in parts:
        # re.split interleaves captures with (possibly empty) separators;
        # keep only non-empty captures, capped at the specifier count.
        if part and len(result) < count:
            result.append(part)
    return result
#@+node:ekr.20031218072017.3195: *3* g.splitLines & g.joinLines
def splitLines(s):
    '''Split s into lines, preserving the number of lines and the endings
    of all lines, including the last line.  Falsy input yields [].'''
    return s.splitlines(True) if s else []

splitlines = splitLines  # lowercase alias

def joinLines(aList):
    '''Concatenate a list of lines back into a single string.'''
    return ''.join(aList)

joinlines = joinLines  # lowercase alias
#@+node:ekr.20031218072017.3158: *3* Scanners: calling scanError
#@+at These scanners all call g.scanError() directly or indirectly, so they
# will call g.es if they find an error. g.scanError() also bumps
# c.tangleCommands.errors, which is harmless if we aren't tangling, and
# useful if we are.
#
# These routines are called by the Import routines and the Tangle routines.
#@+node:ekr.20031218072017.3159: *4* skip_block_comment
# Scans past a block comment (an old_style C comment).
def skip_block_comment(s, i):
    '''Return the index just past the C block comment starting at s[i].

    Reports a scan error and returns len(s) when the comment is
    unterminated.
    '''
    assert(g.match(s, i, "/*"))
    start = i
    i += 2
    close = s.find("*/", i)
    if close == -1:
        # s[start:i] is the opening "/*" — included in the message.
        g.scanError("Run on block comment: " + s[start: i])
        return len(s)
    return close + 2
#@+node:ekr.20031218072017.3160: *4* skip_braces
#@+at This code is called only from the import logic, so we are allowed to
# try some tricks. In particular, we assume all braces are matched in
# if blocks.
#@@c
def skip_braces(s, i):
    '''Skip from the opening brace at s[i] to the matching close brace.

    Returns len(s) when no match is found.  String literals, // and /*
    comments are skipped, and #if/#ifdef/#ifndef blocks are assumed to
    contain balanced braces (this is only called from the import logic,
    so that assumption is acceptable).
    '''
    assert(g.match(s, i, '{'))
    depth = 0
    n = len(s)
    while i < n:
        ch = s[i]
        if ch == '{':
            depth += 1
            i += 1
        elif ch == '}':
            depth -= 1
            if depth <= 0:
                return i
            i += 1
        elif ch == '\'' or ch == '"':
            # Braces inside string/char literals must not count.
            i = g.skip_string(s, i)
        elif g.match(s, i, '//'):
            i = g.skip_to_end_of_line(s, i)
        elif g.match(s, i, '/*'):
            i = g.skip_block_comment(s, i)
        elif g.match_word(s, i, "#if") or g.match_word(s, i, "#ifdef") or g.match_word(s, i, "#ifndef"):
            # Be careful handling conditional code: the preprocessor
            # scanner reports any brace imbalance it introduces.
            i, delta = g.skip_pp_if(s, i)
            depth += delta
        else:
            i += 1
    return i
#@+node:ekr.20031218072017.3162: *4* skip_parens
def skip_parens(s, i):
'''Skips from the opening ( to the matching ).
If no matching is found i is set to len(s)'''
level = 0; n = len(s)
| |
== pygame.K_2:
if midiplay == 1:
midi_notes = []
midiout.send_message([176, 123, 0])
midiplay = 0
odabrana_lista_nota = [i for i in lista_nota if findNote(i,obj_cursor.pozicija, obj_cursor.trajanje)]
lista_nota_sorted = []
lista_nota_index = []
kljucevi_nota = {}
midi_notes = []
for i in odabrana_lista_nota:
lista_nota_index.append(i.pozicija)
lista_nota_sorted = list(set(lista_nota_index))
lista_nota_sorted.sort()
#empty list from note indexes
for i in lista_nota_sorted:
kljucevi_nota[i] = []
#add notes with indexes to the hash sublist
for i in odabrana_lista_nota:
kljucevi_nota[i.pozicija].append(i)
#sort the hash.list with ton object
for i in kljucevi_nota:
kljucevi_nota[i].sort(key=operator.attrgetter('ton'))
#add and play
for i in lista_nota_sorted:
#for y in kljucevi_nota[i]:
# #print(y.pozicija, y.ton, y.trajanje)
if len(kljucevi_nota[i]) > 1:
midi_notes.append([kljucevi_nota[i][2],time.clock(), 0])
else:
midi_notes.append([kljucevi_nota[i][0],time.clock(), 0])
#play only third voice
if event.key == pygame.K_3:
if midiplay == 1:
midi_notes = []
midiout.send_message([176, 123, 0])
midiplay = 0
odabrana_lista_nota = [i for i in lista_nota if findNote(i,obj_cursor.pozicija, obj_cursor.trajanje)]
lista_nota_sorted = []
lista_nota_index = []
kljucevi_nota = {}
midi_notes = []
for i in odabrana_lista_nota:
lista_nota_index.append(i.pozicija)
lista_nota_sorted = list(set(lista_nota_index))
lista_nota_sorted.sort()
#empty list from note indexes
for i in lista_nota_sorted:
kljucevi_nota[i] = []
#add notes with indexes to the hash sublist
for i in odabrana_lista_nota:
kljucevi_nota[i.pozicija].append(i)
#sort the hash.list with ton object
for i in kljucevi_nota:
kljucevi_nota[i].sort(key=operator.attrgetter('ton'))
#add and play
for i in lista_nota_sorted:
#for y in kljucevi_nota[i]:
# #print(y.pozicija, y.ton, y.trajanje)
if len(kljucevi_nota[i]) > 1:
midi_notes.append([kljucevi_nota[i][1],time.clock(), 0])
else:
midi_notes.append([kljucevi_nota[i][0],time.clock(), 0])
#play only forth voice
if event.key == pygame.K_4:
if midiplay == 1:
midi_notes = []
midiout.send_message([176, 123, 0])
midiplay = 0
odabrana_lista_nota = [i for i in lista_nota if findNote(i,obj_cursor.pozicija, obj_cursor.trajanje)]
lista_nota_sorted = []
lista_nota_index = []
kljucevi_nota = {}
midi_notes = []
for i in odabrana_lista_nota:
lista_nota_index.append(i.pozicija)
lista_nota_sorted = list(set(lista_nota_index))
lista_nota_sorted.sort()
#empty list from note indexes
for i in lista_nota_sorted:
kljucevi_nota[i] = []
#add notes with indexes to the hash sublist
for i in odabrana_lista_nota:
kljucevi_nota[i.pozicija].append(i)
#sort the hash.list with ton object
for i in kljucevi_nota:
kljucevi_nota[i].sort(key=operator.attrgetter('ton'))
#add and play
for i in lista_nota_sorted:
#for y in kljucevi_nota[i]:
# #print(y.pozicija, y.ton, y.trajanje)
if len(kljucevi_nota[i]) > 1:
midi_notes.append([kljucevi_nota[i][0],time.clock(), 0])
else:
midi_notes.append([kljucevi_nota[i][0],time.clock(), 0])
# NOTE(review): there are TWO independent `if` blocks each for K_UP and
# K_DOWN below (+/-7 here, +/-8 further down).  They are not elif's, so a
# single key press runs both (net +/-15).  Presumably one pair is stale
# leftover code — confirm which transposition step is intended.
if event.key == pygame.K_UP:
    # Notes under the cursor span, matching the cursor's pitch.
    x = [i for i in lista_nota if findNote(i,obj_cursor.pozicija, obj_cursor.trajanje)]
    if x:
        for i in x:
            if i in lista_nota and (obj_cursor.ton == i.ton):
                i.ton += 7
if event.key == pygame.K_DOWN:
    x = [i for i in lista_nota if findNote(i,obj_cursor.pozicija, obj_cursor.trajanje)]
    if x:
        for i in x:
            if i in lista_nota and (obj_cursor.ton == i.ton):
                i.ton -= 7
# obj_cursor.ton -= 7
if event.key == pygame.K_UP:
    x = [i for i in lista_nota if findNote(i,obj_cursor.pozicija, obj_cursor.trajanje)]
    if x:
        for i in x:
            if i in lista_nota and (obj_cursor.ton == i.ton):
                i.ton += 8
if event.key == pygame.K_DOWN:
    x = [i for i in lista_nota if findNote(i,obj_cursor.pozicija, obj_cursor.trajanje)]
    if x:
        for i in x:
            if i in lista_nota and (obj_cursor.ton == i.ton):
                i.ton -= 8
if chord_mode:
    # In chord mode the cursor jumps 16 grid steps at a time
    # (presumably one bar — confirm against the grid resolution).
    if event.key == pygame.K_RIGHT:
        obj_cursor.pozicija += 16
    if event.key == pygame.K_LEFT:
        # Don't step left past the start of the piece.
        if obj_cursor.pozicija > -15:
            obj_cursor.pozicija -= 16
#Keyboard buttons with LALT as mod
if pygame.key.get_mods() & pygame.KMOD_LALT:
if not modes():
pass
if old_mode:
obj_cursor.sprite = 1
if event.key == pygame.K_UP:
if obj_cursor.bg_scroll_y < 8:
obj_cursor.bg_scroll_y +=1
obj_cursor.ton +=1
if event.key == pygame.K_DOWN:
if obj_cursor.bg_scroll_y > -8:
obj_cursor.bg_scroll_y -=1
obj_cursor.ton -=1
if event.key == pygame.K_LEFT:
obj_cursor.bg_scroll_x -=1
if event.key == pygame.K_RIGHT:
obj_cursor.bg_scroll_x +=1
#delete all but first voice
if event.key == pygame.K_1:
if midiplay == 1:
midi_notes = []
midiout.send_message([176, 123, 0])
midiplay = 0
lista_nota_sorted = []
lista_nota_index = []
kljucevi_nota = {}
midi_notes = []
for i in lista_nota:
lista_nota_index.append(i.pozicija)
lista_nota_sorted = list(set(lista_nota_index))
lista_nota_sorted.sort()
#empty list from note indexes
for i in lista_nota_sorted:
kljucevi_nota[i] = []
#add notes with indexes to the hash sublist
for i in lista_nota:
kljucevi_nota[i.pozicija].append(i)
#sort the hash.list with ton object
for i in kljucevi_nota:
kljucevi_nota[i].sort(key=operator.attrgetter('ton'))
#add and play
backup_lista_nota = lista_nota
lista_nota = []
for i in lista_nota_sorted:
#for y in kljucevi_nota[i]:
# #print(y.pozicija, y.ton, y.trajanje)
if len(kljucevi_nota[i]) > 1:
lista_nota.append(kljucevi_nota[i][3])
else:
lista_nota.append(kljucevi_nota[i][0])
#delete all but second voice
if event.key == pygame.K_2:
if midiplay == 1:
midi_notes = []
midiout.send_message([176, 123, 0])
midiplay = 0
lista_nota_sorted = []
lista_nota_index = []
kljucevi_nota = {}
midi_notes = []
for i in lista_nota:
lista_nota_index.append(i.pozicija)
lista_nota_sorted = list(set(lista_nota_index))
lista_nota_sorted.sort()
#empty list from note indexes
for i in lista_nota_sorted:
kljucevi_nota[i] = []
#add notes with indexes to the hash sublist
for i in lista_nota:
kljucevi_nota[i.pozicija].append(i)
#sort the hash.list with ton object
for i in kljucevi_nota:
kljucevi_nota[i].sort(key=operator.attrgetter('ton'))
#add and play
backup_lista_nota = lista_nota
lista_nota = []
for i in lista_nota_sorted:
#for y in kljucevi_nota[i]:
# #print(y.pozicija, y.ton, y.trajanje)
if len(kljucevi_nota[i]) > 1:
lista_nota.append(kljucevi_nota[i][2])
else:
lista_nota.append(kljucevi_nota[i][0])
#delete all but third voice
if event.key == pygame.K_3:
if midiplay == 1:
midi_notes = []
midiout.send_message([176, 123, 0])
midiplay = 0
lista_nota_sorted = []
lista_nota_index = []
kljucevi_nota = {}
midi_notes = []
for i in lista_nota:
lista_nota_index.append(i.pozicija)
lista_nota_sorted = list(set(lista_nota_index))
lista_nota_sorted.sort()
#empty list from note indexes
for i in lista_nota_sorted:
kljucevi_nota[i] = []
#add notes with indexes to the hash sublist
for i in lista_nota:
kljucevi_nota[i.pozicija].append(i)
#sort the hash.list with ton object
for i in kljucevi_nota:
kljucevi_nota[i].sort(key=operator.attrgetter('ton'))
#add and play
backup_lista_nota = lista_nota
lista_nota = []
for i in lista_nota_sorted:
#for y in kljucevi_nota[i]:
# #print(y.pozicija, y.ton, y.trajanje)
if len(kljucevi_nota[i]) > 1:
lista_nota.append(kljucevi_nota[i][1])
else:
lista_nota.append(kljucevi_nota[i][0])
#delete all but forth voice
if event.key == pygame.K_4:
if midiplay == 1:
midi_notes = []
midiout.send_message([176, 123, 0])
midiplay = 0
lista_nota_sorted = []
lista_nota_index = []
kljucevi_nota = {}
midi_notes = []
for i in lista_nota:
lista_nota_index.append(i.pozicija)
lista_nota_sorted = list(set(lista_nota_index))
lista_nota_sorted.sort()
#empty list from note indexes
for i in lista_nota_sorted:
kljucevi_nota[i] = []
#add notes with indexes to the hash sublist
for i in lista_nota:
kljucevi_nota[i.pozicija].append(i)
#sort the hash.list with ton object
for i in kljucevi_nota:
kljucevi_nota[i].sort(key=operator.attrgetter('ton'))
#add and play
backup_lista_nota = lista_nota
lista_nota = []
for i in lista_nota_sorted:
#for y in kljucevi_nota[i]:
# #print(y.pozicija, y.ton, y.trajanje)
if len(kljucevi_nota[i]) > 1:
lista_nota.append(kljucevi_nota[i][0])
else:
lista_nota.append(kljucevi_nota[i][0])
#recover backup of lista_nota back
if event.key == pygame.K_0:
lista_nota = backup_lista_nota
if event.type == pygame.KEYUP:
    # NOTE(review): `&` binds tighter than `==`, so this condition is
    # effectively `pygame.key.get_mods() == (0 & KMOD_LSHIFT)`, i.e.
    # "no modifiers held at all".  If the intent was "left shift no
    # longer held", it should read
    # `not (pygame.key.get_mods() & pygame.KMOD_LSHIFT)` — confirm.
    if pygame.key.get_mods()==0 & pygame.KMOD_LSHIFT:
        obj_cursor.sprite = 0
        #shift_status = 0
        #left = 0
        #right = 0
# playing midi notes ###############################################################
if midi_notes:
    midiplay = 1
    swap_pozicija = obj_cursor.pozicija
    # Each entry is [note, enqueue_time, state] where state 0 = not yet
    # sounding and 1 = note-on already sent.
    for i in midi_notes:
        # Start/end times in seconds: grid offset from the cursor scaled
        # by 60/tempo/4 (a quarter of a beat per grid step), relative to
        # the time the note was enqueued.
        start_point = (i[0].pozicija - swap_pozicija)*(60/tempo/4) + i[1]
        #print(start_point, end_point)
        end_point = (i[0].pozicija - swap_pozicija + i[0].trajanje + 1)*(60/tempo/4) + i[1]
        # NOTE(review): time.clock() was removed in Python 3.8.  Porting
        # needs time.perf_counter(), changed consistently with every site
        # that records i[1], or the timebases will disagree.
        if (i[2] == 0 and (time.clock() >= start_point)):
            i[2] = 1
            #print(str(nota2MidiNumber(i[0])) + " on")
            # 144 = MIDI note-on (channel 1), velocity 100.
            midiout.send_message([144, nota2MidiNumber(i[0]), 100])
            #print(time.clock())
        if (i[2] == 1 and (time.clock() >= end_point)):
            print(str(nota2MidiNumber(i[0])) + " off")
            # Note-on with velocity 0 acts as note-off.
            midiout.send_message([144, nota2MidiNumber(i[0]), 0])
            # NOTE(review): removing from midi_notes while iterating it
            # skips the following element this pass — iterate a copy
            # (for i in list(midi_notes)) to release notes promptly.
            midi_notes.remove(i)
else:
    midiplay = 0
# bliting #########################################################################
#racunanje bg_scroll-a
#pozicija cursora u svakom trenutku ovisno na okvir screen-a
obj_cursor.apsolute_y = obj_cursor.ton - 20 - (obj_cursor.bg_scroll_y)
obj_cursor.apsolute_x = obj_cursor.pozicija - (obj_cursor.bg_scroll_x) - fake_scroll
if (int(obj_cursor.apsolute_x) - int(abs(fake_scroll))) == 2:
fake_scroll = 0
if obj_cursor.apsolute_y > 12:
obj_cursor.bg_scroll_y += 1
elif obj_cursor.apsolute_y < -12:
obj_cursor.bg_scroll_y -= 1
if obj_cursor.apsolute_x < 0:
#obj_cursor.bg_scroll_x -=1
x = (obj_cursor.apsolute_x - 1)
#obj_cursor.bg_scroll_x -= round(math.log(abs(x))*0.6, 1)
obj_cursor.bg_scroll_x -= round(math.log(abs(x))*8, 1)
ajdemi()
elif obj_cursor.apsolute_x + obj_cursor.trajanje > 22:
x = (obj_cursor.apsolute_x + obj_cursor.trajanje - 21)
#obj_cursor.bg_scroll_x += round(math.log(x)*0.6, 1)
obj_cursor.bg_scroll_x += round(math.log(x)*8, 1)
#print(round(x, 1))
ajdemi()
bg_scroll_x = obj_cursor.bg_scroll_x * 6
bg_scroll_y = obj_cursor.bg_scroll_y * 3
#flipanje
screen.fill(color_white)
blit_prvi_takt(18-bg_scroll_x,bg_scroll_y-15+30)
#if drugi_takt_lijevi-bg_scroll_x < 67:
for i in range(0, broj_taktova):
blit_drugi_takt(drugi_takt_desni+i*96-bg_scroll_x,bg_scroll_y-15+30)
blit_zadnji_takt(drugi_takt_desni+broj_taktova*96-bg_scroll_x,bg_scroll_y-15+30)
for i in lista_nota:
pygame.draw.rect(screen, lista_boja[i.predikat*2], [(pozicija2Pixel(i.pozicija)+2-bg_scroll_x)*display_scale_factor,(ton2Pixel(i.ton)+2+bg_scroll_y)*display_scale_factor,(trajanje2Pixel(i.trajanje)-1)*display_scale_factor,3*display_scale_factor] )
pygame.draw.rect(screen, lista_boja[i.predikat*2+1], [(pozicija2Pixel(i.pozicija)+3-bg_scroll_x)*display_scale_factor,(ton2Pixel(i.ton)+3+bg_scroll_y+predikati[i.predikat])*display_scale_factor,(trajanje2Pixel(i.trajanje)-3)*display_scale_factor,(3-2)*display_scale_factor] )
#show ligatures
if i.ligatura == True:
if [x for x in lista_nota if ((x.pozicija == (i.pozicija + i.trajanje + 1)) and (x.ton == i.ton) and (x.predikat == i.predikat))]:
pygame.draw.rect(screen, boja_note_vani, [(pozicija2Pixel(i.pozicija)+2-bg_scroll_x+trajanje2Pixel(i.trajanje)-1)*display_scale_factor,(ton2Pixel(i.ton)+2+bg_scroll_y+1)*display_scale_factor,3*display_scale_factor,1*display_scale_factor] )
else:
pygame.draw.rect(screen, boja_note_vani, [(pozicija2Pixel(i.pozicija)+2-bg_scroll_x+trajanje2Pixel(i.trajanje)-1)*display_scale_factor,(ton2Pixel(i.ton)+2+bg_scroll_y+1)*display_scale_factor,1*display_scale_factor,1*display_scale_factor] )
#print chordnames on the screen
for chord in list_chords:
#pygame.draw.rect(screen, lista_boja[i.predikat*2], [(pozicija2Pixel(i.pozicija)+2-bg_scroll_x)*display_scale_factor,(ton2Pixel(i.ton)+2+bg_scroll_y)*display_scale_factor,(trajanje2Pixel(i.trajanje)-1)*display_scale_factor,3*display_scale_factor] )
for i,j in enumerate(chord.ton):
blit_slovo((i*4)+(pozicija2Pixel(chord.pozicija)+2-bg_scroll_x),8,slovoPozicija(j))
#print markup on the screen
for markup in list_markup:
#pygame.draw.rect(screen, lista_boja[i.predikat*2], [(pozicija2Pixel(i.pozicija)+2-bg_scroll_x)*display_scale_factor,(ton2Pixel(i.ton)+2+bg_scroll_y)*display_scale_factor,(trajanje2Pixel(i.trajanje)-1)*display_scale_factor,3*display_scale_factor] )
for i,j in enumerate(markup.ton):
blit_slovo((i*4)+(pozicija2Pixel(markup.pozicija)+2-bg_scroll_x),79,slovoPozicija(j))
blit_cursor(pozicija2Pixel(obj_cursor.pozicija)-bg_scroll_x,ton2Pixel(obj_cursor.ton)+bg_scroll_y,pozicija2Pixel(obj_cursor.pozicija)+trajanje2Pixel(obj_cursor.trajanje)-bg_scroll_x,ton2Pixel(obj_cursor.ton)+bg_scroll_y,obj_cursor.sprite)
#show lilypond | |
<reponame>GDGSNF/PXXTF
#!/usr/bin/env python
# -*- coding: utf-8 -*-"
# vim: set expandtab tabstop=4 shiftwidth=4:
"""
$Id$
This file is part of the xsser project, http://xsser.03c8.net
Copyright (c) 2011/2016 psy <<EMAIL>>
xsser is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation version 3 of the License.
xsser is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along
with xsser; if not, write to the Free Software Foundation, Inc., 51
Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import os
import gtk
import user
import gobject
from core.reporter import XSSerReporter
from core.curlcontrol import Curl
from glib import markup_escape_text
from collections import defaultdict
from threading import Thread
import traceback
import urllib.request, urllib.parse, urllib.error
import urllib.parse
import math
import cairo
import gzip
import pangocairo
import time
class PointType(object):
    """Constants classifying a map point, plus the RGB color per class.

    Higher values take drawing/merge precedence (see the point merging in
    GlobalMap._preprocess_points elsewhere in this module).
    """
    checked = 15    # payload confirmed in a browser ("browser checked" tooltip group)
    success = 10    # injection reported successful
    failed = 5      # injection attempt failed
    crawled = 0     # site merely crawled
    crashsite = -1  # connection crashed (drawn as a cross via CrashSite)
    # RGB triples in the 0.0-1.0 range, converted with gtkcol() for GTK use.
    crash_color = [0.1,0.1,0.1]
    checked_color = [0,0.8,0.8]
    failed_color = [0.8,0.0,0.0]
    success_color = [0.0,0.0,0.8]
    crawl_color = [0.0,0.0,0.0]
def gtkcol(col):
    """Convert a 0.0-1.0 RGB triple into a 16-bit-per-channel GTK color list."""
    return [int(col[channel] * 65535) for channel in range(3)]
class MapPoint(object):
    """A geo-located marker on the world map, aggregating reports per type.

    BUG FIX: the color constants are attributes of PointType, not module
    globals -- the original referenced bare names (``crawl_color`` etc.),
    raising NameError on every construction. They are now qualified.
    """

    def __init__(self, lat, lng, ptype, size, text):
        # ptype is one of the PointType constants: 0, 5, 10, 15 (or -1)
        self.latitude = lat
        self.longitude = lng
        self.size = size
        self.text = text
        # Reports are grouped by point type so tooltips can list them per class.
        self.reports = defaultdict(list)
        self.reports[ptype].append(text)
        self.type = ptype
        if ptype == PointType.crawled:
            self.color = PointType.crawl_color
        elif ptype == PointType.failed:
            self.color = PointType.failed_color
        elif ptype == PointType.success:
            self.color = PointType.success_color
        elif ptype == PointType.checked:
            self.color = PointType.checked_color
        else:
            self.color = PointType.crawl_color
        self.gtkcolor = gtkcol(self.color)

    def add_reports(self, report_type, reports):
        """Merge *reports* (a mapping type -> list of texts) into this point.

        Assumes *reports* is a defaultdict(list) (callers pass another
        point's ``reports``).  Note: the ``report_type`` parameter is
        immediately shadowed by the loop variable and is effectively
        unused; it is kept for call compatibility.
        """
        for report_type in set(list(reports.keys()) + list(self.reports.keys())):
            self.reports[report_type].extend(reports[report_type])
class CrashSite(MapPoint):
    """A map marker for a crashed site; always typed PointType.crashsite."""

    def __init__(self, lat, lng, size, desturl):
        super(CrashSite, self).__init__(lat, lng, PointType.crashsite,
                                        size, desturl)
class DownloadThread(Thread):
    """Daemon thread that downloads the GeoLiteCity database for the map.

    Progress goes through the parent's ``report_state``; the map widget is
    notified via ``geomap_ready``/``geomap_failed`` when finished.
    """
    def __init__(self, geomap, parent):
        Thread.__init__(self)
        self.daemon = True  # don't keep the process alive on interpreter exit
        self._map = geomap
        self._parent = parent
    def run(self):
        geo_db_path = self._map.get_geodb_path()
        def reportfunc(current, blocksize, filesize):
            # urlretrieve reporthook; progress clamped to 1.0
            percent = min(float(current)/(filesize/float(blocksize)),1.0)
            self._parent.report_state('downloading map', percent)
        if not os.path.exists(os.path.dirname(geo_db_path)):
            os.makedirs(os.path.dirname(geo_db_path))
        self._parent.report_state('getting city database', 0.0)
        # Try three mirrors in order; only if all fail is the map marked failed.
        try:
            urllib.request.urlretrieve('http://xsser.03c8.net/map/GeoLiteCity.dat.gz',
                                       geo_db_path+'.gz', reportfunc)
        except:
            try:
                urllib.request.urlretrieve('http://xsser.sf.net/map/GeoLiteCity.dat.gz',
                                           geo_db_path+'.gz', reportfunc)
            except:
                try:
                    urllib.request.urlretrieve('http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz',
                                               geo_db_path+'.gz', reportfunc)
                except:
                    self._parent.report_state('error downloading map', 0.0)
                    self._map.geomap_failed()
        else:
            # NOTE(review): this else belongs to the *outer* try, so the
            # decompress-and-ready path only runs when the FIRST mirror
            # succeeds; downloads via mirrors 2/3 are never unpacked.
            # Flagged, not changed here -- confirm intent before fixing.
            self._parent.report_state('map downloaded (restart XSSer!!!!)', 0.0)
            f_in = gzip.open(geo_db_path+'.gz', 'rb')
            f_out = open(geo_db_path, 'wb')
            f_out.write(f_in.read())
            f_in.close()
            print('deleting gzipped file')
            os.remove(geo_db_path+'.gz')
            self._map.geomap_ready()
class GlobalMap(gtk.DrawingArea, XSSerReporter):
    def __init__(self, parent, pixbuf, onattack=False):
        """World-map drawing area; downloads the GeoIP DB on first use.

        parent: object receiving report_state() progress callbacks.
        pixbuf: background map image.
        onattack: when True, skip drawing the demo test points.
        """
        gtk.DrawingArea.__init__(self)
        geo_db_path = self.get_geodb_path()
        self._parent = parent
        self._pixbuf = pixbuf
        self._cache_geo = {}  # server name -> (latitude, longitude)
        self.geo = None  # GeoIP handle; set in finish_init()
        self._onattack = onattack
        if not os.path.exists(geo_db_path):
            # No local database yet: fetch it in the background, then
            # finish_init() is scheduled via geomap_ready().
            self._t = DownloadThread(self, parent)
            self._t.start()
        else:
            self.finish_init()
    def geomap_ready(self):
        # Called from the download thread: schedule finish_init() on the
        # GTK main loop (timeout 0) while holding the GDK lock.
        gtk.gdk.threads_enter()
        gobject.timeout_add(0, self.finish_init)
        gtk.gdk.threads_leave()
    def geomap_failed(self):
        # Called from the download thread: schedule failed_init() on the
        # GTK main loop (timeout 0) while holding the GDK lock.
        gtk.gdk.threads_enter()
        gobject.timeout_add(0, self.failed_init)
        gtk.gdk.threads_leave()
def failed_init(self):
if hasattr(self, '_t'):
self._t.join()
delattr(self, '_t')
    def finish_init(self):
        """Open the GeoIP database and set up all drawing state.

        Runs either directly from __init__ (database already present) or
        scheduled on the GTK main loop by geomap_ready() after download.
        """
        import GeoIP  # imported lazily: only usable once the DB exists
        if hasattr(self, '_t'):
            # A download thread ran; reap it.
            self._t.join()
            delattr(self, '_t')
        parent = self._parent
        geo_db_path = self.get_geodb_path()
        Geo = GeoIP.open(geo_db_path, GeoIP.GEOIP_STANDARD)
        self.geo = Geo
        self.set_has_tooltip(True)
        self._max_points = 200  # cap on simultaneously drawn points
        self._lasttime = 0.0  # timestamp of the previous draw() pass
        self.context = None
        self.mapcontext = None
        self._mappixbuf = None
        self._selected = []  # indexes of points enlarged by find_points()
        self._current_text = ["", 0.0]  # (text, remaining display countdown)
        self._stats = [0,0,0,0,0,0,0]
        self.width = self._pixbuf.get_width()
        self.height = self._pixbuf.get_height()
        self._min_x = 0
        self._max_x = self.width
        self._drawn_points = []  # [x, y, point-index] triples from last draw
        self._lines = []
        self._frozenlines = []  # lines already baked onto the map surface
        self._points = []
        self._crosses = []
        self.connect("expose_event", self.expose)
        self.connect("query-tooltip", self.on_query_tooltip)
        if self.window:
            self.window.invalidate_rect(self.allocation, True)
        if not self._onattack:
            self.add_test_points()
def get_geodb_path(self):
ownpath = os.path.dirname(os.path.dirname(__file__))
gtkpath = os.path.join(ownpath, 'gtk')
if os.path.exists(os.path.join(gtkpath, 'GeoLiteCity.dat')):
return os.path.join(gtkpath, 'GeoLiteCity.dat')
else:
return os.path.join(user.home, '.xsser', 'GeoLiteCity.dat')
def find_points(self, x, y, distance=9.0):
points = []
self._selected = []
for idx, point in enumerate(self._drawn_points):
d_x = x-point[0]
d_y = y-point[1]
if d_y*d_y+d_x*d_x < distance:
self._points[point[2]].size = 4.0
points.append(self._points[point[2]])
self._selected.append(point[2])
if points:
rect = gtk.gdk.Rectangle(0,0,self.width, self.height)
self.window.invalidate_rect(rect, True)
return points
    def on_query_tooltip(self, widget, x, y, keyboard_mode, tooltip):
        """Build a Pango-markup tooltip from the reports near (x, y).

        Returns True (show the tooltip) when any point is under the cursor.
        """
        if not self.geo:
            return False  # not initialized yet
        points = self.find_points(x, y)
        if points:
            text = ""
            success = []
            finalsuccess = []
            failures = []
            crawls = []
            # Pool the report lists of every point under the cursor.
            for point in points:
                finalsuccess.extend(point.reports[PointType.checked])
                success.extend(point.reports[PointType.success])
                failures.extend(point.reports[PointType.failed])
                crawls.extend(point.reports[PointType.crawled])
            # The "sucesses" typos below are user-visible runtime strings
            # and are deliberately left untouched.
            if finalsuccess:
                text += "<b>browser checked sucesses:</b>\n"
                text += "\n".join([markup_escape_text(s) for s in finalsuccess])
            if failures or success:
                text += "\n"
            if success:
                text += "<b>sucesses:</b>\n"
                text += "\n".join([markup_escape_text(s) for s in success])
            if failures:
                text += "\n"
            if failures:
                text += "<b>failures:</b>\n"
                text += "\n".join([markup_escape_text(s) for s in failures])
            # Crawl entries are shown only when nothing more important is.
            if crawls and not failures and not success:
                text += "<b>crawls:</b>\n"
                text += "\n".join([markup_escape_text(s) for s in crawls])
            tooltip.set_markup(str(text))
            return True
        return False
def add_test_points(self):
self.add_point(0.0, 0.0)
self.add_point(0.0, 5.0)
self.add_point(0.0, 10.0)
self.add_point(0.0, 15.0)
self.add_point(5.0, 0.0)
self.add_point(10.0, 0.0)
self.add_point(15.0, 0.0)
def clear(self):
self._points = []
self._lines = []
self.mapcontext = None
self._frozenlines = []
self._crosses = []
self._stats = [0,0,0,0,0,0,0]
def expose(self, widget, event):
if not self.mapcontext:
self._mappixbuf = self._pixbuf.copy()
self.mapsurface = cairo.ImageSurface.create_for_data(self._mappixbuf.get_pixels_array(),
cairo.FORMAT_ARGB32,
self.width,
self.height,
self._pixbuf.get_rowstride())
self.mapcontext = cairo.Context(self.mapsurface)
self.draw_frozen_lines()
self.context = self.window.cairo_create()
self.context.set_source_surface(self.mapsurface)
self.context.rectangle(event.area.x, event.area.y,
event.area.width, event.area.height)
self.context.clip()
self.context.rectangle(event.area.x, event.area.y,
event.area.width, event.area.height)
self.context.fill()
self.context.set_source_color(gtk.gdk.Color(0,0,0))
self._min_x = 5 # we have the scale at the left for now
self._max_x = 0
if self.geo:
self.draw(self.context)
return False
def add_point(self, lng, lat, point_type=PointType.crawled, desturl="testpoint"):
map_point = MapPoint(lat, lng, point_type, 5.0, desturl)
map_point.x, map_point.y = self.plot_point(lat, lng)
self._points.append(map_point)
def add_cross(self, lng, lat, col=[0,0,0], desturl="testpoint"):
for a in self._crosses:
if a.latitude == lat and a.longitude == lng:
return
crash_site = CrashSite(lat, lng, 5.0, desturl)
crash_site.x, crash_site.y = self.plot_point(lat, lng)
self.adjust_bounds(crash_site.x, crash_site.y)
self._crosses.append(crash_site)
self.queue_redraw()
def insert_point(self, lng, lat, col=[0,0,0], desturl="testpoint"):
self._points.insert(0, MapPoint(lat, lng, point_type, 5.0, desturl))
    def _preprocess_points(self):
        """Merge points sharing identical coordinates and cap the list size.

        For each coordinate the point with the highest type wins; reports
        from merged-away points are folded into the winner.
        """
        newpoints = defaultdict(list)
        for point in self._points:
            key = (point.latitude, point.longitude)
            newpoints[key].append(point)
        self._points = []
        for points in newpoints.values():
            win_type = points[0]  # candidate with the highest PointType
            win_size = points[0]  # candidate with the largest size
            for point in points[1:]:
                if point.type > win_type.type:
                    win_type = point
                # NOTE(review): this compares against win_type.size rather
                # than win_size.size -- win_size may not end up the largest
                # point. Looks like a bug; confirm intent before changing.
                if point.size > win_type.size:
                    win_size = point
            self._points.append(win_type)
            if win_type != win_size:
                self._points.append(win_size)
            for point in points:
                if not point in [win_size, win_type]:
                    win_type.add_reports(point.type, point.reports)
        if len(self._points) > self._max_points:
            # Hard cap: drop everything beyond _max_points.
            self._points = self._points[:self._max_points]
def draw_frozen_lines(self):
for line in self._lines[len(self._frozenlines):]:
if line[4] <= 0.5:
self.draw_line(self.mapcontext, line)
self._frozenlines.append(line)
def draw(self, context, failures=True):
self._preprocess_points()
if self._lasttime == 0:
self._lasttime = time.time()-0.04
currtime = time.time()
timepassed = currtime - self._lasttime
redraw = False
if failures:
self._drawn_points = []
for cross in reversed(self._crosses):
if cross.size > 0.1:
cross.size -= timepassed*2
else:
self._crosses.remove(cross)
if cross.size > 0.1:
redraw = True
self.draw_cross(cross)
for line in reversed(self._lines[len(self._frozenlines):]):
if line[4] > 0.5:
line[4] -= timepassed*2
if line[4] > 0.5:
redraw = True
self.draw_line(self.context, line)
for idx, point in enumerate(self._points):
if point.type >= PointType.success:
if failures:
continue
else:
if not failures:
continue
if point.size > 1.0 and not idx in self._selected:
point.size -= timepassed*2
redraw = True
elif point.size < 1.0:
point.size = 1.0
self.draw_point(point)
x = point.x
y = point.y
self.adjust_bounds(x, y)
self._drawn_points.append([x, y, idx])
stat_f = 1.0
if failures:
mp = self._max_points
self.draw_bar((-45,-160,crawl_color,(self._stats[0]%mp)*stat_f))
self.draw_bar((-45,-155,failed_color,(self._stats[1]%mp)*stat_f))
self.draw_bar((-45,-150,success_color,(self._stats[2]%mp)*stat_f))
self.draw_bar((-45,-145,checked_color,(self._stats[3]%mp)*stat_f))
if int(self._stats[0] / mp):
self.draw_bar((-46,-160,crawl_color,-2-(self._stats[0]/mp)*stat_f))
if int(self._stats[1] / mp):
self.draw_bar((-46,-155,failed_color,-2-(self._stats[1]/mp)*stat_f))
if int(self._stats[2] / mp):
self.draw_bar((-46,-150,success_color,-2-(self._stats[2]/mp)*stat_f))
if int(self._stats[3] / mp):
self.draw_bar((-46,-145,checked_color,-2-(self._stats[3]/mp)*stat_f))
self.draw(context, False)
else:
if self._current_text[1] > 0.0:
self.draw_text(100, self.height-50, self._current_text[0])
self._current_text[1] -= timepassed*4
self._lasttime = currtime
if redraw:
self.queue_redraw()
def adjust_bounds(self, x, y):
if x-20 < self._min_x:
self._min_x = x-20
elif x+20 > self._max_x:
self._max_x = x+20
    def draw_text(self, x, y, text):
        """Paint the network location of *text* (a URL) at (x, y).

        The scale/grey level derive from the _current_text countdown.
        """
        self.context.save()
        self.context.move_to(x, y)
        v = (5.0-self._current_text[1])/5.0
        # NOTE(review): max(v, 1.0) forces the scale to at least 1.1
        # regardless of v -- min() may have been intended to cap the
        # growth instead; confirm before changing.
        self.context.scale(0.1+max(v, 1.0), 0.1+max(v, 1.0))
        self.context.set_source_color(gtk.gdk.Color(*gtkcol((v,)*3)))
        u = urllib.parse.urlparse(text)
        self.context.show_text(u.netloc)
        self.context.restore()
def draw_bar(self, point):
if point[3]:
self.context.save()
x, y = self.plot_point(point[0], point[1])
self.context.set_source_rgb(*point[2])
self.context.rectangle(x, y, 5, -(2.0+point[3]))
self.context.fill()
self.context.restore()
return x, y
def draw_line(self, context, line):
if line[4]:
context.save()
x, y = self.plot_point(line[0], line[1])
x2, y2 = self.plot_point(line[2], line[3])
self.adjust_bounds(x, y)
self.adjust_bounds(x2, y2)
context.set_line_width(1.0)
context.set_source_rgba(0.0, 0.0, 0.0, float(line[4])/5.0)
context.move_to(x, y)
context.rel_line_to(x2-x, y2-y)
context.stroke()
context.restore()
def draw_point(self, point):
if point.size:
self.context.save()
self.context.set_source_color(gtk.gdk.Color(*point.gtkcolor))
self.context.translate(point.x, point.y)
self.context.arc(0.0, 0.0, 2.4*point.size, 0, 2*math.pi)
self.context.close_path()
self.context.fill()
self.context.restore()
def draw_cross(self, point):
if point.size:
self.context.save()
self.context.translate(point.x, point.y)
self.context.rotate(point.size)
self.context.set_line_width(0.8*point.size)
self.context.set_source_color(gtk.gdk.Color(*point.gtkcolor))
self.context.move_to(-3*point.size, -3*point.size)
self.context.rel_line_to(6*point.size, 6*point.size)
self.context.stroke()
self.context.move_to(-3*point.size, +3*point.size)
self.context.rel_line_to(6*point.size, -6*point.size)
self.context.stroke()
self.context.restore()
    def get_latlon_fromurl(self, url):
        """Resolve *url*'s host to (latitude, longitude) via GeoIP.

        Successful lookups are memoized per server name.  Returns None
        implicitly when the GeoIP lookup yields nothing -- callers must
        handle that case.
        """
        parsed_url = urllib.parse.urlparse(url)
        split_netloc = parsed_url.netloc.split(":")
        if len(split_netloc) == 2:
            server_name, port = split_netloc
        else:
            server_name = parsed_url.netloc
            port = None  # port is parsed but never used below
        if server_name in self._cache_geo:
            return self._cache_geo[server_name]
        Geodata = self.geo.record_by_name(server_name)
        if Geodata:
            country_name = Geodata['country_name']  # fetched but unused
            longitude = Geodata['longitude']
            latitude = Geodata['latitude']
            # Only successful lookups are cached; failures get retried.
            self._cache_geo[server_name] = (latitude, longitude)
            return latitude, longitude
    def start_attack(self):
        """Reset all map state at the beginning of an attack run."""
        self.clear()
def queue_redraw(self):
rect = gtk.gdk.region_rectangle((self._min_x,0,self._max_x-self._min_x,
self.height))
if self.window:
self.window.invalidate_region(rect, True)
del rect
def mosquito_crashed(self, | |
237, 0x1DA, -1, False, -1
)
W110 = SimpleProperty(
PropertyCategory.FUNCTIONAL_REGISTER, PropertyType.WORD, 238, 0x1DC, -1, False, -1
)
W111 = SimpleProperty(
PropertyCategory.FUNCTIONAL_REGISTER, PropertyType.WORD, 239, 0x1DE, -1, False, -1
)
W112 = SimpleProperty(
PropertyCategory.FUNCTIONAL_REGISTER, PropertyType.WORD, 240, 0x1E0, -1, False, -1
)
W113 = SimpleProperty(
PropertyCategory.FUNCTIONAL_REGISTER, PropertyType.WORD, 241, 0x1E2, -1, False, -1
)
W114 = SimpleProperty(
PropertyCategory.FUNCTIONAL_REGISTER, PropertyType.WORD, 242, 0x1E4, -1, False, -1
)
W115 = SimpleProperty(
PropertyCategory.FUNCTIONAL_REGISTER, PropertyType.WORD, 243, 0x1E6, -1, False, -1
)
W116 = SimpleProperty(
PropertyCategory.FUNCTIONAL_REGISTER, PropertyType.WORD, 244, 0x1E8, -1, False, -1
)
W117 = SimpleProperty(
PropertyCategory.FUNCTIONAL_REGISTER, PropertyType.WORD, 245, 0x1EA, -1, False, -1
)
W118 = SimpleProperty(
PropertyCategory.FUNCTIONAL_REGISTER, PropertyType.WORD, 246, 0x1EC, -1, False, -1
)
W119 = SimpleProperty(
PropertyCategory.FUNCTIONAL_REGISTER, PropertyType.WORD, 247, 0x1EE, -1, False, -1
)
W120 = SimpleProperty(
PropertyCategory.FUNCTIONAL_REGISTER, PropertyType.WORD, 248, 0x1F0, -1, False, -1
)
W121 = SimpleProperty(
PropertyCategory.FUNCTIONAL_REGISTER, PropertyType.WORD, 249, 0x1F2, -1, False, -1
)
W122 = SimpleProperty(
PropertyCategory.FUNCTIONAL_REGISTER, PropertyType.WORD, 250, 0x1F4, -1, False, -1
)
W123 = SimpleProperty(
PropertyCategory.FUNCTIONAL_REGISTER, PropertyType.WORD, 251, 0x1F6, -1, False, -1
)
W124 = SimpleProperty(
PropertyCategory.FUNCTIONAL_REGISTER, PropertyType.WORD, 252, 0x1F8, -1, False, -1
)
W125 = SimpleProperty(
PropertyCategory.FUNCTIONAL_REGISTER, PropertyType.WORD, 253, 0x1FA, -1, False, -1
)
W126 = SimpleProperty(
PropertyCategory.FUNCTIONAL_REGISTER, PropertyType.WORD, 254, 0x1FC, -1, False, -1
)
W127 = SimpleProperty(
PropertyCategory.FUNCTIONAL_REGISTER, PropertyType.WORD, 255, 0x1FE, -1, False, -1
)
LW0 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, 0, 0x600, -1, True, 0
)
LW1 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x604, -1, True, 1
)
LW2 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x608, -1, True, 2
)
LW3 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x60C, -1, True, 3
)
LW4 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x610, -1, True, 4
)
LW5 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x614, -1, True, 5
)
LW6 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x618, -1, True, 6
)
LW7 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x61C, -1, True, 7
)
LW8 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x620, -1, True, 8
)
LW9 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x624, -1, True, 9
)
LW10 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x628, -1, True, 10
)
LW11 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x62C, -1, True, 11
)
LW12 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x630, -1, True, 12
)
LW13 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x634, -1, True, 13
)
LW14 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x638, -1, True, 14
)
LW15 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x63C, -1, True, 15
)
LW16 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x640, -1, True, 16
)
LW17 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x644, -1, True, 17
)
LW18 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x648, -1, True, 18
)
LW19 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x64C, -1, True, 19
)
LW20 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x650, -1, True, 20
)
LW21 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x654, -1, True, 21
)
LW22 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x658, -1, True, 22
)
LW23 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x65C, -1, True, 23
)
LW24 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x660, -1, True, 24
)
LW25 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x664, -1, True, 25
)
LW26 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x668, -1, True, 26
)
LW27 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x66C, -1, True, 27
)
LW28 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x670, -1, True, 28
)
LW29 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x674, -1, True, 29
)
LW30 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x678, -1, True, 30
)
LW31 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x67C, -1, True, 31
)
LW32 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x680, -1, True, 32
)
LW33 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x684, -1, True, 33
)
LW34 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x688, -1, True, 34
)
LW35 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x68C, -1, True, 35
)
LW36 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x690, -1, True, 36
)
LW37 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x694, -1, True, 37
)
LW38 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x698, -1, True, 38
)
LW39 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x69C, -1, True, 39
)
LW40 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x6A0, -1, True, 40
)
LW41 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x6A4, -1, True, 41
)
LW42 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x6A8, -1, True, 42
)
LW43 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x6AC, -1, True, 43
)
LW44 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x6B0, -1, True, 44
)
LW45 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x6B4, -1, True, 45
)
LW46 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x6B8, -1, True, 46
)
LW47 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x6BC, -1, True, 47
)
LW48 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x6C0, -1, True, 48
)
LW49 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x6C4, -1, True, 49
)
LW50 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x6C8, -1, True, 50
)
LW51 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x6CC, -1, True, 51
)
LW52 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x6D0, -1, True, 52
)
LW53 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x6D4, -1, True, 53
)
LW54 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x6D8, -1, True, 54
)
LW55 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x6DC, -1, True, 55
)
LW56 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x6E0, -1, True, 56
)
LW57 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x6E4, -1, True, 57
)
LW58 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x6E8, -1, True, 58
)
LW59 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x6EC, -1, True, 59
)
LW60 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x6F0, -1, True, 60
)
LW61 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x6F4, -1, True, 61
)
LW62 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x6F8, -1, True, 62
)
LW63 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x6FC, -1, True, 63
)
LW64 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x700, -1, True, 64
)
LW65 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x704, -1, True, 65
)
LW66 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x708, -1, True, 66
)
LW67 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x70C, -1, True, 67
)
LW68 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x710, -1, True, 68
)
LW69 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x714, -1, True, 69
)
LW70 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x718, -1, True, 70
)
LW71 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x71C, -1, True, 71
)
LW72 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x720, -1, True, 72
)
LW73 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x724, -1, True, 73
)
LW74 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x728, -1, True, 74
)
LW75 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x72C, -1, True, 75
)
LW76 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x730, -1, True, 76
)
LW77 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x734, -1, True, 77
)
LW78 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x738, -1, True, 78
)
LW79 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x73C, -1, True, 79
)
LW80 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x740, -1, True, 80
)
LW81 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x744, -1, True, 81
)
LW82 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x748, -1, True, 82
)
LW83 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x74C, -1, True, 83
)
LW84 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x750, -1, True, 84
)
LW85 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x754, -1, True, 85
)
LW86 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x758, -1, True, 86
)
LW87 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x75C, -1, True, 87
)
LW88 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x760, -1, True, 88
)
LW89 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x764, -1, True, 89
)
LW90 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x768, -1, True, 90
)
LW91 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x76C, -1, True, 91
)
LW92 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x770, -1, True, 92
)
LW93 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x774, -1, True, 93
)
LW94 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x778, -1, True, 94
)
LW95 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x77C, -1, True, 95
)
LW96 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x780, -1, True, 96
)
LW97 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x784, -1, True, 97
)
LW98 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x788, -1, True, 98
)
LW99 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x78C, -1, True, 99
)
LW100 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x790, -1, True, 100
)
LW101 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x794, -1, True, 101
)
LW102 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x798, -1, True, 102
)
LW103 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x79C, -1, True, 103
)
LW104 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x7A0, -1, True, 104
)
LW105 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x7A4, -1, True, 105
)
LW106 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x7A8, -1, True, 106
)
LW107 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x7AC, -1, True, 107
)
LW108 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x7B0, -1, True, 108
)
LW109 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x7B4, -1, True, 109
)
LW110 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x7B8, -1, True, 110
)
LW111 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x7BC, -1, True, 111
)
LW112 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x7C0, -1, True, 112
)
LW113 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x7C4, -1, True, 113
)
LW114 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x7C8, -1, True, 114
)
LW115 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x7CC, -1, True, 115
)
LW116 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x7D0, -1, True, 116
)
LW117 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x7D4, -1, True, 117
)
LW118 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x7D8, -1, True, 118
)
LW119 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x7DC, -1, True, 119
)
LW120 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x7E0, -1, True, 120
)
LW121 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x7E4, -1, True, 121
)
LW122 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x7E8, -1, True, 122
)
LW123 = SimpleProperty(
PropertyCategory.USER_REGISTER, PropertyType.LONG_WORD, -1, 0x7EC, | |
<filename>lib/twisted/web/sux.py
# -*- test-case-name: twisted.web.test.test_xml -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
*S*mall, *U*ncomplicated *X*ML.
This is a very simple implementation of XML/HTML as a network
protocol. It is not at all clever. Its main features are that it
does not:
- support namespaces
- mung mnemonic entity references
- validate
- perform *any* external actions (such as fetching URLs or writing files)
under *any* circumstances
- has lots and lots of horrible hacks for supporting broken HTML (as an
option, they're not on by default).
"""
from twisted.internet.protocol import Protocol
from twisted.python.reflect import prefixedMethodNames
# Elements of the three-tuples in the state table:
# BEGIN runs once on entering a state, DO runs for every byte while in
# it, END runs once when leaving it (see dataReceived's dispatch loop).
BEGIN_HANDLER = 0
DO_HANDLER = 1
END_HANDLER = 2
# Non-alphanumeric characters permitted inside identifiers (presumably
# consumed by parsing states outside this chunk).
identChars = '.-_:'
# Extra characters tolerated when beExtremelyLenient is enabled.
lenientIdentChars = identChars + ';+#/%~'
def nop(*args, **kw):
    """Accept any arguments, do nothing, return None."""
    return None
def unionlist(*args):
    """Return the distinct elements of all argument sequences, as dict keys."""
    merged = {}
    for sequence in args:
        for item in sequence:
            merged[item] = 1
    return merged.keys()
def zipfndict(*args, **kw):
    """Merge name->function dicts into name -> tuple-of-functions.

    For each key present in any input dict, the result holds one function
    per input dict, substituting the ``default`` keyword (``nop``) where
    a dict lacks the key.
    """
    default = kw.get('default', nop)
    combined = {}
    for key in unionlist(*[fndict.keys() for fndict in args]):
        combined[key] = tuple(fndict.get(key, default) for fndict in args)
    return combined
def prefixedMethodClassDict(clazz, prefix):
    """Map each suffix to the class attribute named ``prefix + suffix``."""
    names = prefixedMethodNames(clazz, prefix)
    return dict((name, getattr(clazz, prefix + name)) for name in names)
def prefixedMethodObjDict(obj, prefix):
    """Map each suffix to the *bound* method ``prefix + suffix`` of obj."""
    names = prefixedMethodNames(obj.__class__, prefix)
    return dict((name, getattr(obj, prefix + name)) for name in names)
class ParseError(Exception):
    """Raised on malformed input; carries the file name and position."""

    def __init__(self, filename, line, col, message):
        self.filename = filename
        self.line = line
        self.col = col
        self.message = message

    def __str__(self):
        return "{0}:{1}:{2}: {3}".format(self.filename, self.line,
                                         self.col, self.message)
class XMLParser(Protocol):
state = None
encodings = None
filename = "<xml />"
beExtremelyLenient = 0
_prepend = None
# _leadingBodyData will sometimes be set before switching to the
# 'bodydata' state, when we "accidentally" read a byte of bodydata
# in a different state.
_leadingBodyData = None
def connectionMade(self):
self.lineno = 1
self.colno = 0
self.encodings = []
    def saveMark(self):
        '''Get the line number and column of the last character parsed'''
        # This gets replaced during dataReceived (with a closure over the
        # loop's local counters), restored afterwards.
        return (self.lineno, self.colno)
def _parseError(self, message):
raise ParseError(*((self.filename,)+self.saveMark()+(message,)))
def _buildStateTable(self):
'''Return a dictionary of begin, do, end state function tuples'''
# _buildStateTable leaves something to be desired but it does what it
# does.. probably slowly, so I'm doing some evil caching so it doesn't
# get called more than once per class.
stateTable = getattr(self.__class__, '__stateTable', None)
if stateTable is None:
stateTable = self.__class__.__stateTable = zipfndict(
*[prefixedMethodObjDict(self, prefix)
for prefix in ('begin_', 'do_', 'end_')])
return stateTable
    def _decode(self, data):
        """Decode a raw chunk using the encodings detected so far.

        NOTE(review): relies on the Python 2 ``unicode`` builtin -- this
        module is Python 2 only as written.
        """
        if 'UTF-16' in self.encodings or 'UCS-2' in self.encodings:
            # UTF-16 code units are two bytes wide; a chunk split inside a
            # code unit across dataReceived calls is not handled.
            assert not len(data) & 1, 'UTF-16 must come in pairs for now'
        if self._prepend:
            # Re-attach the BOM stripped off in dataReceived so the decoder
            # sees the byte-order mark.
            data = self._prepend + data
        for encoding in self.encodings:
            data = unicode(data, encoding)
        return data
def maybeBodyData(self):
if self.endtag:
return 'bodydata'
# Get ready for fun! We're going to allow
# <script>if (foo < bar)</script> to work!
# We do this by making everything between <script> and
# </script> a Text
# BUT <script src="foo"> will be special-cased to do regular,
# lenient behavior, because those may not have </script>
# -radix
if (self.tagName == 'script'
and not self.tagAttributes.has_key('src')):
# we do this ourselves rather than having begin_waitforendscript
# becuase that can get called multiple times and we don't want
# bodydata to get reset other than the first time.
self.begin_bodydata(None)
return 'waitforendscript'
return 'bodydata'
    def dataReceived(self, data):
        """Feed one chunk of the document through the state machine,
        one byte at a time, tracking line/column for error reporting."""
        stateTable = self._buildStateTable()
        if not self.state:
            # all UTF-16 starts with this string
            if data.startswith('\xff\xfe'):
                self._prepend = '\xff\xfe'
                self.encodings.append('UTF-16')
                data = data[2:]
            elif data.startswith('\xfe\xff'):
                self._prepend = '\xfe\xff'
                self.encodings.append('UTF-16')
                data = data[2:]
            self.state = 'begin'
        if self.encodings:
            data = self._decode(data)
        # bring state, lineno, colno into local scope
        lineno, colno = self.lineno, self.colno
        curState = self.state
        # replace saveMark with a nested scope function
        _saveMark = self.saveMark
        def saveMark():
            # closure reads the loop-local counters at call time, so state
            # handlers see the position of the byte being processed
            return (lineno, colno)
        self.saveMark = saveMark
        # fetch functions from the stateTable
        beginFn, doFn, endFn = stateTable[curState]
        try:
            for byte in data:
                # do newline stuff
                if byte == '\n':
                    lineno += 1
                    colno = 0
                else:
                    colno += 1
                newState = doFn(byte)
                if newState is not None and newState != curState:
                    # this is the endFn from the previous state
                    endFn()
                    curState = newState
                    beginFn, doFn, endFn = stateTable[curState]
                    beginFn(byte)
        finally:
            # restore instance state even if a handler raised ParseError
            self.saveMark = _saveMark
            self.lineno, self.colno = lineno, colno
        # state doesn't make sense if there's an exception..
        self.state = curState
    def connectionLost(self, reason):
        """
        End the last state we were in.
        """
        stateTable = self._buildStateTable()
        # END_HANDLER is a module-level index into the (begin, do, end)
        # tuples; calling the end_ handler flushes any buffered body data,
        # comment or CDATA for the final state.
        stateTable[self.state][END_HANDLER]()
# state methods
def do_begin(self, byte):
if byte.isspace():
return
if byte != '<':
if self.beExtremelyLenient:
self._leadingBodyData = byte
return 'bodydata'
self._parseError("First char of document [%r] wasn't <" % (byte,))
return 'tagstart'
    def begin_comment(self, byte):
        # Start buffering comment text; do_comment watches for '-->'.
        self.commentbuf = ''
def do_comment(self, byte):
self.commentbuf += byte
if self.commentbuf.endswith('-->'):
self.gotComment(self.commentbuf[:-3])
return 'bodydata'
    def begin_tagstart(self, byte):
        # Reset per-tag state; 'byte' is the '<' that opened the tag.
        self.tagName = '' # name of the tag
        self.tagAttributes = {} # attributes of the tag
        self.termtag = 0 # is the tag self-terminating
        self.endtag = 0 # is this a closing (</...>) tag
    def do_tagstart(self, byte):
        # Accumulate the tag name and dispatch on the next byte; returns the
        # next state name or raises via _parseError on malformed input.
        if byte.isalnum() or byte in identChars:
            self.tagName += byte
            if self.tagName == '!--':
                return 'comment'
        elif byte.isspace():
            if self.tagName:
                if self.endtag:
                    # properly strict thing to do here is probably to only
                    # accept whitespace
                    return 'waitforgt'
                return 'attrs'
            else:
                self._parseError("Whitespace before tag-name")
        elif byte == '>':
            if self.endtag:
                self.gotTagEnd(self.tagName)
                return 'bodydata'
            else:
                self.gotTagStart(self.tagName, {})
                return (not self.beExtremelyLenient) and 'bodydata' or self.maybeBodyData()
        elif byte == '/':
            if self.tagName:
                return 'afterslash'
            else:
                self.endtag = 1
        elif byte in '!?':
            if self.tagName:
                if not self.beExtremelyLenient:
                    self._parseError("Invalid character in tag-name")
            else:
                # declarations (<!...>) and processing instructions (<?...>)
                self.tagName += byte
                self.termtag = 1
        elif byte == '[':
            if self.tagName == '!':
                return 'expectcdata'
            else:
                self._parseError("Invalid '[' in tag-name")
        else:
            if self.beExtremelyLenient:
                # treat the bogus '<' as literal text and re-emit it
                self.bodydata = '<'
                return 'unentity'
            self._parseError('Invalid tag character: %r'% byte)
    def begin_unentity(self, byte):
        # Lenient recovery: the '<' stored in bodydata by do_tagstart was
        # not a real tag; append the offending byte as literal text.
        self.bodydata += byte
    def do_unentity(self, byte):
        # Consume one more byte of the false tag, then return to body data.
        self.bodydata += byte
        return 'bodydata'
    def end_unentity(self):
        # Flush the recovered literal text ('<' plus the following bytes).
        self.gotText(self.bodydata)
    def begin_expectcdata(self, byte):
        # byte is the '[' right after '<!'; buffer while matching '[CDATA['.
        self.cdatabuf = byte
    def do_expectcdata(self, byte):
        # Match the '[CDATA[' header byte by byte; divert to 'cdata', to
        # 'waitforgt' (lenient MSWord-style chunks) or raise a ParseError.
        self.cdatabuf += byte
        cdb = self.cdatabuf
        cd = '[CDATA['
        if len(cd) > len(cdb):
            if cd.startswith(cdb):
                # still a prefix of '[CDATA['; keep accumulating
                return
            elif self.beExtremelyLenient:
                ## WHAT THE CRAP!? MSWord9 generates HTML that includes these
                ## bizarre <![if !foo]> <![endif]> chunks, so I've gotta ignore
                ## 'em as best I can. this should really be a separate parse
                ## state but I don't even have any idea what these _are_.
                return 'waitforgt'
            else:
                self._parseError("Mal-formed CDATA header")
        if cd == cdb:
            self.cdatabuf = ''
            return 'cdata'
        self._parseError("Mal-formed CDATA header")
    def do_cdata(self, byte):
        # Buffer CDATA content; ']]>' terminates the section (and is
        # stripped from the buffer before end_cdata emits it).
        self.cdatabuf += byte
        if self.cdatabuf.endswith("]]>"):
            self.cdatabuf = self.cdatabuf[:-3]
            return 'bodydata'
    def end_cdata(self):
        # Emit the buffered CDATA and reset for the next section.
        self.gotCData(self.cdatabuf)
        self.cdatabuf = ''
    def do_attrs(self, byte):
        # Between attributes inside a tag: dispatch on the next byte.
        if byte.isalnum() or byte in identChars:
            # XXX FIXME really handle !DOCTYPE at some point
            if self.tagName == '!DOCTYPE':
                return 'doctype'
            if self.tagName[0] in '!?':
                # declarations/PIs take no attributes; skip to '>'
                return 'waitforgt'
            return 'attrname'
        elif byte.isspace():
            return
        elif byte == '>':
            self.gotTagStart(self.tagName, self.tagAttributes)
            return (not self.beExtremelyLenient) and 'bodydata' or self.maybeBodyData()
        elif byte == '/':
            return 'afterslash'
        elif self.beExtremelyLenient:
            # discard and move on? Only case I've seen of this so far was:
            # <foo bar="baz"">
            return
        self._parseError("Unexpected character: %r" % byte)
    def begin_doctype(self, byte):
        # First byte of the doctype body after '<!DOCTYPE '.
        self.doctype = byte
    def do_doctype(self, byte):
        # Accumulate until '>' closes the doctype declaration.
        if byte == '>':
            return 'bodydata'
        self.doctype += byte
    def end_doctype(self):
        # Report the declaration, then clear it for the next document.
        self.gotDoctype(self.doctype)
        self.doctype = None
def do_waitforgt(self, byte):
if byte == '>':
if self.endtag or not self.beExtremelyLenient:
return 'bodydata'
return self.maybeBodyData()
def begin_attrname(self, byte):
self.attrname = byte
self._attrname_termtag = 0
    def do_attrname(self, byte):
        # Inside an attribute name; handles '=' (value follows), whitespace
        # (value may follow later) and a grab-bag of lenient recoveries.
        if byte.isalnum() or byte in identChars:
            self.attrname += byte
            return
        elif byte == '=':
            return 'beforeattrval'
        elif byte.isspace():
            return 'beforeeq'
        elif self.beExtremelyLenient:
            if byte in '"\'':
                return 'attrval'
            if byte in lenientIdentChars or byte.isalnum():
                self.attrname += byte
                return
            if byte == '/':
                # remember to emit gotTagEnd if '>' follows immediately
                self._attrname_termtag = 1
                return
            if byte == '>':
                # valueless attribute; record it as attrname="True"
                self.attrval = 'True'
                self.tagAttributes[self.attrname] = self.attrval
                self.gotTagStart(self.tagName, self.tagAttributes)
                if self._attrname_termtag:
                    self.gotTagEnd(self.tagName)
                    return 'bodydata'
                return self.maybeBodyData()
            # something is really broken. let's leave this attribute where it
            # is and move on to the next thing
            return
        self._parseError("Invalid attribute name: %r %r" % (self.attrname, byte))
def do_beforeattrval(self, byte):
if byte in '"\'':
return 'attrval'
elif byte.isspace():
return
elif self.beExtremelyLenient:
if byte in lenientIdentChars or byte.isalnum():
return 'messyattr'
if byte == '>':
self.attrval = 'True'
self.tagAttributes[self.attrname] = self.attrval
self.gotTagStart(self.tagName, self.tagAttributes)
return self.maybeBodyData()
if | |
rf_h = self.image_size / float(self.shape[1]) * scale_y / 2.0
rf_w = self.image_size / float(self.shape[2]) * scale_x / 2.0
else:
rf_h = self.receptive_field[0] * scale_y / 2.0
rf_w = self.receptive_field[1] * scale_x / 2.0
tl = (max(0, center_y - rf_h), max(0, center_x - rf_w))
tr = (max(0, center_y - rf_h), min(image_w, center_x + rf_w))
br = (min(image_h, center_y + rf_h), min(image_w, center_x + rf_w))
bl = (min(image_h, center_y + rf_h), max(0, center_x - rf_w))
return (tl, tr, br, bl)
def _get_receptive_fields(self, fake=False):
""" Get the receptive fields for each patch (in image coordinates)
Args:
fake (bool): Use simple non-overlapping receptive field
Returns:
image_locations (np.array): Receptive fields in image coordinates (h, w, 4, 2)
"""
n, h, w = self.locations.shape
image_locations = np.zeros((h, w, 4, 2), dtype=np.float32)
for (y, x) in np.ndindex((h, w)):
rf = self.calculate_receptive_field(y + 0.5, x + 0.5, fake=fake)
image_locations[y, x, 0] = rf[0]
image_locations[y, x, 1] = rf[1]
image_locations[y, x, 2] = rf[2]
image_locations[y, x, 3] = rf[3]
return image_locations
def _get_centers(self):
""" Get the center for each patch (in image coordinates)
Returns:
image_locations (np.array): Image centers in image coordinates (h, w, 2)
"""
n, h, w = self.locations.shape
image_locations = np.zeros((h, w, 2), dtype=np.float32)
for (y, x) in np.ndindex((h, w)):
image_locations[y, x] = (y + 0.5, x + 0.5)
return image_locations
def _image_to_relative(self, image_locations):
"""Convert image coordinates to relative coordinates
Args:
image_locations (np.array): Input in image coordinates
Returns:
relative_locations (np.array): Input in relative coordinates
"""
return self._ilu.image_to_relative(image_locations, image_width=self.image_size, image_height=self.image_size) # (h, w, 4, 2)
    def _relative_to_absolute(self, relative_locations, camera_locations):
        """Convert relative coordinates to absolute coordinates
        Args:
            relative_locations (np.array): Input in relative coordinates
            camera_locations (np.array): Respective camera locations
        Returns:
            absolute_locations (np.array): Input in absolute coordinates
        """
        res = self._ilu.relative_to_absolute(relative_locations, camera_locations)
        # Repack the plain float array as a structured record array with
        # named y/x fields; the transpose pairs up the two coordinates.
        res = np.rec.fromarrays(res.transpose(), dtype=[("y", np.float32), ("x", np.float32)]).transpose()
        # Re-view with the dtype of self.locations so assignment into the
        # locations dataset keeps the stored field layout.
        return np.rec.fromarrays(res.transpose(), dtype=self.locations.dtype) # No transpose here smh
def _save_patch_locations(self, key, start=None, end=None):
"""Save the patch locations to the currently opened features file
Args:
key (str): Metadata key where the location information is stored
start (int): Start timestamp
end (int): End timestamp
Returns:
None
"""
with h5py.File(self.filename, "r+") as hf:
# Remove the old locations dataset
if key in hf.keys():
del hf[key]
hf.create_dataset(key, data=self[key])
if start is not None and end is not None:
hf[key].attrs["Start"] = start
hf[key].attrs["End"] = end
hf[key].attrs["Duration"] = end - start
hf[key].attrs["Duration (formatted)"] = utils.format_duration(end - start)
def calculate_patch_locations(self, fake=False):
"""Calculate the real world coordinates of every feature vector (patch)
Args:
fake (bool): Use simple non-overlapping receptive field
Returns:
None
"""
key = "locations"
if fake: key = "fake_" + key
assert self.contains_features, "Can only compute patch locations if there are patches"
logger.info("Calculating locations of every patch")
start = time.time()
image_locations = self._get_receptive_fields(fake)
relative_locations = self._image_to_relative(image_locations)
for i in tqdm(range(self[key].shape[0]), desc="Calculating locations", file=sys.stderr):
self[key][i] = self._relative_to_absolute(relative_locations, self[i, 0, 0].camera_locations)
end = time.time()
self.contains_locations = True
self._save_patch_locations(key, start, end)
def calculate_patch_center_locations(self):
"""Calculate the real world coordinates of the center of every feature vector (patch)
*This is currently not in use, but was intended for the spatial binning
variant that only uses the central bin for anomaly detection*
Returns:
None
"""
key = "locations_center"
assert self.contains_features, "Can only compute patch locations if there are patches"
logger.info("Calculating locations of every patch")
start = time.time()
image_locations = self._get_centers()
relative_locations = self._image_to_relative(image_locations)
for i in tqdm(range(self[key].shape[0]), desc="Calculating locations", file=sys.stderr):
self[key][i] = self._relative_to_absolute(relative_locations, self[i, 0, 0].camera_locations)
end = time.time()
self._save_patch_locations(key, start, end)
#################
# Calculations #
#################
def var(self):
"""Calculate the variance"""
return np.var(self.ravel().features, axis=0, dtype=np.float64)
def cov(self):
"""Calculate the covariance matrix"""
return np.cov(self.ravel().features, rowvar=False)
def mean(self):
"""Calculate the mean"""
return np.mean(self.ravel().features, axis=0, dtype=np.float64)
#################
# Misc #
#################
    def to_dataset(self):
        """Returns a TensorFlow Dataset of all contained images
        Returns:
            TensorFlow Dataset with all images in this PatchArray
        """
        import tensorflow as tf
        def _gen():
            # Yield (RGB image, timestamp); stored frames are BGR (OpenCV).
            for i in range(self.shape[0]):
                rgb = cv2.cvtColor(self[i, 0, 0].get_image(), cv2.COLOR_BGR2RGB)
                yield (np.array(rgb), self[i, 0, 0].times)
        raw_dataset = tf.data.Dataset.from_generator(
            _gen,
            output_types=(tf.uint8, tf.int64),
            output_shapes=((None, None, None), ()))
        # Prefetch so image decoding overlaps with downstream consumption.
        return raw_dataset.prefetch(tf.data.experimental.AUTOTUNE)
isview = property(lambda self: np.shares_memory(self, self.root))
def get_batch(self, frame, temporal_batch_size):
"""Gets a temporal batch for a given frame by
Args:
frame (PatchArray): Frame to get the temporal batch for
temporal_batch_size (int): Number of frames in the batch
Returns:
np.ndarray with the frames
"""
# Only take patches from the current round (no jumps from the end of the last round).
# Use the root array, so every frame is considered (e.g. no FPS reduction on root array)
current_round = self.root.round_number(frame.round_numbers)
time_index = np.argwhere(current_round.times == frame.times).flat[0]
res = None
for res_i, arr_i in enumerate(range(time_index - temporal_batch_size, time_index)):
# Get and convert the image
image = cv2.cvtColor(current_round[max(0, arr_i), 0, 0].get_image(), cv2.COLOR_BGR2RGB)
if res is None:
res = np.zeros((temporal_batch_size,) + image.shape)
res[res_i,...] = image
return res
    def to_temporal_dataset(self, temporal_batch_size=16):
        """Returns a TensorFlow Dataset of all contained images with temporal batches
        Args:
            temporal_batch_size (int): Number of frames in the batch
        Returns:
            TensorFlow Dataset with temporal batches of all images in this PatchArray
        """
        import tensorflow as tf
        def _gen():
            # Each element is (temporal batch of frames, timestamp).
            for i in range(self.shape[0]):
                temporal_batch = self.get_batch(self[i, 0, 0], temporal_batch_size)
                yield (temporal_batch, self[i, 0, 0].times)
        raw_dataset = tf.data.Dataset.from_generator(
            _gen,
            output_types=(tf.uint8, tf.int64),
            output_shapes=((None, None, None, None), ()))
        # NOTE(review): get_batch allocates its buffer with np.zeros (float64
        # by default) while tf.uint8 is declared here -- confirm dtypes match.
        return raw_dataset.prefetch(tf.data.experimental.AUTOTUNE)
#################
# Metrics #
#################
# def _get_stop_labels(self):
# slack = 5
# labels = self.stop[:,0,0].copy()
# f = labels == 2
# for i, b in enumerate(labels):
# if b and i + slack < labels.size:
# labels[i:i + slack] = 0
# labels[f] = 2
class Metric(object):
"""Helper class for the calculation of different metrics"""
COLORS = {
-1: (100, 100, 100),
0: (150, 150, 150),
1: (80, 175, 76),
2: (54, 67, 244)
}
def __init__(self, name, label_name, per_patch=False, mean=False, names=None, colors=None):
"""Create a new metric
Args:
name (str): Metric name
label_name (str): Metadata key for the used label
per_patch (bool): Calculate metric for each patch (referred to as "per-pixel" in thesis)
mean (bool): True: Take the mean of anomaly scores per frame. False: Use the maximum AD score
names (dict): Dictionary mapping the label values to readable names
colors (dict): Dictionary mapping the label values to colors
Returns:
A new Metric
"""
self.name = name
self.label_name = label_name
self.per_patch = per_patch
self.mean = mean
self.names = names if names is not None else {
0: "Unknown",
1: "No anomaly",
2: "Anomaly"
}
self.current_threshold = -1
def get_relevant(self, patches):
"""Get all patches that will be considered (label value != 0)"""
return patches[patches[self.label_name][:,0,0] != 0]
def get_labels(self, patches):
"""Get all labels"""
if self.per_patch:
return patches[self.label_name].ravel()
else:
return patches[self.label_name][:,0,0]
def get_values(self, mahalanobis_distances):
"""Get all anomaly scores"""
if self.per_patch:
return mahalanobis_distances.ravel()
elif self.mean:
return np.mean(mahalanobis_distances, axis=(1,2))
else:
return np.max(mahalanobis_distances, axis=(1,2))
METRICS = [
Metric("patch", "patch_labels", per_patch=True),
Metric("frame (mean)", "labels", mean=True),
Metric("frame (max)", "labels"),
Metric("stop (mean)", "stop", mean=True, names={-1: "Not set", 0: "It's OK to stop", 1: "Don't stop", 2: "Stop"}),
Metric("stop (max)", "stop", names={-1: "Not set", 0: "It's OK to stop", 1: "Don't stop", 2: "Stop"})
]
def calculate_tsne(self):
"""Calculate and visualize a t-SNE (DEPRECATED)"""
assert self.contains_mahalanobis_distances, "Can't calculate t-SNE without mahalanobis distances calculated"
# TODO: Maybe only validation?
features = self.features.reshape(-1, self.features.shape[-1])
feat_cols = ["feature" + str(i) for i in range(features.shape[1])]
df = pd.DataFrame(features, columns=feat_cols)
df["maha"] = self.mahalanobis_distances.SVG.ravel()
df["l"] = self.patch_labels.ravel()
df["label"] = df["l"].apply(lambda l: "Anomaly" if l == 2 else "No anomaly")
# For reproducability of the results
np.random.seed(42)
rndperm = np.random.permutation(df.shape[0])
N = 10000
df_subset = df.loc[rndperm[:N],:].copy()
data_subset = df_subset[feat_cols].values
time_start = time.time()
tsne = TSNE(n_components=2, verbose=1)
tsne_results = tsne.fit_transform(data_subset)
logger.info("t-SNE done! Time elapsed: {} seconds".format(time.time() - time_start))
df_subset["tsne-2d-one"] = tsne_results[:,0]
df_subset["tsne-2d-two"] = tsne_results[:,1]
fig = plt.figure(figsize=(16,10))
fig.suptitle(os.path.basename(self.filename).replace(".h5", ""), fontsize=20)
LABEL_COLORS = {
"No anomaly": "#4CAF50", # No anomaly
"Anomaly": "#F44336" # Contains anomaly
}
sns.scatterplot(
x="tsne-2d-one", y="tsne-2d-two",
hue="label",
palette=LABEL_COLORS,
data=df_subset,
legend="brief",
alpha=0.4,
size="maha"
)
| |
list_project_files(self, project_id):
query_txt = (
"""{datanode(first:-1,project_id: "%s") {type file_name id object_id}}"""
% (project_id)
)
res = self.sub.query(query_txt)
if len(res["data"]["datanode"]) == 0:
print("Project " + project_id + " has no records in any data_file node.")
return None
else:
df = json_normalize(res["data"]["datanode"])
json_normalize(Counter(df["type"]))
# guids = df.loc[(df['type'] == node)]['object_id']
return df
def get_uuids_for_submitter_ids(self, sids, node):
"""
Get a list of UUIDs for a provided list of submitter_ids.
"""
uuids = []
count = 0
for sid in sids:
count += 1
args = 'submitter_id:"{}"'.format(sid)
res = self.paginate_query(node=node, args=args)
recs = res["data"][node]
if len(recs) == 1:
uuids.append(recs[0]["id"])
elif len(recs) == 0:
print("No data returned for {}:\n\t{}".format(sid, res))
print("\t{}/{}".format(count, len(sids)))
print(
"Finished retrieving {} uuids for {} submitter_ids".format(
len(uuids), len(sids)
)
)
return uuids
    def get_records_for_submitter_ids(self, sids, node):
        """
        Get a list of UUIDs for a provided list of submitter_ids.
        # could also use:{node(submitter_id: "xyz") {id project_id}} #
        Downloads the TSV export for each matched record, concatenates them
        into a master TSV on disk and returns the combined DataFrame.
        """
        uuids = []
        pids = []
        count = 0
        for sid in sids:
            count += 1
            args = 'submitter_id:"{}"'.format(sid)
            res = self.paginate_query(node=node, args=args, props=["id", "submitter_id","project_id"])
            recs = res["data"][node]
            if len(recs) == 1:
                uuids.append(recs[0]["id"])
                pids.append(recs[0]["project_id"])
            elif len(recs) == 0:
                print("No data returned for {}:\n\t{}".format(sid, res))
            print("\t{}/{}".format(count, len(sids)))
        print(
            "Finished retrieving {} uuids for {} submitter_ids".format(
                len(uuids), len(sids)
            )
        )
        # NOTE(review): if any sid matched zero records, pids/uuids are
        # shorter than sids and this constructor raises -- confirm intended.
        df = pd.DataFrame({'project_id':pids,'uuid':uuids,'submitter_id':sids})
        dfs = []
        for i in range(len(df)):
            sid = df.iloc[i]['submitter_id']
            pid = df.iloc[i]['project_id']
            uuid = df.iloc[i]['uuid']
            prog,proj = pid.split("-",1)
            print("({}/{}): {}".format(i+1,len(df),uuid))
            mydir = "project_uuids/{}_tsvs".format(pid) # create the directory to store TSVs
            if not os.path.exists(mydir):
                os.makedirs(mydir)
            filename = "{}/{}_{}.tsv".format(mydir,pid,uuid)
            if os.path.isfile(filename):
                print("File previously downloaded.")
            else:
                self.sub.export_record(prog, proj, uuid, "tsv", filename)
            # read the (cached or freshly exported) per-record TSV
            df1 = pd.read_csv(filename, sep="\t", header=0)
            dfs.append(df1)
        all_data = pd.concat(dfs, ignore_index=True)
        master = "master_uuids_{}.tsv".format(node)
        all_data.to_csv("{}".format(master), sep='\t',index=False)
        print("Master node TSV with {} total recs written to {}.".format(len(all_data),master))
        return all_data
    def delete_records(self, uuids, project_id, chunk_size=200, backup=False):
        """
        This function attempts to delete a list of UUIDs from a project.
        It returns a dictionary with a list of successfully deleted UUIDs,
        a list of those that failed, all the API responses, and all the error messages.

        Args:
            uuids(list): A list of the UUIDs to delete.
            project_id(str): The project to delete the IDs from.
            chunk_size(int): The number of records to delete in each API request.
            backup(str): If provided, deleted records are backed up to this filename.

        Returns:
            dict with keys "success", "failure", "responses" and "errors".

        Example:
            delete_records(project_id=project_id,uuids=uuids,chunk_size=200)
        """
        program, project = project_id.split("-", 1)
        if isinstance(uuids, str):
            uuids = [uuids]
        if not isinstance(uuids, list):
            raise Gen3Error(
                "Please provide a list of UUID(s) to delete with the 'uuid' argument."
            )
        if backup:
            # Pick a backup filename that doesn't exist yet by appending _N.
            ext = backup.split(".")[-1]
            fname = ".".join(backup.split(".")[0:-1])
            count = 0
            while path.exists(backup):
                count += 1
                backup = "{}_{}.{}".format(fname, count, ext)
            count = 0
            print(
                "Attempting to backup {} records to delete to file '{}'.".format(
                    len(uuids), backup
                )
            )
            records = []
            for uuid in uuids:
                count += 1
                try:
                    response = self.sub.export_record(
                        program=program,
                        project=project,
                        uuid=uuid,
                        fileformat="json",
                        filename=None,
                    )
                    record = json.loads(json.dumps(response[0]))
                    records.append(record)
                    print(
                        "\tRetrieving record for UUID '{}' ({}/{}).".format(
                            uuid, count, len(uuids)
                        )
                    )
                except Exception as e:
                    print(
                        "Exception occurred during 'export_record' request: {}.".format(
                            e
                        )
                    )
                    continue
            # NOTE(review): writes the Python repr of the records list, not
            # JSON -- confirm whether json.dump was intended here.
            with open(backup, "w") as backfile:
                backfile.write("{}".format(records))
        responses = []
        errors = []
        failure = []
        success = []
        retry = []
        tried = []
        results = {}
        while len(tried) < len(uuids): # loop sorts all uuids into success or failure
            if len(retry) > 0:
                print("Retrying deletion of {} valid UUIDs.".format(len(retry)))
                list_ids = ",".join(retry)
                retry = []
            else:
                # next untried window; chunk_size may shrink on failures below
                list_ids = ",".join(uuids[len(tried) : len(tried) + chunk_size])
            rurl = "{}/api/v0/submission/{}/{}/entities/{}".format(
                self._endpoint, program, project, list_ids
            )
            try:
                # print("\n\trurl='{}'\n".format(rurl)) # trouble-shooting
                # print("\n\tresp = requests.delete(rurl, auth=auth)")
                # print("\n\tprint(resp.text)")
                resp = requests.delete(rurl, auth=self._auth_provider)
            except Exception as e:
                # transport-level failure: halve the batch and retry the window
                chunk_size = int(chunk_size / 2)
                print(
                    "Exception occurred during delete request:\n\t{}.\n\tReducing chunk_size to '{}'.".format(
                        e, chunk_size
                    )
                )
                continue
            if (
                "414 Request-URI Too Large" in resp.text
                or "service failure" in resp.text
            ):
                chunk_size = int(chunk_size / 2)
                print(
                    "Service Failure. The chunk_size is too large. Reducing to '{}'".format(
                        chunk_size
                    )
                )
            elif "The requested URL was not found on the server." in resp.text:
                print(
                    "\n Requested URL not found on server:\n\t{}\n\t{}".format(
                        resp, rurl
                    )
                ) # debug
                break
            else: # the delete request got an API response
                # print(resp.text) #trouble-shooting
                output = json.loads(resp.text)
                responses.append(output)
                if output["success"]: # 'success' == True or False in API response
                    success = list(set(success + [x["id"] for x in output["entities"]]))
                else: # if one UUID fails to delete in the request, the entire request fails.
                    for entity in output["entities"]:
                        if entity[
                            "valid"
                        ]: # get the valid entities from repsonse to retry.
                            retry.append(entity["id"])
                        else:
                            errors.append(entity["errors"][0]["message"])
                            failure.append(entity["id"])
            failure = list(set(failure))
            for error in list(set(errors)):
                print(
                    "Error message for {} records: {}".format(
                        errors.count(error), error
                    )
                )
            tried = list(set(success + failure))
            print(
                "\tProgress: {}/{} (Success: {}, Failure: {}).".format(
                    len(tried), len(uuids), len(success), len(failure)
                )
            )
        # exit the while loop if
        results["failure"] = failure
        results["success"] = success
        results["responses"] = responses
        results["errors"] = errors
        print("\tFinished record deletion script.")
        return results
def delete_node(self, node, project_id, chunk_size=200):
"""
This function attempts to delete all the records in a particular node of a project.
It returns the results of the delete_records function.
"""
try:
uuids = self.get_uuids_in_node(node, project_id)
except:
raise Gen3Error(
"Failed to get UUIDs in the node '"
+ node
+ "' of project '"
+ project_id
+ "'."
)
if len(uuids) != 0:
print(
"Attemping to delete "
+ str(len(uuids))
+ " records in the node '"
+ node
+ "' of project '"
+ project_id
+ "'."
)
try:
results = self.delete_records(uuids, project_id, chunk_size)
print(
"Successfully deleted "
+ str(len(results["success"]))
+ " records in the node '"
+ node
+ "' of project '"
+ project_id
+ "'."
)
if len(results["failure"]) > 0:
print(
"Failed to delete "
+ str(len(results["failure"]))
+ " records. See results['errors'] for the error messages."
)
except:
raise Gen3Error(
"Failed to delete UUIDs in the node '"
+ node
+ "' of project '"
+ project_id
+ "'."
)
return results
    def get_submission_order(
        self,
        root_node="project",
        excluded_schemas=[
            "_definitions",
            "_settings",
            "_terms",
            "program",
            "project",
            "root",
            "data_release",
            "metaschema",
        ],
    ):
        """
        This function gets a data dictionary, and then it determines the submission order of nodes by looking at the links.
        The reverse of this is the deletion order for deleting projects. (Must delete child nodes before parents).

        Returns a list of (node, order) tuples; lower order submits first.

        NOTE(review): excluded_schemas is a mutable default argument; it is
        never mutated here, but a tuple default would be safer.
        NOTE(review): if the dictionary contains a link cycle or a parent
        outside 'nodes', this while loop never terminates -- confirm inputs.
        """
        dd = self.sub.get_dictionary_all()
        schemas = list(dd)
        nodes = [k for k in schemas if k not in excluded_schemas]
        submission_order = [
            (root_node, 0)
        ] # make a list of tuples with (node, order) where order is int
        while (
            len(submission_order) < len(nodes) + 1
        ): # "root_node" != in "nodes", thus the +1
            for node in nodes:
                if (
                    len([item for item in submission_order if node in item]) == 0
                ): # if the node != in submission_order
                    # print("Node: {}".format(node))
                    node_links = dd[node]["links"]
                    parents = []
                    for link in node_links:
                        if "target_type" in link: # node = 'webster_step_second_test'
                            parents.append(link["target_type"])
                        elif "subgroup" in link: # node = 'expression_array_result'
                            sub_links = link.get("subgroup")
                            if not isinstance(sub_links, list):
                                sub_links = [sub_links]
                            for sub_link in sub_links:
                                if "target_type" in sub_link:
                                    parents.append(sub_link["target_type"])
                    if False in [
                        i in [i[0] for i in submission_order] for i in parents
                    ]:
                        continue # if any parent != already in submission_order, skip this node for now
                    else: # submit this node after the last parent to submit
                        parents_order = [
                            item for item in submission_order if item[0] in parents
                        ]
                        submission_order.append(
                            (node, max([item[1] for item in parents_order]) + 1)
                        )
        return submission_order
def delete_project(self, project_id, root_node="project", chunk_size=200):
submission_order = self.get_submission_order(root_node=root_node)
delete_order = sorted(submission_order, key=lambda x: x[1], reverse=True)
nodes = [i[0] for i in delete_order]
try:
nodes.remove("project")
except:
print("No 'project' node in list of nodes.")
for node in nodes:
print("\nDeleting node '{}' from project '{}'.".format(node, project_id))
data = self.delete_node(
node=node, project_id=project_id, chunk_size=chunk_size
)
prog, proj = project_id.split("-", 1)
try:
data = self.sub.delete_project(program=prog, project=proj)
except Exception as e:
print("Couldn't delete project '{}':\n\t{}".format(project_id, e))
if "Can not delete the project." in | |
is None:
W = SU2_get_Ws(tl)
prod = sp.array([[1]])
for i in xrange(tl):
prod = sp.kron(U, prod)
Ul = sp.zeros((tl + 1, tl + 1), dtype=sp.complex128)
for m in xrange(tl + 1):
for n in xrange(tl + 1):
Ul[m, n] = W[m].T.dot(prod.dot(W[n]))
return Ul
# Spin-1/2 generators: paus[al] = sigma_al / 2 for the three Pauli matrices
# (the middle entry expands to [[0, -0.5j], [0.5j, 0]] = sigma_y / 2).
paus = [0.5 * sp.array([[0, 1], [1, 0]]),
        0.5j * sp.array([[0, -1], [1, 0]]),
        0.5 * sp.array([[1, 0], [0, -1]])]
def SU2_get_gen(al, tl, W=None):
    """Return the generator tau_al of the spin-l irrep (l = tl/2), built by
    projecting the total-spin operator of tl spin-1/2 sites into the
    (tl+1)-dimensional subspace spanned by the vectors W.

    Args:
        al: generator index 0..2 (Pauli direction).
        tl: twice the spin quantum number; irrep dimension is tl + 1.
        W: optional basis from SU2_get_Ws; computed on demand if omitted.
    """
    if W is None:
        W = SU2_get_Ws(tl)
    pau = paus[al]
    # Total spin: sum over sites of 1 x ... x pau x ... x 1.
    M = sp.zeros((2**tl, 2**tl), dtype=sp.complex128)
    for n in xrange(tl):
        M += sp.kron(sp.eye(2**(n)), sp.kron(pau, sp.eye(2**(tl - n - 1))))
    # Project: tau[m, n] = <W_m| M |W_n>.
    # NOTE(review): uses .T rather than .conj().T, i.e. assumes the W
    # vectors are real -- confirm against SU2_get_Ws.
    tau = sp.zeros((tl + 1, tl + 1), dtype=sp.complex128)
    for m in xrange(tl + 1):
        for n in xrange(tl + 1):
            tau[m, n] = W[m].T.dot(M.dot(W[n]))
    return tau
def SU2_test_irreps(tl):
    # Self-test (Python 2 print statements, diagnostic output only):
    # checks the Casimir value, the su(2) commutation relations and that
    # SU2_get_irrep reproduces the exponentiated spin-l generators.
    l = tl / 2.
    W = SU2_get_Ws(tl)
    taus = [SU2_get_gen(al, tl, W=W) for al in [0, 1, 2]]
    # sum_al tau_al tau_al^dag should equal l(l+1) * identity (Casimir)
    eye_test = taus[0].dot(taus[0].conj().T) + taus[1].dot(taus[1].conj().T) + taus[2].dot(taus[2].conj().T)
    print "test generators:", sp.allclose(eye_test, sp.eye(tl + 1) * l * (l + 1))
    print "[t0,t1] - it2 = 0:", sp.allclose(taus[0].dot(taus[1]) - taus[1].dot(taus[0]), 1.j * taus[2])
    print "[t2,t0] - it1 = 0:", sp.allclose(taus[2].dot(taus[0]) - taus[0].dot(taus[2]), 1.j * taus[1])
    print "[t1,t2] - it0 = 0:", sp.allclose(taus[1].dot(taus[2]) - taus[2].dot(taus[1]), 1.j * taus[0])
    # random group element in the fundamental rep and its spin-l image
    om = sp.rand(3)
    G_half = la.expm(1.j * (om[0] * paus[0] + om[1] * paus[1] + om[2] * paus[2]))
    print "G_half unitary", sp.allclose(G_half.dot(G_half.conj().T), sp.eye(2))
    Gl = la.expm(1.j * (om[0] * taus[0] + om[1] * taus[1] + om[2] * taus[2]))
    print "G_l unitary", sp.allclose(Gl.dot(Gl.conj().T), sp.eye(tl + 1))
    Gl_ = SU2_get_irrep(G_half, tl, W=W)
    print "G_l test", sp.allclose(Gl, Gl_)
def SU2_get_PL(max_2l=3):
    """Build the three left momentum operators PL_al on the direct sum of
    irreps with 2l <= max_2l.  Each acts on the left (j') index of a
    |l, j', k'> basis state through the spin-l generator; blocks are
    diagonal in l and in the right (k') index.
    """
    itb = SU2_build_index_ints(max_2l=max_2l)
    dim = len(itb)
    tl = 0
    tau_l = SU2_get_gen(0, tl)
    PL = [None] * 3
    for al in [0, 1, 2]:
        PL[al] = sp.zeros((dim, dim), dtype=sp.complex128)
        for mL in xrange(dim):
            for mR in xrange(dim):
                tlL, jpL, kpL = itb[mL]
                tlR, jpR, kpR = itb[mR]
                if not (tlL == tlR and kpL == kpR):
                    continue
                if tlL != tl:
                    # itb is grouped by irrep, so the generator only needs
                    # rebuilding when a new l block starts
                    tl = tlL
                    tau_l = SU2_get_gen(al, tl)
                PL[al][mL, mR] = tau_l[jpR, jpL]
    return PL
def SU2_get_PR(max_2l=3):
    """Build the three right momentum operators PR_al, the counterpart of
    SU2_get_PL acting on the right (k') index of |l, j', k'> with an
    extra minus sign; blocks are diagonal in l and in the left (j') index.
    """
    itb = SU2_build_index_ints(max_2l=max_2l)
    dim = len(itb)
    tl = 0
    tau_l = SU2_get_gen(0, tl)
    PR = [None] * 3
    for al in [0, 1, 2]:
        PR[al] = sp.zeros((dim, dim), dtype=sp.complex128)
        for mL in xrange(dim):
            for mR in xrange(dim):
                tlL, jpL, kpL = itb[mL]
                tlR, jpR, kpR = itb[mR]
                if not (tlL == tlR and jpL == jpR):
                    continue
                if tlL != tl:
                    # rebuild the generator only when entering a new l block
                    tl = tlL
                    tau_l = SU2_get_gen(al, tl)
                PR[al][mL, mR] = -tau_l[kpL, kpR]
    return PR
def SU2_test_U_PL(max_2l=3):
PL = SU2_get_PL(max_2l=max_2l)
U = SU2_get_U(max_2l=max_2l)
print "U_0,0 = U*_1,1", sp.allclose(U[0][0], U[1][1].conj().T)
print "U_0,1 = -U*_1,0", sp.allclose(U[0][1], -U[1][0].conj().T)
#print "U_0,0 U*_0,0 = 1 - U_0,1 U*_0,1", sp.allclose(U[0][0].dot(U[1][1]), sp.eye(U[0][0].shape[0]) + U[0][1].dot(U[1][0]))
for al in [0, 1, 2]:
for m in [0, 1]:
for n in [0, 1]:
com = PL[al].dot(U[m][n]) - U[m][n].dot(PL[al])
com_ = 0
for k in [0, 1]:
com_ += paus[al][m, k] * U[k][n]
print "[PL_%d, U_%d,%d] = (F_%d U)_%d,%d:" % (al, m, n, al, m, n), \
sp.allclose(com, com_), la.norm(com - com_)
for al in [0, 1, 2]:
for m in [0, 1]:
for n in [0, 1]:
com = PL[al].dot(U[m][n].conj().T) - U[m][n].conj().T.dot(PL[al])
com_ = 0
for k in [0, 1]:
com_ += -paus[al][k, m] * U[k][n].conj().T
print "[PL_%d, U*_%d,%d] = (U*' F_%d)_%d,%d:" % (al, m, n, al, m, n), \
sp.allclose(com, com_), la.norm(com - com_)
P2 = SU2_get_P2(max_2l=max_2l)
P2_ = PL[0].dot(PL[0]) + PL[1].dot(PL[1]) + PL[2].dot(PL[2])
print "P2 = PL_0^2 + PL_1^2 + PL_2^2:", sp.allclose(P2, P2_)
d_maxtl = sp.sum((max_2l + 1)**2)
start_maxtl = len(P2) - d_maxtl
UUd = sp.zeros_like(U[0][0])
for m in [0, 1]:
for n in [0, 1]:
UUd.fill(0)
for k in [0, 1]:
UUd += U[m][k].dot(U[n][k].conj().T)
print "(U U^dag)_%d,%d = delta_%d,%d (restricted to all but highest irrep):" % (m, n, m, n), \
sp.allclose(UUd[:start_maxtl, :start_maxtl], 0 if m != n else sp.eye(start_maxtl))
print "Error (norm distance) in highest irrep:", la.norm(UUd[start_maxtl:, start_maxtl:] - 0 if m != n else sp.eye(d_maxtl))
eijk = sp.zeros((3, 3, 3))
eijk[0, 1, 2] = eijk[1, 2, 0] = eijk[2, 0, 1] = 1
eijk[0, 2, 1] = eijk[2, 1, 0] = eijk[1, 0, 2] = -1
for al in [0, 1, 2]:
for be in [0, 1, 2]:
com = PL[al].dot(PL[be]) - PL[be].dot(PL[al])
gas = sp.argwhere(eijk[al, be, :] != 0)
if len(gas) > 0:
ga = gas[0]
com_ = -1.j * eijk[al, be, ga] * PL[ga]
else:
com_ = 0
print "[PL_%d, PL_%d] = -1j * (eps^%d,%d_ga PL_ga)" % (al, be, al, be), sp.allclose(com, com_)
def SU2_get_L(U, max_2l=3):
    """Left-rotation operator L(U) on the truncated SU(2) space.

    Block-diagonal over the irreps: within the 2l = tl block it acts on the
    j' (row) index through the spin-l irrep matrix of U^dagger, leaving k'
    untouched.
    """
    itb = SU2_build_index_ints(max_2l=max_2l)
    dim = len(itb)
    L = sp.zeros((dim, dim), dtype=sp.complex128)
    cached_tl = 0
    irrep = sp.array([[1]])  # the 2l = 0 irrep is the trivial [[1]]
    for row in range(dim):
        tl_row, jp_row, kp_row = itb[row]
        for col in range(dim):
            tl_col, jp_col, kp_col = itb[col]
            # Nonzero only within one irrep block and for matching k'.
            if tl_row != tl_col or kp_row != kp_col:
                continue
            if tl_row != cached_tl:
                # Rows are ordered by increasing tl, so the irrep matrix is
                # (re)computed exactly once per block.
                cached_tl = tl_row
                irrep = SU2_get_irrep(U.conj().T, cached_tl)
            L[row, col] = irrep[jp_col, jp_row]
    return L
def SU2_get_R(U, max_2l=3):
    """Right-rotation operator R(U) on the truncated SU(2) space.

    Block-diagonal over the irreps: within the 2l = tl block it acts on the
    k' (column) index through the spin-l irrep matrix of U, leaving j'
    untouched.
    """
    itb = SU2_build_index_ints(max_2l=max_2l)
    dim = len(itb)
    R = sp.zeros((dim, dim), dtype=sp.complex128)
    cached_tl = 0
    irrep = sp.array([[1]])  # the 2l = 0 irrep is the trivial [[1]]
    for row in range(dim):
        tl_row, jp_row, kp_row = itb[row]
        for col in range(dim):
            tl_col, jp_col, kp_col = itb[col]
            # Nonzero only within one irrep block and for matching j'.
            if tl_row != tl_col or jp_row != jp_col:
                continue
            if tl_row != cached_tl:
                # Rows are ordered by increasing tl, so the irrep matrix is
                # (re)computed exactly once per block.
                cached_tl = tl_row
                irrep = SU2_get_irrep(U, cached_tl)
            R[row, col] = irrep[kp_row, kp_col]
    return R
def SU2_get_random_U():
    """Draw a pseudo-random SU(2) element exp(i om . sigma) with the three
    angles om sampled uniformly from [0, 1)."""
    om = sp.rand(3)
    generator = om[0] * paus[0] + om[1] * paus[1] + om[2] * paus[2]
    return la.expm(1.j * generator)
def SU2_test_LR(max_2l=3):
U = SU2_get_random_U()
V = SU2_get_random_U()
L_U = SU2_get_L(U, max_2l=max_2l)
R_V = SU2_get_R(V, max_2l=max_2l)
print "[L_U, R_V] = 0:", sp.allclose(L_U.dot(R_V) - R_V.dot(L_U), 0), la.norm(L_U.dot(R_V) - R_V.dot(L_U))
L_Ui = SU2_get_L(U.conj().T, max_2l=max_2l)
R_Vi = SU2_get_R(V.conj().T, max_2l=max_2l)
print "L_Ui = L_U^dag:", sp.allclose(L_U.conj().T, L_Ui)
print "R_Vi = R_V^dag:", sp.allclose(R_V.conj().T, R_Vi)
L_I = SU2_get_L(sp.eye(2), max_2l=max_2l)
R_I = SU2_get_L(sp.eye(2), max_2l=max_2l)
print "L_I = I:", sp.allclose(L_I, sp.eye(len(R_I)))
print "R_I = I:", sp.allclose(R_I, sp.eye(len(R_I)))
def SU2_get_P2(max_2l=3):
    """Diagonal Casimir operator on the truncated basis.

    Each irrep with 2l = twol contributes the eigenvalue l(l+1) with
    multiplicity (2l+1)^2; the blocks are stacked in order of increasing l.
    Returns a real (dim x dim) diagonal matrix.
    """
    diag_entries = []
    for twol in range(max_2l + 1):
        l = twol / 2.
        diag_entries.extend([l * (l + 1)] * (twol + 1)**2)
    # Total dimension of the truncated space: sum over irreps of (2l+1)^2.
    expected_dim = sp.sum((sp.arange(max_2l + 1) + 1)**2)
    assert len(diag_entries) == expected_dim
    return sp.diag(sp.array(diag_entries, dtype=sp.float64))
def SU2_build_index(max_2l=3):
    """Tabulate the basis labels (l, m_j, m_k) of the truncated space.

    For each irrep 2l = twol the (2l+1)^2 rows list m_j = j' - l and
    m_k = k' - l in lexicographic (j', k') order.  Returns a float array of
    shape (sum_l (2l+1)^2, 3).
    """
    tbl = []
    # ``range`` instead of ``xrange`` so the helper also runs under Python 3
    # (identical iteration behaviour under Python 2).
    for twol in range(max_2l + 1):
        l = twol / 2.
        tbl += [[l, jp - l, kp - l] for jp in range(twol + 1) for kp in range(twol + 1)]
    return sp.array(tbl, dtype=sp.float64)
def SU2_build_index_ints(max_2l=3):
    """Integer version of :func:`SU2_build_index`: rows are (2l, j', k')
    with j', k' in 0..2l, in lexicographic order, one block per irrep.
    """
    tbl = []
    # ``range`` instead of ``xrange`` so the helper also runs under Python 3
    # (identical iteration behaviour under Python 2).
    for twol in range(max_2l + 1):
        tbl += [[twol, jp, kp] for jp in range(twol + 1) for kp in range(twol + 1)]
    return sp.array(tbl)
def SU2_build_CGs(max_2l=3):
    """This grabs a tensor of Clebsch-Gordan coefficients <lL,mL|lR,mR;1/2,mM>
    skipping the zeros where the l1 != l3 +/- 1/2.
    There is a cutoff in l given by max_2l.
    Uses sympy to get the CG coefficients exactly before converting to floats.
    """
    from sympy.physics.quantum.cg import CG
    vtb = []
    half = sy.S(1) / 2  # exact rational 1/2: the coupled spin
    for twolL in xrange(max_2l + 1):
        # One slot per possible right-hand 2l; stays None where the
        # coupling is forbidden by the selection rule.
        vtb_ = [None] * (max_2l + 1)
        lL = sy.S(twolL) / 2
        for twolR in [twolL - 1, twolL + 1]:  # selection rule: lL = lR +/- 1/2
            if twolR > max_2l or twolR < 0:
                continue
            lR = sy.S(twolR) / 2
            # Nested layout: vtb_[twolR][mLp][mMp][mRp], where each primed
            # index m' runs 0..2l and the physical m is m' - l.
            vtb_[twolR] = [[[sy.N(CG(lR, mRp - lR, half, mMp - half, lL, mLp - lL).doit())
                             for mRp in xrange(twolR + 1)]
                            for mMp in [0, 1]]
                           for mLp in xrange(twolL + 1)]
        vtb.append(vtb_)
    return vtb
def SU2_test_CGs(max_2l=3):
CGs = SU2_build_CGs(max_2l=max_2l)
UCG = sp.zeros((6, 6), dtype=sp.float64)
UCG.fill(sp.NaN)
tlR = 2
for tlL in [tlR - 1, tlR + 1]:
if tlL < 0 or tlL > max_2l:
continue
shft = (4 if tlL == 1 else 0)
for mLp in xrange(tlL + 1):
for mMp in [0, 1]:
for mRp in xrange(tlR + 1):
UCG[shft + mLp, | |
# test_Feb_18.py
#7.7.2019: Addition of axis ticks and labels and legends.
#7.7.2019: classifier comparison alpha colors adjusted to make testing points more transparent, removed QDA and neural net
##7.7.2019: runlog60. Emphasis on knn and svc.
##7.8.2019: n_neighbors reduced to 10; too slow
#7.13.2019: module importing condensed to top of script. running runLog017.
#7.18.2019: runlog60.
# NOTE(review): the module has no docstring (it starts with comments), so
# __doc__ is None here and this prints "None".
print(__doc__)
import pandas as pd
import csv
from numpy import array
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets, svm
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
from sklearn.neighbors import NearestCentroid
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
# Load the sensor log.  NOTE(review): the path "C:\\.csv" looks like a
# placeholder -- point it at the real data file before running.
#
# NOTE(review): 'temp3Dif' 'hum4Dif' below is missing a comma, so Python
# concatenates the two literals into the single label 'temp3Difhum4Dif'.
# That leaves 37 names in total, which is exactly what makes column 36
# 'elapsedTime' as the indexing further down expects -- adding the comma
# would shift every later column index by one.  Confirm the CSV's real
# column count before "fixing" this.  (The #4/#9/... tallies in the
# trailing comments do not match the actual 0-based indices.)
aa1 = (pd.read_csv(r"C:\\.csv", names=['classs','hum1.0', 'temp1.0', 'hum2.0', 'temp2.0', 'hum3.0', #4
                                       'temp3.0', 'hum4.0', 'temp4.0', 'hum5.0', 'temp5.0', #9
                                       'locA', 'statusA', 'statusB', 'hum1.1', #13
                                       'temp1.1', 'hum2.1', 'temp2.1', 'hum3.1', 'temp3.1', #18
                                       'hum4.1', 'temp4.1', 'hum5.1', 'temp5.1', 'status', #23
                                       'hum1Dif', 'temp1Dif', 'hum2dif', 'temp2Dif', 'hum3dif', #28
                                       'temp3Dif' 'hum4Dif', 'temp4Dif', 'hum5dif', 'temp5Dif', #33
                                       'startTime', 'endTime', 'elapsedTime'])) #36
print(aa1)
aa1 = array(aa1)
print(aa1[:,36])  # last column ('elapsedTime' under the 37-name layout)
def plot_decision_function(classifier, sample_weight, axis, title):
    """Draw *classifier*'s decision surface and the weighted samples on *axis*.

    Relies on the module-level arrays ``X`` and ``y``.
    NOTE(review): this helper is never called anywhere in the script.
    """
    # plot the decision function
    y_min, y_max = X[:, 1].min() - 2, X[:, 1].max() + 2
    # NOTE(review): 1000000 points per axis gives a 10^12-cell grid that
    # cannot fit in memory -- the intended value was probably ~1000; confirm
    # before calling this function.
    xx, yy = np.meshgrid(np.linspace(8, 14, 1000000), np.linspace(y_min, y_max, 1000000))
    Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # plot the line, the points, and the nearest vectors to the plane
    # BUGFIX: the original called ``plot.contourf`` but no name ``plot``
    # exists anywhere in the script (NameError on first call); draw on the
    # axis that was passed in, consistent with the rest of the function.
    axis.contourf(xx, yy, Z, alpha=0.65, cmap=plt.cm.plasma)
    axis.scatter(X[:, 0], X[:, 1], c=y, s=32 * sample_weight, alpha=0.55,
                 cmap=plt.cm.plasma, edgecolors='black')
    x_min, x_max = X[:, 0].min()-1, X[:, 0].max()+1  # NOTE(review): unused
    y_min, y_max = X[:, 1].min() - 10, X[:, 1].max() + 10  # NOTE(review): unused
    ## axis.axis('on')
    axis.set_title(title)
    # NOTE(review): ``ax`` and ``cm`` are module-level names created later in
    # the script; this only works if the function is called after they exist.
    jfk = ax.contourf(xx, yy, Z, cmap=cm, alpha=.75)
    plt.colorbar(jfk, ax=ax)
# Quick look at the raw data: column 12 against column 24, then the
# distribution of column 12.
plt.scatter(x=(aa1[:,12]),y=(aa1[:,24]))
plt.title("phase")
plt.show()
plt.hist(x=(aa1[:,12]),bins=100)
plt.show()
# Rates of change: columns 24-32 normalised by the last column (36,
# elapsed time).  NOTE(review): under the 37-name column layout produced by
# the read_csv call above, the "Dif" columns sit at 25-33, so this range
# looks shifted by one (it starts at 'status' and misses 'temp5Dif') --
# verify against the real CSV layout.
y31=((aa1[:,24])/(aa1[:,36]))
y32=((aa1[:,25])/(aa1[:,36]))
y33=((aa1[:,26])/(aa1[:,36]))
y34=((aa1[:,27])/(aa1[:,36]))
y35=((aa1[:,28])/(aa1[:,36]))
y36=((aa1[:,29])/(aa1[:,36]))
y37=((aa1[:,30])/(aa1[:,36]))
y38=((aa1[:,31])/(aa1[:,36]))
y39=((aa1[:,32])/(aa1[:,36]))
def rocFxn(r, s, u, v):
    """Show four diagnostic figures for one sensor channel.

    r -- x-axis values (e.g. a humidity column)
    s -- rate of change (difference / elapsed time)
    u -- raw difference column
    v -- label used as the figure title
    """
    # Figure 1: rate vs. r.  The second title call wins, so this figure is
    # always labelled "change / time elapsed" rather than v.
    plt.title(v)
    plt.title("change / time elapsed")
    plt.plasma  # no-op attribute lookup, kept from the original
    plt.scatter(x=r, y=s)
    plt.show()
    # Figure 2: raw difference vs. r.
    plt.title(v)
    plt.scatter(x=r, y=u)
    plt.show()
    # Figures 3 and 4: two histograms of the rate, differing only in binning.
    for nbins in (75, 55):
        plt.title(v)
        plt.hist(x=s, bins=nbins)
        plt.show()
# Diagnostic plots for the first humidity channel.
rocFxn((aa1[:,10]),(y32), (aa1[:,26]), ("hum1"))
# NOTE(review): the triple-quoted block below is a bare string expression,
# not the module docstring (the file does not start with it), so the
# print(__doc__) that follows does NOT print it -- it prints None.
"""
=====================
Classifier comparison
=====================
A comparison of a several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
##Code source: <NAME>
## <NAME>
## Modified for documentation by <NAME>
## License: BSD 3 clause
import csv
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
h = .02  # step size in the mesh
# NOTE(review): only four names are listed, and the comparison loops below
# pair them with ``zip(names, classifiers)`` -- zip stops at the shorter
# sequence, so the tree / forest / AdaBoost / NB instances further down are
# constructed but never fitted or plotted.
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Gaussian Process"]
##,
## "Decision Tree", "Random Forest", "AdaBoost", "Naive Bayes"]#"Neural Net", "AdaBoost",
#"Naive Bayes"], "QDA"]
classifiers = [
    KNeighborsClassifier(4),
    SVC(kernel="linear", C=1),
    SVC(gamma=2, C=1),
    GaussianProcessClassifier(1.0 * RBF(1.0)),
    # The following four are unused while ``names`` has only four entries.
    DecisionTreeClassifier(max_depth=5),
    RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
    AdaBoostClassifier(),
    GaussianNB()]#,
#################################################################################################################################
# Classifier comparison no. 1: feature columns 10 and 24 of the sensor log.
X = aa1[:, [10,24]]
y = (aa1[:, 11]
     .tolist())
x_min, x_max = X[:, 0].min()-1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 10, X[:, 1].max() + 10
# NOTE(review): ``datasets`` is the 2-tuple (X, y), so the loop below runs
# TWICE (once with ds = X, once with ds = y).  ``ds`` itself is never used
# because the unpacking line is commented out, so the same global X/y are
# fitted and plotted both times.
datasets = X, y
figure = plt.figure()
i = 1
# iterate over datasets
for ds_cnt, ds in enumerate(datasets):
    # preprocess dataset, split into training and test part
    ## X, y = ds
    ## X = StandardScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = \
        train_test_split(X, y, test_size=.4, random_state=42)
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    # just plot the dataset first
    cm = plt.cm.plasma
    cm_bright = plt.cm.plasma#ListedColormap(['#FF0000', '#0000FF'])
    ax = plt.subplot(2,5,i)#len(datasets), len(classifiers) + 1, i)
    if ds_cnt == 0:
        ax.set_title("Input data")
    # Plot the training points
    ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
               edgecolors='k')
    # Plot the testing points
    ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6,
               edgecolors='k')
    ax.axis('on')
    i += 1
    # iterate over classifiers
    for name, clf in zip(names, classifiers):
        ax = plt.subplot(2,5,i)#len(datasets), len(classifiers) + 1, i)
        clf.fit(X_train, y_train)
        score = clf.score(X_test, y_test)
        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
        if hasattr(clf, "decision_function"):
            Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
        else:
            Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        print(xx.shape)
        ax.contourf(xx, yy, Z, cmap=cm, alpha=.75)
        print(ax.contourf)
        plt.axis('on')
        # Plot the training points
        ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
                   edgecolors='k')
        # Plot the testing points
        ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
                   edgecolors='k', alpha=0.36)
        if ds_cnt == 0:
            ax.set_title(name)
        ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
                size=8, horizontalalignment='right')
        i += 1
        ax.axis('on')
        # Redraw the contour for this panel and attach a colorbar to it.
        jfk = ax.contourf(xx, yy, Z, cmap=cm, alpha=.75)
        plt.colorbar(jfk, ax=ax)
plt.legend()
#############################################################################
# Classifier comparison no. 2: feature columns 10 and 26 of the sensor log.
X = aa1[:, [10,26]]
y = (aa1[:, 11]
     .tolist())
# NOTE(review): x_min/x_max/y_min/y_max (and h) are reused from the previous
# section even though they were computed from a different feature pair.
# NOTE(review): as above, ``datasets`` = (X, y) makes this loop run twice on
# the same globals; ``ds`` is unused.
datasets = X, y
figure = plt.figure()
i = 1
# iterate over datasets
for ds_cnt, ds in enumerate(datasets):
    # preprocess dataset, split into training and test part
    ## X, y = ds
    ## X = StandardScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = \
        train_test_split(X, y, test_size=.4, random_state=42)
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    # just plot the dataset first
    cm = plt.cm.plasma
    cm_bright = plt.cm.plasma#ListedColormap(['#FF0000', '#0000FF'])
    # NOTE(review): subplot(111) makes the "Input data" panel fill the whole
    # figure; the classifier panels below overlay it on a 2x5 grid.
    ax = plt.subplot(111)#2,5,i)#len(datasets), len(classifiers) + 1, i)
    if ds_cnt == 0:
        ax.set_title("Input data")
    # Plot the training points
    ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
               edgecolors='k')
    # Plot the testing points
    ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6,
               edgecolors='k')
    ax.axis('on')
    i += 1
    # iterate over classifiers
    for name, clf in zip(names, classifiers):
        ax = plt.subplot(2,5,i)#len(datasets), len(classifiers) + 1, i)
        clf.fit(X_train, y_train)
        score = clf.score(X_test, y_test)
        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
        if hasattr(clf, "decision_function"):
            Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
        else:
            Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        ax.contourf(xx, yy, Z, cmap=cm, alpha=.75)
        plt.axis('on')
        # Plot the training points
        ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
                   edgecolors='k')
        # Plot the testing points
        ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
                   edgecolors='k', alpha=0.36)
        if ds_cnt == 0:
            ax.set_title(name)
        ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
                size=8, horizontalalignment='right')
        i += 1
        ax.axis('on')
        # Redraw the contour for this panel and attach a colorbar to it.
        jfk = ax.contourf(xx, yy, Z, cmap=cm, alpha=.75)
        plt.colorbar(jfk, ax=ax)
plt.legend()
##print(time.perfcounter())
#######################################################################################################################################################################################
#######################################################################################################################################################################################
# Classifier comparison no. 3: feature columns 10 and 28 of the sensor log.
X = aa1[:, [10,28]]
y = (aa1[:, 11]
     .tolist())
# NOTE(review): as in the sections above, ``datasets`` = (X, y) makes the
# loop run twice on the same globals, and the mesh limits are reused from
# the first section.
datasets = X, y
figure = plt.figure()#figsize=(7, 13))
i = 1
# iterate over datasets
for ds_cnt, ds in enumerate(datasets):
    # preprocess dataset, split into training and test part
    ## X, y = ds
    ## X = StandardScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = \
        train_test_split(X, y, test_size=.4, random_state=42)
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    # just plot the dataset first
    cm = plt.cm.plasma
    cm_bright = plt.cm.plasma #ListedColormap(['#FF0000', '#0000FF'])
    ax = plt.subplot(2,5,i)#len(datasets), len(classifiers) + 1, i)
    if ds_cnt == 0:
        ax.set_title("Input data")
    # Plot the training points
    ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
               edgecolors='k')
    # Plot the testing points
    ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6,
               edgecolors='k')
    ax.axis('on')
    i += 1
    # iterate over classifiers
    for name, clf in zip(names, classifiers):
        ax = plt.subplot(2,5,i)#len(datasets), len(classifiers) + 1, i)
        clf.fit(X_train, y_train)
        score = clf.score(X_test, y_test)
        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
        if hasattr(clf, "decision_function"):
            Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
        else:
            Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        print(xx)
        ax.contourf(xx, yy, Z, cmap=cm, alpha=.75)
        plt.axis('on')
        # Plot the training points
        ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
                   edgecolors='k')
        # Plot the testing points
        ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
                   edgecolors='k', alpha=0.36)
        if ds_cnt == 0:
            ax.set_title(name)
        ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
                size=8, horizontalalignment='right')
        i += 1
        ax.axis('on')
        # Redraw the contour for this panel and attach a colorbar to it.
        jfk = ax.contourf(xx, yy, Z, cmap=cm, alpha=.75)
        plt.colorbar(jfk, ax=ax)
plt.legend()
plt.show()
"""
=========================================================
K-means Clustering
=========================================================
The plots display firstly what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
By setting n_init to only 1 (default is 10), the amount of
times | |
st.session_state:
Load_Data(20)
## Title
if st.session_state['init_data']['finished_game']:
st.markdown("<h1 style='text-align:left; float:left; color:blue; margin:0px;'>Guess Who?</h1>", unsafe_allow_html=True)
else:
st.markdown("<h1 style='text-align:left; float:left; color:blue; margin:0px;'>Guess Who?</h1><h2 style='text-align:right; float:right; color:gray; margin:0px;'>score: "+ str(st.session_state['init_data']['award'])+"</h2>", unsafe_allow_html=True)
## GAME
if Reset_App:
Load_Data(Total_Images_Number)
Restart_App = st.button('GO TO IMAGES SELECTION TO START A NEW GAME', key='Restart_App')
else:
## FINISHED GAME BUTTON TO RELOAD GAME
if st.session_state['init_data']['finished_game']:
Restart_App = st.button('GO TO IMAGES SELECTION TO START NEW GAME', key='Restart_App')
if st.session_state['init_data']['award']==1 or st.session_state['init_data']['award']==-1:
st.markdown("<h1 style='text-align:left; float:left; color:black; margin-left:0px; margin-right:15px; margin-top:0px; margin-bottom:0px;'>¡¡¡ FINISHED WITH</h1><h1 style='text-align:left; float:left; color:green; margin-left:0px; margin-right:15px; margin-top:0px; margin-bottom:0px;'>"+str(st.session_state['init_data']['award'])+"</h1><h1 style='text-align:left; float:left; color:black; margin:0px;'>POINT !!!</h1>", unsafe_allow_html=True)
else:
st.markdown("<h1 style='text-align:left; float:left; color:black; margin-left:0px; margin-right:15px; margin-top:0px; margin-bottom:0px;'>¡¡¡ FINISHED WITH</h1><h1 style='text-align:left; float:left; color:green; margin-left:0px; margin-right:15px; margin-top:0px; margin-bottom:0px;'>"+str(st.session_state['init_data']['award'])+"</h1><h1 style='text-align:left; float:left; color:black; margin:0px;'>POINTS !!!</h1>", unsafe_allow_html=True)
else:
st.session_state['init_data']['images_selected']=False
## INITIALIZATION (SELECT FIGURES)
if not st.session_state['init_data']['start_game']:
## Select images source
st.sidebar.markdown('## Image selection source:')
Selected_Images_Source=st.sidebar.selectbox('(Choose between default random images or specific source path)',
['Use Celeba dataset random images',
'Use friends random images',
'Use family random images',
'Use images from specific path'],
index=0, key='Selected_Images_Source', help=None)
## Select images source - Celeba default
if Selected_Images_Source=='Use Celeba dataset random images':
st.session_state['init_data']['zip_file']='guess_who_images.zip'
if st.session_state['init_data']['zip_file']!=st.session_state['init_data']['previous_zip_file']:
st.session_state['init_data']['previous_zip_file']=st.session_state['init_data']['zip_file']
Select_Images_Randomly()
st.session_state['init_data']['winner_options']=st.session_state['init_data']['current_image_names']
## Default source text
st.markdown("<h2 style='text-align:left; float:left; color:black; margin:0px;'>1. Choose the images you like.</h2>",
unsafe_allow_html=True)
st.markdown("<h3 style='text-align:left; float:left; color:gray; margin:0px;'>Press the button to randomly modify the selected images.</h3>",
unsafe_allow_html=True)
## Button - randomly change Celeba images
Random_Images = st.button('CHANGE IMAGES', key='Random_Images')
if Random_Images:
Select_Images_Randomly()
st.session_state['init_data']['winner_options']=st.session_state['init_data']['current_image_names']
## Button - start game
st.markdown("<h2 style='text-align:left; float:left; color:black; margin:0px;'>2. Press the button to start the game.</h2>", unsafe_allow_html=True)
Use_Images = st.button('START GAME', key='Use_Images')
if Use_Images:
## Choose winner and start game
st.session_state['init_data']['current_winner_index']=random.choice(list(range(0,st.session_state['init_data']['N_images'])))
st.session_state['init_data']['start_game']=True
st.session_state['init_data']['images_selected']=True
## Select images source - Friends default
if Selected_Images_Source=='Use friends random images':
st.session_state['init_data']['zip_file']='guess_who_images_friends.zip'
if st.session_state['init_data']['zip_file']!=st.session_state['init_data']['previous_zip_file']:
st.session_state['init_data']['previous_zip_file']=st.session_state['init_data']['zip_file']
Select_Images_Randomly()
st.session_state['init_data']['winner_options']=st.session_state['init_data']['current_image_names']
## Default source text
st.markdown("<h2 style='text-align:left; float:left; color:black; margin:0px;'>1. Choose the images you like.</h2>",
unsafe_allow_html=True)
st.markdown("<h3 style='text-align:left; float:left; color:gray; margin:0px;'>Press the button to randomly modify the selected images.</h3>",
unsafe_allow_html=True)
## Button - randomly change Celeba images
Random_Images = st.button('CHANGE IMAGES', key='Random_Images')
if Random_Images:
Select_Images_Randomly()
st.session_state['init_data']['winner_options']=st.session_state['init_data']['current_image_names']
## Button - start game
st.markdown("<h2 style='text-align:left; float:left; color:black; margin:0px;'>2. Press the button to start the game.</h2>", unsafe_allow_html=True)
Use_Images = st.button('START GAME', key='Use_Images')
if Use_Images:
## Choose winner and start game
st.session_state['init_data']['current_winner_index']=random.choice(list(range(0,st.session_state['init_data']['N_images'])))
st.session_state['init_data']['start_game']=True
st.session_state['init_data']['images_selected']=True
## Select images source - Celeba default
if Selected_Images_Source=='Use family random images':
st.session_state['init_data']['zip_file']='guess_who_images_family.zip'
if st.session_state['init_data']['zip_file']!=st.session_state['init_data']['previous_zip_file']:
st.session_state['init_data']['previous_zip_file']=st.session_state['init_data']['zip_file']
Select_Images_Randomly()
st.session_state['init_data']['winner_options']=st.session_state['init_data']['current_image_names']
## Default source text
st.markdown("<h2 style='text-align:left; float:left; color:black; margin:0px;'>1. Choose the images you like.</h2>",
unsafe_allow_html=True)
st.markdown("<h3 style='text-align:left; float:left; color:gray; margin:0px;'>Press the button to randomly modify the selected images.</h3>",
unsafe_allow_html=True)
## Button - randomly change Celeba images
Random_Images = st.button('CHANGE IMAGES', key='Random_Images')
if Random_Images:
Select_Images_Randomly()
st.session_state['init_data']['winner_options']=st.session_state['init_data']['current_image_names']
## Button - start game
st.markdown("<h2 style='text-align:left; float:left; color:black; margin:0px;'>2. Press the button to start the game.</h2>", unsafe_allow_html=True)
Use_Images = st.button('START GAME', key='Use_Images')
if Use_Images:
## Choose winner and start game
st.session_state['init_data']['current_winner_index']=random.choice(list(range(0,st.session_state['init_data']['N_images'])))
st.session_state['init_data']['start_game']=True
st.session_state['init_data']['images_selected']=True
## Select images source - Celeba specific path
if Selected_Images_Source=='Use images from specific path':
## Specific source text
st.markdown("<h2 style='text-align:left; float:left; color:black; margin:0px;'>1. Choose the images you like.</h2>",
unsafe_allow_html=True)
st.markdown("<h3 style='text-align:left; float:left; color:gray; margin:0px;'>To use images from specific path, press 'Use Path'. Press it again to randomly modify the selected images.</h3>",
unsafe_allow_html=True)
Uploaded_File = st.file_uploader("Select images to play", type=[".zip"],accept_multiple_files=False, key="Uploaded_file")
if Uploaded_File is not None:
st.session_state['init_data']['zip_file']= Uploaded_File
Select_Images_Randomly()
st.session_state['init_data']['winner_options']=st.session_state['init_data']['current_image_names']
## Button - randomly change Celeba images
Random_Images = st.button('CHANGE IMAGES', key='Random_Images')
if Random_Images:
Select_Images_Randomly()
st.session_state['init_data']['winner_options']=st.session_state['init_data']['current_image_names']
if not (st.session_state['init_data']['zip_file']=='guess_who_images.zip' or st.session_state['init_data']['zip_file']=='guess_who_images_friends.zip' or st.session_state['init_data']['zip_file']=='guess_who_images_family.zip'):
## Button - start game
st.markdown("<h2 style='text-align:left; float:left; color:black; margin:0px;'>2. Press the button to start the game.</h2>", unsafe_allow_html=True)
Use_Images = st.button('START GAME', key='Use_Images')
if Use_Images:
## Choose winner and start game
st.session_state['init_data']['current_winner_index']=random.choice(list(range(0,st.session_state['init_data']['N_images'])))
st.session_state['init_data']['start_game']=True
st.session_state['init_data']['images_selected']=True
## RUN GAME
if st.session_state['init_data']['start_game']:
## Text - Select query type (game mode)
if st.session_state['init_data']['images_selected']:
st.markdown("<h2 style='text-align:left; float:left; color:black; margin:0px;'>3. Select a type of Query to play.</h2>", unsafe_allow_html=True)
else:
st.markdown("<h2 style='text-align:left; float:left; color:black; margin:0px;'>1. Select a type of Query to play.</h2>", unsafe_allow_html=True)
## SelectBox - Select query type (game mode)
Selected_Feature=st.selectbox('Ask a question from a list, create your query or select a winner:', Feature_Options,
index=0,
key='selected_feature', help=None)
## SHOW ELEMENTS - QUESTIONS MODE
if Selected_Feature=='Ask a Question':
## Game mode id
st.session_state['init_data']['token_type']=0
## Text - Questions mode
st.markdown("<h3 style='text-align:left; float:left; color:gray; margin-left:0px; margin-right:0px; margin-top:15px; margin-bottom:-10px;'>Select a Question from the list.</h3>", unsafe_allow_html=True)
## SelectBox - Select question
Selected_Question=st.selectbox('Suggested questions:', st.session_state['init_data']['feature_questions'],
index=0,
key='Selected_Question', help=None)
st.session_state['init_data']['selected_question']=Selected_Question # Save Info
## Current question index
if Selected_Question not in st.session_state['init_data']['feature_questions']:
Selected_Question=st.session_state['init_data']['feature_questions'][0]
st.session_state['init_data']['questions_index']=st.session_state['init_data']['feature_questions'].index(Selected_Question)
## Text - Show current question
st.markdown("<h3 style='text-align:center; float:left; color:blue; margin-left:0px; margin-right:25px; margin-top:0px; margin-bottom:0px;'>Current Question: </h3><h3 style='text-align:left; float:center; color:green; margin:0px;'>"+Selected_Question+"</h3>", unsafe_allow_html=True)
## Button - Use current question
Check_Question = st.button('USE THIS QUESTION', key='Check_Question')
st.session_state['init_data']['button_question']=Check_Question # Save Info
## Check current question
if st.session_state['init_data']['show_results']:
st.session_state['init_data']['show_results']=False
else:
if Check_Question:
if Selected_Question=='Are you bald?':
st.session_state['init_data']['current_querys']=['A picture of a male person','A picture of a female person',
'A picture of a bald man','A picture of a haired man',
'A picture of a bald person','A picture of a person']
st.session_state['init_data']['function_predict']=Predict_bald
elif Selected_Question=='Do you have BLACK HAIR?':
st.session_state['init_data']['current_querys']=['A picture of a black-haired person',
'A picture of a tawny-haired person',
'A picture of a blond-haired person',
'A picture of a gray-haired person',
'A picture of a red-haired person',
'A picture of a green-haired person',
'A picture of a blue-haired person',
'A picture of a bald-head person']
st.session_state['init_data']['function_predict']=Predict_0_vs_all
elif Selected_Question=='Do you have BROWN HAIR?':
st.session_state['init_data']['current_querys']=['A picture of a tawny-haired person',
'A picture of a black-haired person',
'A picture of a blond-haired person',
'A picture of a gray-haired person',
'A picture of a red-haired person',
'A picture of a green-haired person',
'A picture of a blue-haired person',
'A picture of a bald-head person']
st.session_state['init_data']['function_predict']=Predict_0_vs_all
elif Selected_Question=='Do you have BLOND HAIR?':
st.session_state['init_data']['current_querys']=['A picture of a blond-haired person',
'A picture of a tawny-haired person',
'A picture of a black-haired person',
'A picture of a gray-haired person',
'A picture of a red-haired person',
'A picture of a green-haired person',
'A picture of a blue-haired person',
'A picture of a bald-head person']
st.session_state['init_data']['function_predict']=Predict_0_vs_all
elif Selected_Question=='Do you have RED HAIR?':
st.session_state['init_data']['current_querys']=['A picture of a red-haired person',
'A picture of a tawny-haired person',
'A picture of a blond-haired person',
'A picture of a gray-haired person',
'A picture of a black-haired person',
'A picture of a green-haired person',
'A picture of a blue-haired person',
'A picture of a bald-head person']
st.session_state['init_data']['function_predict']=Predict_0_vs_all
elif Selected_Question=='Do you have GRAY HAIR?':
st.session_state['init_data']['current_querys']=['A picture of a gray-haired person',
'A picture of a tawny-haired person',
'A picture of a blond-haired person',
'A picture of a black-haired person',
'A picture of a red-haired person',
'A picture of a green-haired person',
'A picture of a blue-haired person',
'A picture of a bald-head person']
st.session_state['init_data']['function_predict']=Predict_0_vs_all
elif not st.session_state['init_data']['show_results']:
st.session_state['init_data']['current_querys']=[st.session_state['init_data']['querys_list_yes'][st.session_state['init_data']['questions_index']],
st.session_state['init_data']['querys_list_no'][st.session_state['init_data']['questions_index']]]
st.session_state['init_data']['function_predict']=Predict_0_vs_1
CLIP_Process()
st.session_state['init_data']['function_predict']()
st.session_state['init_data']['show_results']=True
## SHOW ELEMENTS - 1 QUERY MOD
if Selected_Feature=='Create your own query':
## Game mode id
st.session_state['init_data']['token_type']=-1
## Text - Query mode
st.markdown("<h3 style='text-align:left; float:left; color:gray; margin-left:0px; margin-right:0px; margin-top:15px; margin-bottom:-10px;'>Write your own query and press the button.</h3>", unsafe_allow_html=True)
## TextInput - Select query
User_Input = st.text_input('It is recommended to use a text like: "A picture of a ... person" or "A picture of a person ..." (CLIP will check -> "Your query" vs "A picture of a person" )', 'A picture of a person', key='User_Input', help=None)
st.session_state['init_data']['user_input']=User_Input # Save Info
## Text - Show current query
st.markdown("<h3 style='text-align:center; float:left; color:blue; margin-left:0px; margin-right:25px; margin-top:0px; margin-bottom:0px;'>Current Query: </h3><h3 style='text-align:left; float:center; color:green; margin:0px;'>"+User_Input+"</h3>", unsafe_allow_html=True)
## Button - Use current query
Check_Query = st.button('USE MY OWN QUERY', key='Check_Query')
st.session_state['init_data']['button_query1']=Check_Query # Save Info
## Check current question
if st.session_state['init_data']['show_results']:
st.session_state['init_data']['show_results']=False
else:
if Check_Query:
if User_Input!='A picture of | |
#!/usr/bin/env python
#
# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
#
import os
import sys
import tempfile
import unittest
from io import StringIO
try:
from ldgen.fragments import Align, Flag, Keep, Sort, Surround, parse_fragment_file
from ldgen.sdkconfig import SDKConfig
except ImportError:
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from ldgen.fragments import Align, Flag, Keep, Sort, Surround, parse_fragment_file
from ldgen.sdkconfig import SDKConfig
from pyparsing import ParseException, ParseFatalException
class FragmentTest(unittest.TestCase):
    """Base class for ldgen fragment-file parsing tests.

    Prepares the environment variables and the :class:`SDKConfig` instance
    that ``parse_fragment_file`` needs to evaluate ``if``/``elif``/``else``
    conditionals inside fragment files.  Judging by the expected results in
    the tests below, the test sdkconfig (``data/sdkconfig``) defines
    ``A=y``, ``B=n`` and ``C=y`` — TODO confirm against the data files.
    """

    def setUp(self):
        # Only the temp-file *paths* are needed; the files can stay empty
        # because COMPONENT_KCONFIGS / COMPONENT_KCONFIGS_PROJBUILD are ''.
        with tempfile.NamedTemporaryFile(delete=False) as f:
            self.kconfigs_source_file = os.path.join(tempfile.gettempdir(), f.name)
        with tempfile.NamedTemporaryFile(delete=False) as f:
            self.kconfig_projbuilds_source_file = os.path.join(tempfile.gettempdir(), f.name)

        os.environ['COMPONENT_KCONFIGS_SOURCE_FILE'] = self.kconfigs_source_file
        os.environ['COMPONENT_KCONFIGS_PROJBUILD_SOURCE_FILE'] = self.kconfig_projbuilds_source_file
        os.environ['COMPONENT_KCONFIGS'] = ''
        os.environ['COMPONENT_KCONFIGS_PROJBUILD'] = ''

        # prepare_kconfig_files.py doesn't have to be called because COMPONENT_KCONFIGS and
        # COMPONENT_KCONFIGS_PROJBUILD are empty
        self.sdkconfig = SDKConfig('data/Kconfig', 'data/sdkconfig')

    def tearDown(self):
        # Best-effort removal of the temp files created in setUp().
        try:
            os.remove(self.kconfigs_source_file)
            os.remove(self.kconfig_projbuilds_source_file)
        except Exception:
            pass

    @staticmethod
    def create_fragment_file(contents, name='test_fragment.lf'):
        """Wrap *contents* in a named file-like object usable by parse_fragment_file()."""
        f = StringIO(contents)
        f.name = name
        return f

    def test_basic(self):
        """Entries parse into a set; '#' comments are ignored."""
        test_fragment = self.create_fragment_file(u"""
        [sections:test]
        entries:
            value_1
            value_2 # comments should be ignored
            value_3
            # this is a comment as well
            value_a
            # this is the last comment
        """)
        fragment_file = parse_fragment_file(test_fragment, self.sdkconfig)
        self.assertEqual(fragment_file.fragments[0].name, 'test')
        self.assertEqual(fragment_file.fragments[0].entries, {'value_1', 'value_2', 'value_3', 'value_a'})

    def test_conditional(self):
        """Entry-level conditionals are resolved against the sdkconfig.

        A fragment whose 'entries' all evaluate away raises ParseFatalException.
        """
        test_fragment = self.create_fragment_file(u"""
        [sections:test]
        entries:
            value_1
            if A = y:
                value_2
                value_3
            if A = n:
                value_4
            if B = n:
                value_5
        """)
        fragment_file = parse_fragment_file(test_fragment, self.sdkconfig)
        self.assertEqual(fragment_file.fragments[0].name, 'test')
        self.assertEqual(fragment_file.fragments[0].entries, {'value_1', 'value_2', 'value_3', 'value_5'})

        test_fragment = self.create_fragment_file(u"""
        [sections:test]
        entries:
            value_1
            if B = y:
                value_2
            elif C = y:
                value_3
            elif A = y:
                value_4
            else:
                value_5
            value_6
        """)
        fragment_file = parse_fragment_file(test_fragment, self.sdkconfig)
        self.assertEqual(fragment_file.fragments[0].name, 'test')
        self.assertEqual(fragment_file.fragments[0].entries, {'value_1', 'value_3', 'value_6'})

        # Nested conditionals.
        test_fragment = self.create_fragment_file(u"""
        [sections:test]
        entries:
            value_1
            if A = y:
                value_2
                if B = y:
                    value_3
                else:
                    value_4
                if C = y:
                    value_5
                value_6
            value_7
        """)
        fragment_file = parse_fragment_file(test_fragment, self.sdkconfig)
        self.assertEqual(fragment_file.fragments[0].name, 'test')
        self.assertEqual(fragment_file.fragments[0].entries,
                         {'value_1', 'value_2', 'value_4', 'value_5', 'value_6', 'value_7'})

        # Condition is false, so the fragment ends up with no entries -> fatal.
        test_fragment = self.create_fragment_file(u"""
        [sections:test]
        entries:
            if A = n:
                value_2
        """)
        with self.assertRaises(ParseFatalException):
            parse_fragment_file(test_fragment, self.sdkconfig)

    def test_empty_file(self):
        """A file with no fragments parses to an empty fragment list."""
        test_fragment = self.create_fragment_file(u"""
        """)
        fragment_file = parse_fragment_file(test_fragment, self.sdkconfig)
        self.assertEqual(len(fragment_file.fragments), 0)

    def test_setting_indent(self):
        """The first entry's indentation defines the expected indent."""
        test_fragment = self.create_fragment_file(u"""
        [sections:test]
        entries:
         value_1
         value_2
         value_3
        """)
        fragment_file = parse_fragment_file(test_fragment, self.sdkconfig)
        self.assertEqual(fragment_file.fragments[0].name, 'test')
        self.assertEqual(fragment_file.fragments[0].entries, {'value_1', 'value_2', 'value_3'})

    def test_settings_unmatch_indent(self):
        """An entry whose indent differs from the first entry's is rejected."""
        test_fragment = self.create_fragment_file(u"""
        [sections:test]
        entries:
         value_1
          value_2 # first element dictates indent
         value_3
        """)
        with self.assertRaises(ParseException):
            parse_fragment_file(test_fragment, self.sdkconfig)

    def test_unsupported_key(self):
        """Keys other than the ones a fragment type supports are rejected."""
        test_fragment = self.create_fragment_file(u"""
        [sections:test]
        key_1:
            value_a
        """)
        with self.assertRaises(ParseException):
            parse_fragment_file(test_fragment, self.sdkconfig)

    def test_empty_fragment(self):
        """A fragment header with no keys at all is rejected."""
        test_fragment = self.create_fragment_file(u"""
        [sections:test]
        """)
        with self.assertRaises(ParseException):
            parse_fragment_file(test_fragment, self.sdkconfig)

    def test_empty_conditional(self):
        """Conditional branches must have bodies; 'else' takes no condition."""
        # 'if' with an empty body.
        test_fragment = self.create_fragment_file(u"""
        [sections:test]
        entries:
            if B = y:
            else:
                value_1
        """)
        with self.assertRaises(ParseException):
            parse_fragment_file(test_fragment, self.sdkconfig)

        # 'else' must not carry a condition.
        test_fragment = self.create_fragment_file(u"""
        [sections:test]
        entries:
            if B = y:
                value_1
            else B = y:
        """)
        with self.assertRaises(ParseFatalException):
            parse_fragment_file(test_fragment, self.sdkconfig)

        # 'elif' with an empty body.
        test_fragment = self.create_fragment_file(u"""
        [sections:test]
        entries:
            if B = y:
                value_1
            elif B = y:
            else:
                value_2
        """)
        with self.assertRaises(ParseFatalException):
            parse_fragment_file(test_fragment, self.sdkconfig)

    def test_out_of_order_conditional(self):
        """'elif'/'else' without a preceding 'if' is a parse error."""
        test_fragment = self.create_fragment_file(u"""
        [sections:test]
        entries:
            elif B = y:
                value_1
            else:
                value_2
        """)
        with self.assertRaises(ParseException):
            parse_fragment_file(test_fragment, self.sdkconfig)

        test_fragment = self.create_fragment_file(u"""
        [sections:test]
        entries:
            else:
                value_2
        """)
        with self.assertRaises(ParseException):
            parse_fragment_file(test_fragment, self.sdkconfig)

    def test_multiple_fragments(self):
        """Several fragments of different types may live in one file."""
        test_fragment = self.create_fragment_file(u"""
        [sections:test1]
        entries:
            value_1

        [scheme:test2]
        entries:
            section -> target
        """)
        fragment_file = parse_fragment_file(test_fragment, self.sdkconfig)
        self.assertEqual(fragment_file.fragments[0].name, 'test1')
        self.assertEqual(fragment_file.fragments[0].entries, {'value_1'})
        self.assertEqual(fragment_file.fragments[1].name, 'test2')
        self.assertEqual(fragment_file.fragments[1].entries, {('section', 'target')})

    def test_whole_conditional_fragment(self):
        """Conditionals can wrap whole fragments, and can nest."""
        test_fragment = self.create_fragment_file(u"""
        if B = y:
            [sections:test1]
            entries:
                value_1
        else:
            [sections:test2]
            entries:
                value_2

            if A = y:
                [sections:test3]
                entries:
                    value_3
                    if C = y:
                        value_6

            [sections:test4]
            entries:
                value_4

        [sections:test5]
        entries:
            value_5
        """)
        fragment_file = parse_fragment_file(test_fragment, self.sdkconfig)
        self.assertEqual(len(fragment_file.fragments), 4)
        self.assertEqual(fragment_file.fragments[0].name, 'test2')
        self.assertEqual(fragment_file.fragments[0].entries, {'value_2'})
        self.assertEqual(fragment_file.fragments[1].name, 'test3')
        self.assertEqual(fragment_file.fragments[1].entries, {'value_3', 'value_6'})
        self.assertEqual(fragment_file.fragments[2].name, 'test4')
        self.assertEqual(fragment_file.fragments[2].entries, {'value_4'})
        self.assertEqual(fragment_file.fragments[3].name, 'test5')
        self.assertEqual(fragment_file.fragments[3].entries, {'value_5'})

    def test_equivalent_conditional_fragment(self):
        """Fragment-level and entry-level conditionals produce the same result."""
        test_fragment1 = self.create_fragment_file(u"""
        if A = y:
            [sections:test1]
            entries:
                value_1
        else:
            [sections:test2]
            entries:
                value_2
        """)
        fragment_file1 = parse_fragment_file(test_fragment1, self.sdkconfig)
        self.assertEqual(fragment_file1.fragments[0].name, 'test1')
        self.assertEqual(fragment_file1.fragments[0].entries, {'value_1'})

        test_fragment2 = self.create_fragment_file(u"""
        [sections:test1]
        entries:
            if A = y:
                value_1
            else:
                value_2
        """)
        fragment_file2 = parse_fragment_file(test_fragment2, self.sdkconfig)
        self.assertEqual(fragment_file2.fragments[0].name, 'test1')
        self.assertEqual(fragment_file2.fragments[0].entries, {'value_1'})
class SectionsTest(FragmentTest):
    """Grammar tests specific to ``[sections:*]`` fragments."""

    def test_basic(self):
        """Section names are collected verbatim."""
        test_fragment = self.create_fragment_file(u"""
        [sections:test]
        entries:
            .section1
            .section2
        """)
        fragment_file = parse_fragment_file(test_fragment, self.sdkconfig)
        self.assertEqual(fragment_file.fragments[0].entries, {'.section1', '.section2'})

    def test_duplicate_entries(self):
        """Duplicate entries collapse, since entries are stored as a set."""
        test_fragment = self.create_fragment_file(u"""
        [sections:test]
        entries:
            .section1
            .section2
            .section3
            .section2
        """)
        fragment_file = parse_fragment_file(test_fragment, self.sdkconfig)
        self.assertEqual(fragment_file.fragments[0].entries, {'.section1', '.section2', '.section3'})

    def test_empty_entries(self):
        """No entries -> ParseException; entries emptied by a false condition -> ParseFatalException."""
        test_fragment = self.create_fragment_file(u"""
        [sections:test]
        entries:
        """)
        with self.assertRaises(ParseException):
            parse_fragment_file(test_fragment, self.sdkconfig)

        test_fragment = self.create_fragment_file(u"""
        [sections:test]
        entries:
            if B = y:
                .section1
        """)
        with self.assertRaises(ParseFatalException):
            parse_fragment_file(test_fragment, self.sdkconfig)

    def test_entries_grammar(self):
        """Entries must start with a letter/underscore/dot; '+' only as the final char."""
        test_fragment = self.create_fragment_file(u"""
        [sections:test]
        entries:
            _valid1
            valid2.
            .valid3_-
        """)
        fragment_file = parse_fragment_file(test_fragment, self.sdkconfig)
        self.assertEqual(fragment_file.fragments[0].entries,
                         {'_valid1', 'valid2.', '.valid3_-'})

        # invalid starting char
        test_fragment = self.create_fragment_file(u"""
        [sections:test]
        entries:
            1invalid
        """)
        with self.assertRaises(ParseException):
            parse_fragment_file(test_fragment, self.sdkconfig)

        test_fragment = self.create_fragment_file(u"""
        [sections:test]
        entries:
            -invalid
        """)
        with self.assertRaises(ParseException):
            parse_fragment_file(test_fragment, self.sdkconfig)

        # + notation
        test_fragment = self.create_fragment_file(u"""
        [sections:test]
        entries:
            valid+
        """)
        fragment_file = parse_fragment_file(test_fragment, self.sdkconfig)
        self.assertEqual(fragment_file.fragments[0].entries,
                         {'valid+'})

        test_fragment = self.create_fragment_file(u"""
        [sections:test]
        entries:
            inva+lid+
        """)
        with self.assertRaises(ParseException):
            parse_fragment_file(test_fragment, self.sdkconfig)
class SchemeTest(FragmentTest):
    """Grammar tests specific to ``[scheme:*]`` fragments."""

    def test_basic(self):
        """Scheme entries parse into (sections, target) tuples."""
        test_fragment = self.create_fragment_file(u"""
        [scheme:test]
        entries:
            sections1 -> target1
            sections2 -> target2
        """)
        fragment_file = parse_fragment_file(test_fragment, self.sdkconfig)
        self.assertEqual(fragment_file.fragments[0].entries,
                         {('sections1', 'target1'),
                          ('sections2', 'target2')})

    def test_duplicate_entries(self):
        """Duplicate tuples collapse, since entries are stored as a set."""
        test_fragment = self.create_fragment_file(u"""
        [scheme:test]
        entries:
            sections1 -> target1
            sections2 -> target2
            sections2 -> target2
        """)
        fragment_file = parse_fragment_file(test_fragment, self.sdkconfig)
        self.assertEqual(fragment_file.fragments[0].entries,
                         {('sections1', 'target1'),
                          ('sections2', 'target2')})

    def test_empty_entries(self):
        """No entries -> ParseException; entries emptied by a false condition -> ParseFatalException."""
        test_fragment = self.create_fragment_file(u"""
        [scheme:test]
        entries:
        """)
        with self.assertRaises(ParseException):
            parse_fragment_file(test_fragment, self.sdkconfig)

        test_fragment = self.create_fragment_file(u"""
        [scheme:test]
        entries:
            if B = y:
                sections1 -> target1
        """)
        with self.assertRaises(ParseFatalException):
            parse_fragment_file(test_fragment, self.sdkconfig)

    def test_improper_grammar(self):
        """The sections/target separator must be '->'."""
        test_fragment = self.create_fragment_file(u"""
        [scheme:test]
        entries:
            sections1, target1 # improper separator
        """)
        with self.assertRaises(ParseException):
            parse_fragment_file(test_fragment, self.sdkconfig)
class MappingTest(FragmentTest):
    def test_basic(self):
        """Mapping entries parse into (object, symbol-or-None, scheme) tuples."""
        test_fragment = self.create_fragment_file(u"""
        [mapping:test]
        archive: lib.a
        entries:
            obj:symbol (noflash)
            obj (noflash)
            obj:symbol_2 (noflash)
            obj_2 (noflash)
            * (noflash)
        """)
        expected = {('obj', 'symbol', 'noflash'),
                    ('obj', None, 'noflash'),
                    ('obj', 'symbol_2', 'noflash'),
                    ('obj_2', None, 'noflash'),
                    ('*', None, 'noflash')}

        fragment_file = parse_fragment_file(test_fragment, self.sdkconfig)
        self.assertEqual(expected, fragment_file.fragments[0].entries)
    def test_archive(self):
        """'archive' must name exactly one library: none -> ParseException,
        more than one -> ParseFatalException."""
        test_fragment = self.create_fragment_file(u"""
        [mapping:test]
        archive:
        entries:
            * (default)
        """)
        with self.assertRaises(ParseException):
            parse_fragment_file(test_fragment, self.sdkconfig)

        test_fragment = self.create_fragment_file(u"""
        [mapping:test]
        archive:
            lib1.a
            lib2.a
        entries:
            * (default)
        """)
        with self.assertRaises(ParseFatalException):
            parse_fragment_file(test_fragment, self.sdkconfig)
    def test_archive_allowed_names(self):
        """Archive names may contain characters such as '+' (e.g. libstdc++.a)."""
        test_fragment = self.create_fragment_file(u"""
        [mapping:test]
        archive:
            libstdc++.a
        entries:
            * (default)
        """)
        fragment_file = parse_fragment_file(test_fragment, self.sdkconfig)
        self.assertEqual('libstdc++.a', fragment_file.fragments[0].archive)
    def test_empty_entries(self):
        """Mappings tolerate entries emptied by a false condition (result: empty
        set), but a literally empty 'entries' key is a ParseException."""
        test_fragment = self.create_fragment_file(u"""
        [mapping:test]
        archive:
            lib.a
        entries:
            if B = y:
                * (noflash) # if condition is false, then no 'entries' key value
        """)
        expected = set()
        fragment_file = parse_fragment_file(test_fragment, self.sdkconfig)
        self.assertEqual(expected, fragment_file.fragments[0].entries)

        test_fragment = self.create_fragment_file(u"""
        [mapping:test]
        archive:
            lib.a
        entries:
        """)
        with self.assertRaises(ParseException):
            parse_fragment_file(test_fragment, self.sdkconfig)
    def test_duplicate_entries(self):
        """Duplicate mapping tuples collapse, since entries are stored as a set."""
        test_fragment = self.create_fragment_file(u"""
        [mapping:test]
        archive:
            lib.a
        entries:
            obj:symbol (noflash)
            obj:symbol (noflash)
        """)
        expected = {('obj', 'symbol', 'noflash')}
        fragment_file = parse_fragment_file(test_fragment, self.sdkconfig)
        self.assertEqual(expected, fragment_file.fragments[0].entries)
    def test_invalid_grammar(self):
        """Malformed mapping fragments raise ParseException."""
        # missing 'entries'
        test_fragment = self.create_fragment_file(u"""
        [mapping:test]
        archive:
            lib.a
        """)
        with self.assertRaises(ParseException):
            parse_fragment_file(test_fragment, self.sdkconfig)

        # missing 'archive'
        test_fragment = self.create_fragment_file(u"""
        [mapping:test]
        entries:
            * (default)
        """)
        with self.assertRaises(ParseException):
            parse_fragment_file(test_fragment, self.sdkconfig)

        # ':' with no symbol
        test_fragment = self.create_fragment_file(u"""
        [mapping:test]
        archive: lib.a
        entries:
            obj: (noflash)
        """)
        with self.assertRaises(ParseException):
            parse_fragment_file(test_fragment, self.sdkconfig)

        # empty scheme parentheses
        test_fragment = self.create_fragment_file(u"""
        [mapping:test]
        archive: lib.a
        entries:
            obj: ()
        """)
        with self.assertRaises(ParseException):
            parse_fragment_file(test_fragment, self.sdkconfig)

        # missing scheme
        test_fragment = self.create_fragment_file(u"""
        [mapping:test]
        archive: lib.a
        entries:
            obj:symbol
        """)
        with self.assertRaises(ParseException):
            parse_fragment_file(test_fragment, self.sdkconfig)

        # scheme with no object
        test_fragment = self.create_fragment_file(u"""
        [mapping:test]
        archive: lib.a
        entries:
            (noflash)
        """)
        with self.assertRaises(ParseException):
            parse_fragment_file(test_fragment, self.sdkconfig)

        # '*' is not a valid symbol
        test_fragment = self.create_fragment_file(u"""
        [mapping:test]
        archive: lib.a
        entries:
            obj:* (noflash)
        """)
        with self.assertRaises(ParseException):
            parse_fragment_file(test_fragment, self.sdkconfig)

        # symbol with no object
        test_fragment = self.create_fragment_file(u"""
        [mapping:test]
        archive: lib.a
        entries:
            :symbol (noflash)
        """)
        with self.assertRaises(ParseException):
            parse_fragment_file(test_fragment, self.sdkconfig)

        # '*' object cannot take a symbol
        test_fragment = self.create_fragment_file(u"""
        [mapping:test]
        archive: lib.a
        entries:
            *:symbol (noflash)
        """)
        with self.assertRaises(ParseException):
            parse_fragment_file(test_fragment, self.sdkconfig)
    def test_keep_flag(self):
        """KEEP() flags attach to entries and may be repeated."""
        # Test parsing combinations and orders of flags
        test_fragment = self.create_fragment_file(u"""
        [mapping:map]
        archive: libmain.a
        entries:
            obj1 (default);
                text->flash_text KEEP(),
                rodata->flash_rodata KEEP() KEEP()
        """)
        fragment_file = parse_fragment_file(test_fragment, self.sdkconfig)
        fragment = fragment_file.fragments[0]

        expected = [Flag('text', 'flash_text', [Keep()]),
                    Flag('rodata', 'flash_rodata', [Keep(), Keep()])]
        actual = fragment.flags[('obj1', None, 'default')]

        self.assertEqual(expected, actual)
    def test_align_flag(self):
        """ALIGN() parses to Align(size, pre, post); default is pre-align only,
        and 'post' must not precede 'pre' in the argument list."""
        # Test parsing combinations and orders of flags
        test_fragment = self.create_fragment_file(u"""
        [mapping:map]
        archive: libmain.a
        entries:
            obj1 (default);
                text->flash_text ALIGN(8),
                rodata->flash_rodata ALIGN(8, pre),
                data->dram0_data ALIGN(8, pre, post),
                bss->dram0_bss ALIGN(8, post),
                common->dram0_bss ALIGN(8, pre, post) ALIGN(8)
        """)
        fragment_file = parse_fragment_file(test_fragment, self.sdkconfig)
        fragment = fragment_file.fragments[0]

        expected = [Flag('text', 'flash_text', [Align(8, True, False)]),
                    Flag('rodata', 'flash_rodata', [Align(8, True, False)]),
                    Flag('data', 'dram0_data', [Align(8, True, True)]),
                    Flag('bss', 'dram0_bss', [Align(8, False, True)]),
                    Flag('common', 'dram0_bss', [Align(8, True, True), Align(8, True, False)])]
        actual = fragment.flags[('obj1', None, 'default')]

        self.assertEqual(expected, actual)

        # Wrong post, pre order
        test_fragment = self.create_fragment_file(u"""
        [mapping:map]
        archive: libmain.a
        entries:
            obj1 (noflash)
                text->iram0_text ALIGN(8, post, pre)
        """)
        with self.assertRaises(ParseException):
            parse_fragment_file(test_fragment, self.sdkconfig)
    def test_sort_flag(self):
        """SORT() accepts one or two keys (name / alignment / init_priority),
        including repeated keys."""
        # Test parsing combinations and orders of flags
        test_fragment = self.create_fragment_file(u"""
        [mapping:map]
        archive: libmain.a
        entries:
            obj1 (default);
                text->flash_text SORT(name),
                rodata->flash_rodata SORT(alignment),
                data->dram0_data SORT(init_priority),
                bss->dram0_bss SORT(name, alignment),
                common->dram0_bss SORT(alignment, name),
                iram->iram0_text SORT(name, name),
                dram->dram0_data SORT(alignment, alignment)
        """)
        fragment_file = parse_fragment_file(test_fragment, self.sdkconfig)
        fragment = fragment_file.fragments[0]

        expected = [Flag('text', 'flash_text', [Sort('name')]),
                    Flag('rodata', 'flash_rodata', [Sort('alignment')]),
                    Flag('data', 'dram0_data', [Sort('init_priority')]),
                    Flag('bss', 'dram0_bss', [Sort('name', 'alignment')]),
                    Flag('common', 'dram0_bss', [Sort('alignment', 'name')]),
                    Flag('iram', 'iram0_text', [Sort('name', 'name')]),
                    Flag('dram', 'dram0_data', [Sort('alignment', 'alignment')])]
        actual = fragment.flags[('obj1', None, 'default')]

        self.assertEqual(expected, actual)
    def test_surround_flag(self):
        """SURROUND(sym) parses to a Surround flag carrying the symbol name."""
        # Test parsing combinations and orders of flags
        test_fragment = self.create_fragment_file(u"""
        [mapping:map]
        archive: libmain.a
        entries:
            obj1 (default);
                text->flash_text SURROUND(sym1)
        """)
        fragment_file = parse_fragment_file(test_fragment, self.sdkconfig)
        fragment = fragment_file.fragments[0]

        expected = [Flag('text', 'flash_text', [Surround('sym1')])]
        actual = fragment.flags[('obj1', None, 'default')]

        self.assertEqual(expected, actual)
    def test_flag_order(self):
        """Flags are kept in the order they were written, duplicates included."""
        # Test that the order in which the flags are specified is retained
        test_fragment = self.create_fragment_file(u"""
        [mapping:map]
        archive: libmain.a
        entries:
            obj1 (default);
                text->flash_text ALIGN(4) KEEP() SURROUND(sym1) ALIGN(8) SORT(name),
                rodata->flash_rodata KEEP() ALIGN(4) KEEP() SURROUND(sym1) ALIGN(8) ALIGN(4) SORT(name)
        """)
        fragment_file = parse_fragment_file(test_fragment, self.sdkconfig)
        fragment = fragment_file.fragments[0]

        expected = [Flag('text', 'flash_text', [Align(4, True, False),
                                                Keep(),
                                                Surround('sym1'),
                                                Align(8, True, False),
                                                Sort('name')]),
                    Flag('rodata', 'flash_rodata', [Keep(),
                                                    Align(4, True, False),
                                                    Keep(),
                                                    Surround('sym1'),
                                                    Align(8, True, False),
                                                    Align(4, True, False),
                                                    Sort('name')])]
        actual = fragment.flags[('obj1', None, 'default')]

        self.assertEqual(expected, actual)
def test_flags_entries_multiple_flags(self):
# Not an error, generation step handles this, since
# it that step | |
import io
import datetime
from typing import TYPE_CHECKING, Optional, Union, Type, List, Dict, Awaitable
from .base.http import HTTPRequestBase, EmptyObject
from .model import Channel, Message, MessageReference, AllowedMentions, Snowflake, Embed, Attachment, Overwrite, \
Emoji, User, Interaction, InteractionResponse, Webhook, Guild, ApplicationCommand, Invite, Application, FollowedChannel, \
ThreadMember, ListThreadsResponse, Component, Role, ApplicationCommandOption, GuildApplicationCommandPermissions, \
ApplicationCommandPermissions, VerificationLevel, DefaultMessageNotificationLevel, ExplicitContentFilterLevel, \
SystemChannelFlags, GuildPreview, ChannelTypes, GuildMember, Ban, PermissionFlags, GuildWidget, FILE_TYPE, \
VoiceRegion, Integration, ApplicationCommandTypes, WelcomeScreen, WelcomeScreenChannel, PrivacyLevel, StageInstance, \
AuditLog, AuditLogEvents, GuildTemplate, BYTES_RESPONSE, Sticker, GetGateway, VideoQualityModes, InviteTargetTypes, WidgetStyle, \
GuildScheduledEvent, GuildScheduledEventEntityMetadata, GuildScheduledEventPrivacyLevel, GuildScheduledEventEntityTypes, GuildScheduledEventStatus, \
GuildScheduledEventUser
from .utils import from_emoji, wrap_to_async, to_image_data
if TYPE_CHECKING:
from .base.model import AbstractObject, DiscordObjectBase
class APIClient:
"""
REST API handling client.
Example:
.. code-block:: python
import dico
# For request-based:
api = dico.APIClient("TOKEN", base=dico.HTTPRequest)
# For aiohttp-based:
api = dico.APIClient("TOKEN", base=dico.AsyncHTTPRequest)
...
.. note::
Most of the object parameters accept Snowflake or int or str. For example, you may pass ``832488750034190378`` in Message type.
:param str token: Token of the client.
:param Type[HTTPRequestBase] base: HTTP request handler to use. Must inherit :class:`~.HTTPRequestBase`.
:param Optional[AllowedMentions] default_allowed_mentions: Default allowed mentions object to use. Default None.
:param Optional[Snowflake] application_id: ID of the application. Required if you use interactions.
:param http_options: Options of HTTP request handler.
:ivar HTTPRequestBase ~.http: HTTP request client.
:ivar Optional[AllowedMentions] ~.default_allowed_mentions: Default allowed mentions object of the API client.
:ivar Optional[Application] ~.application: Application object of the client.
:ivar Optional[Snowflake] ~.application_id: ID of the application. Can be ``None``, and if it is, you must pass parameter application_id for all methods that requires it.
"""
    def __init__(self,
                 token: str,
                 *,
                 base: Type[HTTPRequestBase],
                 default_allowed_mentions: Optional[AllowedMentions] = None,
                 application_id: Optional[Snowflake.TYPING] = None,
                 **http_options):
        # Underlying HTTP client; ``base`` picks the implementation (sync
        # HTTPRequest vs aiohttp-based AsyncHTTPRequest — see class docstring).
        self.http: HTTPRequestBase = base.create(token, **http_options)
        # Default AllowedMentions used by requests (see class docstring).
        self.default_allowed_mentions: Optional[AllowedMentions] = default_allowed_mentions
        # Not set here; presumably populated elsewhere after fetching the
        # application — TODO confirm.
        self.application: Optional[Application] = None
        # May remain None; methods requiring it then need an explicit
        # application_id argument (see class docstring).
        self.application_id: Optional[Snowflake] = Snowflake.ensure_snowflake(application_id)
# Audit Log
def request_guild_audit_log(self,
guild: Guild.TYPING,
*,
user: Optional[User.TYPING] = None,
action_type: Optional[Union[int, AuditLogEvents]] = None,
before: Optional["DiscordObjectBase.TYPING"] = None,
limit: Optional[int] = None) -> AuditLog.RESPONSE:
"""
Requests guild audit log.
:param guild: Guild to request audit log.
:param user: Moderator who did the action. Default all.
:param Optional[AuditLogEvents] action_type: Type of the audit log to get.
:param before: Entry object to get before. Can be any object which includes ID.
:param Optional[int] limit: Limit of the number of the audit logs to get.
:return: :class:`~.AuditLog`
"""
if user is not None:
user = str(int(user))
if action_type is not None:
action_type = int(action_type)
if before is not None:
before = str(int(before))
resp = self.http.request_guild_audit_log(int(guild), user, action_type, before, limit)
if isinstance(resp, dict):
return AuditLog(self, resp)
return wrap_to_async(AuditLog, self, resp, as_create=False)
# Channel
def request_channel(self, channel: Channel.TYPING) -> Channel.RESPONSE:
"""
Requests channel object.
:param channel: Channel to get.
:return: :class:`~.Channel`
"""
channel = self.http.request_channel(int(channel))
if isinstance(channel, dict):
return Channel.create(self, channel)
return wrap_to_async(Channel, self, channel)
def modify_guild_channel(self,
channel: Channel.TYPING,
*,
name: Optional[str] = None,
channel_type: Optional[Union[int, ChannelTypes]] = None,
position: Optional[int] = EmptyObject,
topic: Optional[str] = EmptyObject,
nsfw: Optional[bool] = EmptyObject,
rate_limit_per_user: Optional[int] = EmptyObject,
bitrate: Optional[int] = EmptyObject,
user_limit: Optional[int] = EmptyObject,
permission_overwrites: Optional[List[Overwrite]] = EmptyObject,
parent: Optional[Channel.TYPING] = EmptyObject,
rtc_region: Optional[str] = EmptyObject,
video_quality_mode: Optional[Union[int, VideoQualityModes]] = EmptyObject,
reason: Optional[str] = None) -> Channel.RESPONSE:
"""
Modifies guild channel.
.. note::
All keyword-only arguments except name, channel_type, and reason accept None.
:param channel: Channel to edit.
:param Optional[str] name: Name of the channel to change.
:param Optional[ChannelTypes] channel_type: Type of the channel to change.
:param Optional[int] position: Position of the channel to change.
:param Optional[str] topic: Topic of the channel to change.
:param Optional[bool] nsfw: Whether this channel is NSFW.
:param Optional[int] rate_limit_per_user: Slowmode of the channel to change.
:param Optional[int] bitrate: Bitrate of the channel to change.
:param Optional[int] user_limit: User limit of the channel to change.
:param Optional[List[Overwrite]] permission_overwrites: List of permission overwrites to change.
:param parent: Parent category of the channel to change.
:param Optional[str] rtc_region: RTC region of the channel to change. Pass None to set to automatic.
:param Optional[VideoQualityModes] video_quality_mode: Video quality mode of the camera to change.
:param Optional[str] reason: Reason of the action.
:return: :class:`~.Channel`
"""
if permission_overwrites:
permission_overwrites = [x.to_dict() for x in permission_overwrites]
if parent:
parent = int(parent) # noqa
channel = self.http.modify_guild_channel(int(channel), name, int(channel_type), position, topic, nsfw, rate_limit_per_user,
bitrate, user_limit, permission_overwrites, parent, rtc_region, int(video_quality_mode), reason=reason)
if isinstance(channel, dict):
return Channel.create(self, channel)
return wrap_to_async(Channel, self, channel)
def modify_group_dm_channel(self, channel: Channel.TYPING, *, name: Optional[str] = None, icon: Optional[bytes] = None, reason: Optional[str] = None) -> Channel.RESPONSE:
"""
Modifies group DM channel.
:param channel: DM Channel to modify.
:param Optional[str] name: Name to change.
:param Optional[bin] icon: Icon as bytes to change.
:param Optional[str] reason: Reason of the action.
:return: :class:`~.Channel`
"""
channel = self.http.modify_group_dm_channel(int(channel), name, icon, reason=reason)
if isinstance(channel, dict):
return Channel.create(self, channel)
return wrap_to_async(Channel, self, channel)
def modify_thread_channel(self,
channel: Channel.TYPING,
*,
name: Optional[str] = None,
archived: Optional[bool] = None,
auto_archive_duration: Optional[int] = None,
locked: Optional[bool] = None,
rate_limit_per_user: Optional[int] = EmptyObject,
reason: Optional[str] = None) -> Channel.RESPONSE:
"""
Modifies thread channel.
:param channel: Thread channel to modify.
:param Optional[str] name: Name to change.
:param archived: Whether this thread is archived.
:param Optional[int] auto_archive_duration: Auto archive duration to set.
:param Optional[bool] locked: Whether this thread is locked.
:param Optional[int] rate_limit_per_user: Slowmode time to change. Set to None to remove.
:param Optional[str] reason: Reason of the action.
:return: :class:`~.Channel`
"""
channel = self.http.modify_thread_channel(int(channel), name, archived, auto_archive_duration, locked, rate_limit_per_user, reason=reason)
if isinstance(channel, dict):
return Channel.create(self, channel)
return wrap_to_async(Channel, self, channel)
def delete_channel(self, channel: Channel.TYPING, *, reason: Optional[str] = None) -> Channel.RESPONSE:
"""
Deletes channel.
:param channel: Channel to delete.
:param Optional[str] reason: Reason of the action.
:return: :class:`~.Channel`
"""
resp = self.http.delete_channel(int(channel), reason=reason)
if isinstance(resp, dict):
return Channel.create(self, resp, prevent_caching=True)
return wrap_to_async(Channel, self, resp, prevent_caching=True)
def request_channel_messages(self,
channel: Channel.TYPING,
*,
around: Optional[Message.TYPING] = None,
before: Optional[Message.TYPING] = None,
after: Optional[Message.TYPING] = None,
limit: Optional[int] = None) -> Message.RESPONSE_AS_LIST:
"""
Requests list of messages in the channel.
:param channel: Channel to request messages.
:param around: Target message to get around.
:param before: Target message to get before.
:param after: Target message to get after.
:param Optional[int] limit: Limit of numbers of messages to request. Default 50.
:return: List[ :class:`~.Message` ]
"""
messages = self.http.request_channel_messages(int(channel), around and str(int(around)), before and str(int(before)), after and str(int(after)), limit)
# This looks unnecessary, but this is to ensure they are all numbers.
if isinstance(messages, list):
return [Message.create(self, x) for x in messages]
return wrap_to_async(Message, self, messages)
def request_channel_message(self, channel: Channel.TYPING, message: Message.TYPING) -> Message.RESPONSE:
"""
Requests message from channel.
:param channel: Channel to request message.
:param message: Message to request.
:return: :class:`~.Message`
"""
message = self.http.request_channel_message(int(channel), int(message))
if isinstance(message, dict):
return Message.create(self, message)
return wrap_to_async(Message, self, message)
def create_message(self,
channel: Channel.TYPING,
content: Optional[str] = None,
*,
embed: Optional[Union[Embed, dict]] = None,
embeds: Optional[List[Union[Embed, dict]]] = None,
file: Optional[FILE_TYPE] = None,
files: Optional[List[FILE_TYPE]] = None,
tts: Optional[bool] = False,
allowed_mentions: Optional[Union[AllowedMentions, dict]] = None,
message_reference: Optional[Union[Message, MessageReference, dict]] = None,
component: Optional[Union[dict, Component]] = None,
components: Optional[List[Union[dict, Component]]] = None,
sticker: Optional[Sticker.TYPING] = None,
stickers: Optional[List[Sticker.TYPING]] = None) -> Message.RESPONSE:
"""
Creates message to channel.
.. note::
- FileIO object passed to ``file`` or ``files`` parameter will be automatically closed when requesting,
therefore it is recommended to pass file path.
.. warning::
- You must pass at least one of ``content`` or ``embed`` or ``file`` or ``files`` parameter.
- You can't use ``file`` and ``files`` at the same time.
:param channel: Channel to create message.
:param Optional[str] content: Content of the message.
:param embed: Embed of the message.
:type embed: Optional[Union[Embed, dict]]
:param embeds: List of embeds of the message.
:type embeds: Optional[List[Union[Embed, dict]]]
:param file: File of the message.
:type file: Optional[Union[io.FileIO, pathlib.Path, str]]
:param files: Files of the message.
:type files: Optional[List[Union[io.FileIO, pathlib.Path, str]]]
:param Optional[bool] tts: Whether to speak message.
:param allowed_mentions: :class:`~.AllowedMentions` to use for this request.
:type allowed_mentions: Optional[Union[AllowedMentions, dict]]
:param message_reference: Message to reply.
:type message_reference: Optional[Union[Message, MessageReference, dict]]
:param | |
import numpy as np
from gym.spaces import Dict
from rlkit.data_management.replay_buffer import ReplayBuffer
from rlkit.torch.relational.relational_util import get_masks, pad_obs
class ObsDictRelabelingBuffer(ReplayBuffer):
    """
    Replay buffer for environments whose observations are dictionaries, such as

        - OpenAI Gym GoalEnv environments.
          https://blog.openai.com/ingredients-for-robotics-research/
        - multiworld MultitaskEnv. https://github.com/vitchyr/multiworld/

    Supports HER-style goal relabeling: on sampling, a configurable fraction of
    the desired goals is replaced either with goals achieved later in the same
    trajectory (the "future" strategy) or with goals sampled from the
    environment.

    Implementation details:

        - Only add_path is implemented; add_sample raises.
        - Image observations are presumed to start with the 'image_' prefix
          and are stored as uint8 bytes.
        - Every sample from [0, self._size] will be valid.
        - Observation and next observation are saved separately. It's memory
          inefficient to save the observations twice, but it makes the code
          *much* easier since you no longer have to worry about termination
          conditions.
        - Observations are zero-padded out to ``max_num_blocks`` object slots
          (via ``pad_obs``/``get_masks``) so data collected on environments
          with different block counts can share one buffer.
    """
    def __init__(
            self,
            max_size,
            env,
            fraction_goals_rollout_goals=1.0,
            fraction_goals_env_goals=0.0,
            internal_keys=None,
            observation_key='observation',
            achieved_goal_key='achieved_goal',
            desired_goal_key='desired_goal',
            num_relational=None,
            num_heads=1,
            max_num_blocks=None,
            demonstration_buffer=None,
            skip_future_obs_idx=False
    ):
        """
        :param max_size: Maximum number of transitions stored.
        :param env: Environment; its observation space must be a gym Dict.
        :param fraction_goals_rollout_goals: Default, no her.
        :param fraction_goals_env_goals: What fraction of goals are sampled
            "from the environment" assuming that the environment has a "sample
            goal" method. The remaining resampled goals are resampled using the
            "future" strategy, described in Hindsight Experience Replay.
        :param internal_keys: Extra keys in the observation dictionary to save.
            Mostly for debugging.
        :param observation_key:
        :param desired_goal_key:
        :param achieved_goal_key:
        :param num_relational: Number of relational/attention layers (stored
            only; the related bookkeeping below is commented out).
        :param num_heads: Number of attention heads (stored only).
        :param max_num_blocks: Pad observations to this many object slots;
            defaults to the wrapped env's ``num_blocks``.
        :param demonstration_buffer: Optional demonstration buffer of fixed size.
        :param skip_future_obs_idx: If True, skip building the future-obs index
            on the non-wrapping add_path branch (disables "future" relabeling
            for those samples).
        """
        if internal_keys is None:
            internal_keys = []
        self.internal_keys = internal_keys
        assert isinstance(env.observation_space, Dict)
        assert fraction_goals_env_goals >= 0
        assert fraction_goals_rollout_goals >= 0
        assert fraction_goals_env_goals + fraction_goals_rollout_goals <= 1.0
        self.max_size = max_size
        self.env = env
        self.fraction_goals_rollout_goals = fraction_goals_rollout_goals
        self.fraction_goals_env_goals = fraction_goals_env_goals
        self.ob_keys_to_save = [
            observation_key,
            desired_goal_key,
            achieved_goal_key,
        ]
        self.observation_key = observation_key
        self.desired_goal_key = desired_goal_key
        self.achieved_goal_key = achieved_goal_key
        self._action_dim = env.action_space.low.size
        self._actions = np.zeros((max_size, self._action_dim))
        # self._terminals[i] = a terminal was received at time i
        self._terminals = np.zeros((max_size, 1), dtype='uint8')
        # self._obs[key][i] is the value of observation[key] at time i
        self._obs = {}
        self._next_obs = {}
        ob_spaces = self.env.observation_space.spaces
        for key in self.ob_keys_to_save + internal_keys:
            assert key in ob_spaces, \
                "Key not found in the observation space: %s" % key
            # NOTE(review): ``type`` shadows the builtin here. Images are
            # stored as raw uint8 bytes; everything else as float64.
            type = np.float64
            if key.startswith('image'):
                type = np.uint8
            self._obs[key] = np.zeros(
                (max_size, ob_spaces[key].low.size), dtype=type)
            self._next_obs[key] = np.zeros(
                (max_size, ob_spaces[key].low.size), dtype=type)
        self._top = 0 # Used as pointer for adding new samples
        self._size = 0 # Used to define maximum sample range
        # Let j be any index in self._idx_to_future_obs_idx[i]
        # Then self._next_obs[j] is a valid next observation for observation i
        self._idx_to_future_obs_idx = [None] * max_size
        # self._log_pi = np.zeros((max_size, 1))
        # self._log_pi_block2 = np.zeros((max_size, 1))
        #
        # self._logstd = np.zeros((max_size, 1))
        # self._stable_stacked = np.zeros((max_size, 1))
        self.num_heads = num_heads
        self.num_relational = num_relational
        self.max_num_blocks = max_num_blocks if max_num_blocks is not None else self.env.unwrapped.num_blocks # set num_blocks = the blocks of the first environment... which should have max blocks
        self._masks = np.zeros((max_size, self.max_num_blocks))
        # if self.num_relational:
        #     self._attn_softmax = np.zeros((max_size, self.num_blocks * self.num_heads * self.num_relational + 1, self.num_blocks))
        #
        # self._logstd_b1 = np.zeros((max_size, 1)) # Index represents the sample idx, value represents the entropy
        # self._logstd_b2 = np.zeros((max_size, 1))
        # Flattened per-block sizes for each observation key, used by pad_obs.
        # NOTE(review): hard-coded (15/3/3) -- presumably FetchBlock-style
        # state sizes; confirm against the environment's observation layout.
        self.key_sizes = dict(observation=15,
                              desired_goal=3,
                              achieved_goal=3)
        self.demonstration_buffer = demonstration_buffer # A demonstration buffer of fixed size
        self.skip_future_obs_idx = skip_future_obs_idx
    def add_sample(self, observation, action, reward, terminal,
                   next_observation, **kwargs):
        """Unsupported: data must be added a full trajectory at a time."""
        raise NotImplementedError("Only use add_path")
    def terminate_episode(self):
        # No-op: episode boundaries are handled entirely inside add_path.
        pass
    def num_steps_can_sample(self):
        """Return the number of valid (filled) transitions in the buffer."""
        return self._size
    def pad_all_obs(self, curr_num_blocks=None, max_num_blocks=None):
        """
        Pad every stored observation from ``curr_num_blocks`` up to
        ``max_num_blocks`` slots, and rebuild the block masks to match.

        NOTE(review): the mask rebuild covers the whole allocated buffer, so
        rows beyond ``self._size`` receive masks too -- presumably harmless
        since sampling never reads past ``self._size``; confirm.
        """
        for key in ['observation', 'desired_goal', 'achieved_goal']:
            self._obs[key] = pad_obs(self._obs[key], key=key, key_sizes=self.key_sizes, max_num_blocks=max_num_blocks, curr_num_blocks=curr_num_blocks)
            self._next_obs[key] = pad_obs(self._next_obs[key], key=key,key_sizes=self.key_sizes, max_num_blocks=max_num_blocks, curr_num_blocks=curr_num_blocks)
        # self._masks = (self._masks, max_num_blocks=max_num_blocks, curr_num_blocks=curr_num_blocks)
        self._masks = get_masks(curr_num_blocks=curr_num_blocks, max_num_blocks=max_num_blocks, path_len=self._obs['observation'].shape[0])
    def merge(self, other_replay_buffer):
        """
        Concatenate another buffer's storage onto this one.

        NOTE(review): this grows the arrays beyond ``max_size`` instead of
        writing in place, while ``_top``/``_size`` stay bounded by
        ``max_size`` and ``_idx_to_future_obs_idx`` is not merged -- looks
        intended only for one-off offline merging before sampling; confirm.
        """
        for key in ['observation', 'desired_goal', 'achieved_goal']:
            assert len(self._obs[key].shape) == len(other_replay_buffer._obs[key].shape)
            assert np.all(self._obs[key].shape[1] == other_replay_buffer._obs[key].shape[1])
            self._obs[key] = np.concatenate((self._obs[key], other_replay_buffer._obs[key]), axis=0)
            self._next_obs[key] = np.concatenate((self._next_obs[key], other_replay_buffer._next_obs[key]), axis=0)
        assert len(self._masks.shape) == len(other_replay_buffer._masks.shape)
        assert np.all(self._masks.shape[1] == other_replay_buffer._masks.shape[1])
        self._masks = np.concatenate((self._masks, other_replay_buffer._masks))
        self._actions = np.concatenate((self._actions, other_replay_buffer._actions))
        self._terminals = np.concatenate((self._terminals, other_replay_buffer._terminals))
        self._top = (self._top + other_replay_buffer._size) % self.max_size
        self._size = min(self._size + other_replay_buffer._size, self.max_size)
    def add_path(self, path, curr_num_blocks=None):
        """
        Add a full trajectory to the buffer, padding its observations from
        ``curr_num_blocks`` up to ``self.max_num_blocks`` and recording, for
        each timestep, the indices of all later steps in the same trajectory
        (consumed by the "future" relabeling strategy in random_batch).
        """
        obs = path["observations"]
        actions = path["actions"]
        rewards = path["rewards"]
        next_obs = path["next_observations"]
        terminals = path["terminals"]
        mask = path['mask']
        path_len = len(rewards)
        actions = flatten_n(actions)
        obs = flatten_dict(obs, self.ob_keys_to_save + self.internal_keys)
        next_obs = flatten_dict(next_obs,
                                self.ob_keys_to_save + self.internal_keys)
        obs = preprocess_obs_dict(obs)
        next_obs = preprocess_obs_dict(next_obs)
        if self._top + path_len >= self.max_size:
            """
            All of this logic is to handle wrapping the pointer when the
            replay buffer gets full.
            """
            num_pre_wrap_steps = self.max_size - self._top
            # numpy slice
            pre_wrap_buffer_slice = np.s_[
                self._top:self._top + num_pre_wrap_steps, :
            ]
            pre_wrap_path_slice = np.s_[0:num_pre_wrap_steps, :]
            num_post_wrap_steps = path_len - num_pre_wrap_steps
            post_wrap_buffer_slice = slice(0, num_post_wrap_steps)
            post_wrap_path_slice = slice(num_pre_wrap_steps, path_len)
            for buffer_slice, path_slice in [
                (pre_wrap_buffer_slice, pre_wrap_path_slice),
                (post_wrap_buffer_slice, post_wrap_path_slice),
            ]:
                self._actions[buffer_slice] = actions[path_slice]
                self._terminals[buffer_slice] = terminals[path_slice]
                self._masks[buffer_slice] = mask[path_slice]
                for key in self.ob_keys_to_save + self.internal_keys:
                    self._obs[key][buffer_slice] = pad_obs(obs[key][path_slice], key, key_sizes=self.key_sizes, max_num_blocks=self.max_num_blocks, curr_num_blocks=curr_num_blocks)
                    self._next_obs[key][buffer_slice] = pad_obs(next_obs[key][path_slice], key, key_sizes=self.key_sizes, max_num_blocks=self.max_num_blocks, curr_num_blocks=curr_num_blocks)
            # Pointers from before the wrap
            for i in range(self._top, self.max_size):
                self._idx_to_future_obs_idx[i] = np.hstack((
                    # Pre-wrap indices
                    np.arange(i, self.max_size),
                    # Post-wrap indices
                    np.arange(0, num_post_wrap_steps)
                ))
            # Pointers after the wrap
            # NOTE(review): unlike the non-wrapping branch below, this branch
            # does not honor ``skip_future_obs_idx`` -- confirm intentional.
            for i in range(0, num_post_wrap_steps):
                self._idx_to_future_obs_idx[i] = np.arange(
                    i,
                    num_post_wrap_steps,
                )
        else:
            slc = np.s_[self._top:self._top + path_len, :]
            self._actions[slc] = actions
            self._terminals[slc] = terminals
            # NOTE(review): this check only prints; it never actually creates
            # ``_masks``, so the assignment below would still raise if it were
            # missing. __init__ always creates it, so this looks like dead
            # debugging code.
            if not hasattr(self, "_masks"):
                print("_masks not found, creating empty one...")
            self._masks[slc] = mask
            for key in self.ob_keys_to_save + self.internal_keys:
                self._obs[key][slc] = pad_obs(obs[key], key, key_sizes=self.key_sizes, max_num_blocks=self.max_num_blocks, curr_num_blocks=curr_num_blocks)
                self._next_obs[key][slc] = pad_obs(next_obs[key], key, key_sizes=self.key_sizes, max_num_blocks=self.max_num_blocks, curr_num_blocks=curr_num_blocks)
            if not self.skip_future_obs_idx:
                for i in range(self._top, self._top + path_len):
                    self._idx_to_future_obs_idx[i] = np.arange(
                        i, self._top + path_len
                    )
        self._top = (self._top + path_len) % self.max_size
        self._size = min(self._size + path_len, self.max_size)
    def _sample_indices(self, batch_size):
        # Uniform sampling over the valid region [0, self._size).
        return np.random.randint(0, self._size, batch_size)
        # return np.arange(0, batch_size)
    def random_batch(self, batch_size):
        """
        Sample a batch of transitions with HER goal relabeling.

        The batch is split into three contiguous chunks: the first keeps the
        original rollout goals, the second (fraction_goals_env_goals) gets
        goals sampled from the environment, and the remainder gets "future"
        achieved goals from the same trajectory. Rewards are recomputed from
        the relabeled goals.
        """
        indices = self._sample_indices(batch_size)
        resampled_goals = self._next_obs[self.desired_goal_key][indices]
        num_rollout_goals = int(batch_size * self.fraction_goals_rollout_goals)
        num_env_goals = int(batch_size * self.fraction_goals_env_goals)
        num_future_goals = batch_size - (num_env_goals + num_rollout_goals)
        new_obs_dict = self._batch_obs_dict(indices)
        new_next_obs_dict = self._batch_next_obs_dict(indices)
        if num_env_goals > 0:
            env_goals = self.env.unwrapped.sample_goals(num_env_goals)
            env_goals = preprocess_obs_dict(env_goals)
            last_env_goal_idx = num_rollout_goals + num_env_goals
            resampled_goals[num_rollout_goals:last_env_goal_idx] = (
                env_goals[self.desired_goal_key]
            )
        if num_future_goals > 0:
            future_obs_idxs = []
            for i in indices[-num_future_goals:]:
                possible_future_obs_idxs = self._idx_to_future_obs_idx[i]
                # This is generally faster than random.choice.
                # Makes you wonder what random.choice is doing...
                num_options = len(possible_future_obs_idxs)
                next_obs_i = int(np.random.randint(0, num_options))
                future_obs_idxs.append(possible_future_obs_idxs[next_obs_i])
            future_obs_idxs = np.array(future_obs_idxs)
            resampled_goals[-num_future_goals:] = (
                self._next_obs[self.achieved_goal_key][future_obs_idxs]
            )
        new_obs_dict[self.desired_goal_key] = resampled_goals
        new_next_obs_dict[self.desired_goal_key] = resampled_goals
        new_obs_dict = postprocess_obs_dict(new_obs_dict)
        new_next_obs_dict = postprocess_obs_dict(new_next_obs_dict)
        # resampled_goals must be postprocessed as well
        resampled_goals = new_next_obs_dict[self.desired_goal_key]
        new_actions = self._actions[indices]
        # if self.num_relational:
        #     new_attn_softmax = self._attn_softmax[indices]
        """
        For example, the environments in this repo have batch-wise
        implementations of computing rewards:
        https://github.com/vitchyr/multiworld
        """
        if hasattr(self.env, 'compute_rewards'):
            new_rewards = self.env.compute_rewards(
                new_actions,
                new_next_obs_dict,
            )
        else: # Assuming it's a (possibly wrapped) gym GoalEnv
            new_rewards = np.ones((batch_size, 1))
            for i in range(batch_size):
                new_rewards[i] = self.env.compute_reward(
                    new_next_obs_dict[self.achieved_goal_key][i],
                    new_next_obs_dict[self.desired_goal_key][i],
                    None
                )
        new_rewards = new_rewards.reshape(-1, 1)
        new_obs = new_obs_dict[self.observation_key]
        new_next_obs = new_next_obs_dict[self.observation_key]
        new_masks = self._masks[indices]
        # Below: indices represent order, value represents index of sample
        # argsort in ascending order from smallest (most negative) to largest entropy
        # argsort_ = np.argsort(self._logstd_b1[:self._size], axis=0)
        # sample_idxs_2_rank = np.zeros_like(self._logstd_b1[:self._size])
        #
        # for i in range(self._size):
        #     sample_idxs_2_rank[argsort_[i]] = i
        #
        # logstd_block1_normalized_ranking = sample_idxs_2_rank[indices] / self._size
        # assert (logstd_block1_normalized_ranking <= 1).all() and (logstd_block1_normalized_ranking >= 0).all()
        batch = {
            'observations': new_obs,
            'actions': new_actions,
            'rewards': new_rewards,
            'terminals': self._terminals[indices],
            'next_observations': new_next_obs,
            'resampled_goals': resampled_goals,
            'indices': np.array(indices).reshape(-1, 1),
            'masks': new_masks,
            # 'logstd': self._logstd[indices],
            # 'stable_stacked': self._stable_stacked[indices],
            # 'logstd_block1_normalized_ranking': logstd_block1_normalized_ranking,
        }
        return batch
    def _batch_obs_dict(self, indices):
        """Gather saved observations at ``indices`` for every saved key."""
        return {
            key: self._obs[key][indices]
            for key in self.ob_keys_to_save
        }
    def _batch_next_obs_dict(self, indices):
        """Gather saved next-observations at ``indices`` for every saved key."""
        return {
            key: self._next_obs[key][indices]
            for key in self.ob_keys_to_save
        }
def flatten_n(xs):
    """Convert *xs* to an ndarray and flatten every element to a row vector."""
    arr = np.asarray(xs)
    leading = arr.shape[0]
    return arr.reshape((leading, -1))
def flatten_dict(dicts, keys):
    """
    Input is list of dicts. This operation pulls out the key in each dict and combines the values into a new list mapped to the original key. A new dictionary is formed with these key -> list mappings.
    """
    combined = {}
    for key in keys:
        per_step_values = [d[key] for d in dicts]
        combined[key] = flatten_n(per_step_values)
    return combined
def preprocess_obs_dict(obs_dict):
    """
    Apply internal replay buffer representation changes: save images as bytes.
    Mutates and returns ``obs_dict`` in place.
    """
    for obs_key in obs_dict:
        value = obs_dict[obs_key]
        # Only image-valued entries are converted; None entries are skipped.
        if 'image' in obs_key and value is not None:
            obs_dict[obs_key] = unnormalize_image(value)
    return obs_dict
def postprocess_obs_dict(obs_dict):
"""
Undo internal replay buffer representation changes: save images as bytes
"""
# for obs_key, obs in obs_dict.items():
# if 'image' in obs_key and obs | |
<gh_stars>1-10
from urllib import parse
from typing import Dict, Optional
from tea_client.http import HttpClient
from tea_client.handler import handler
from paperswithcode.config import config
from paperswithcode.models import (
Paper,
Papers,
Repositories,
Conference,
Conferences,
Proceeding,
Proceedings,
Area,
Areas,
Task,
TaskCreateRequest,
TaskUpdateRequest,
Tasks,
Dataset,
DatasetCreateRequest,
DatasetUpdateRequest,
Datasets,
Method,
Methods,
Metric,
Metrics,
MetricCreateRequest,
MetricUpdateRequest,
Result,
Results,
ResultCreateRequest,
ResultUpdateRequest,
EvaluationTable,
EvaluationTables,
EvaluationTableCreateRequest,
EvaluationTableUpdateRequest,
EvaluationTableSyncRequest,
EvaluationTableSyncResponse,
)
class PapersWithCodeClient:
"""PapersWithCode client."""
def __init__(self, token=None, url=None):
url = url or config.server_url
self.http = HttpClient(
url=f"{url}/api/v{config.api_version}",
token=token or "",
authorization_method=HttpClient.Authorization.token,
)
@staticmethod
def __params(page: int, items_per_page: int, **kwargs) -> Dict[str, str]:
params = {key: str(value) for key, value in kwargs.items()}
params["page"] = str(page)
params["items_per_page"] = str(items_per_page)
return params
@staticmethod
def __parse(url: str) -> int:
"""Return page number."""
p = parse.urlparse(url)
if p.query == "":
return 1
else:
q = parse.parse_qs(p.query)
return q.get("page", [1])[0]
@classmethod
def __page(cls, result, page_model):
next_page = result["next"]
if next_page is not None:
next_page = cls.__parse(next_page)
previous_page = result["previous"]
if previous_page is not None:
previous_page = cls.__parse(previous_page)
return page_model(
count=result["count"],
next_page=next_page,
previous_page=previous_page,
results=result["results"],
)
@handler
def paper_list(
self,
q: Optional[str] = None,
arxiv_id: Optional[str] = None,
title: Optional[str] = None,
abstract: Optional[str] = None,
page: int = 1,
items_per_page: int = 50,
) -> Papers:
"""Return a paginated list of papers.
Args:
q (str, optional): Filter papers by querying the paper title and
abstract.
arxiv_id (str, optional): Filter papers by arxiv id.
title (str, optional): Filter papers by part of the title.
abstract (str, optional): Filter papers by part of the abstract.
page (int): Desired page.
items_per_page (int): Desired number of items per page.
Default: 50.
Returns:
Papers: Papers object.
"""
params = self.__params(page, items_per_page)
timeout = None
if q is not None:
params["q"] = q
timeout = 60
if arxiv_id is not None:
params["arxiv_id"] = arxiv_id
if title is not None:
params["title"] = title
if abstract is not None:
params["abstract"] = abstract
timeout = 60
return self.__page(
self.http.get("/papers/", params=params, timeout=timeout), Papers
)
@handler
def paper_get(self, paper_id: str) -> Paper:
"""Return a paper by it's ID.
Args:
paper_id (str): ID of the paper.
Returns:
Paper: Paper object.
"""
return Paper(**self.http.get(f"/papers/{paper_id}/"))
@handler
def paper_dataset_list(
self, paper_id: str, page: int = 1, items_per_page: int = 50
) -> Repositories:
"""Return a list of datasets mentioned in the paper..
Args:
paper_id (str): ID of the paper.
page (int): Desired page.
items_per_page (int): Desired number of items per page.
Default: 50.
Returns:
Datasets: Datasets object.
"""
params = self.__params(page, items_per_page)
return self.__page(
self.http.get(f"/papers/{paper_id}/datasets/", params=params),
Datasets,
)
@handler
def paper_repository_list(
self, paper_id: str, page: int = 1, items_per_page: int = 50
) -> Repositories:
"""Return a list of paper implementations.
Args:
paper_id (str): ID of the paper.
page (int): Desired page.
items_per_page (int): Desired number of items per page.
Default: 50.
Returns:
Repositories: Repositories object.
"""
params = self.__params(page, items_per_page)
return self.__page(
self.http.get(f"/papers/{paper_id}/repositories/", params=params),
Repositories,
)
@handler
def paper_task_list(
self, paper_id: str, page: int = 1, items_per_page: int = 50
) -> Tasks:
"""Return a list of tasks mentioned in the paper.
Args:
paper_id (str): ID of the paper.
page (int): Desired page.
items_per_page (int): Desired number of items per page.
Default: 50.
Returns:
Tasks: Tasks object.
"""
params = self.__params(page, items_per_page)
return self.__page(
self.http.get(f"/papers/{paper_id}/tasks/", params=params), Tasks
)
@handler
def paper_method_list(
self, paper_id: str, page: int = 1, items_per_page: int = 50
) -> Methods:
"""Return a list of methods mentioned in the paper.
Args:
paper_id (str): ID of the paper.
page (int): Desired page.
items_per_page (int): Desired number of items per page.
Default: 50.
Returns:
Methods: Methods object.
"""
params = self.__params(page, items_per_page)
return self.__page(
self.http.get(f"/papers/{paper_id}/methods/", params=params),
Methods,
)
@handler
def paper_result_list(
self, paper_id: str, page: int = 1, items_per_page: int = 50
) -> Results:
"""Return a list of evaluation results for the paper.
Args:
paper_id (str): ID of the paper.
page (int): Desired page.
items_per_page (int): Desired number of items per page.
Default: 50.
Returns:
Results: Results object.
"""
params = self.__params(page, items_per_page)
return self.__page(
self.http.get(f"/papers/{paper_id}/results/", params=params),
Results,
)
@handler
def conference_list(
self,
q: Optional[str] = None,
name: Optional[str] = None,
page: int = 1,
items_per_page: int = 50,
) -> Conferences:
"""Return a paginated list of conferences.
Args:
q (str, optional): Search all searchable fields.
name (str, optional): Filter conferences by part of the name.
page (int): Desired page.
items_per_page (int): Desired number of items per page.
Default: 50.
Returns:
Conferences: Conferences object.
"""
params = self.__params(page, items_per_page)
if q is not None:
params["q"] = q
if name is not None:
params["name"] = name
return self.__page(
self.http.get("/conferences/", params=params), Conferences
)
@handler
def conference_get(self, conference_id: str) -> Conference:
"""Return a conference by it's ID.
Args:
conference_id (str): ID of the conference.
Returns:
Conference: Conference object.
"""
return Conference(**self.http.get(f"/conferences/{conference_id}/"))
@handler
def proceeding_list(
self, conference_id: str, page: int = 1, items_per_page: int = 50
) -> Proceedings:
"""Return a paginated list of conference proceedings.
Args:
conference_id (str): ID of the conference.
page (int): Desired page.
items_per_page (int): Desired number of items per page.
Default: 50.
Returns:
Proceedings: Proceedings object.
"""
return self.__page(
self.http.get(
f"/conferences/{conference_id}/proceedings/",
params=self.__params(page, items_per_page),
),
Proceedings,
)
@handler
def proceeding_get(
self, conference_id: str, proceeding_id: str
) -> Proceeding:
"""Return a conference proceeding by it's ID.
Args:
conference_id (str): ID of the conference.
proceeding_id (str): ID of the proceeding.
Returns:
Proceeding: Proceeding object.
"""
return Proceeding(
**self.http.get(
f"/conferences/{conference_id}/proceedings/{proceeding_id}/"
)
)
@handler
def proceeding_paper_list(
self,
conference_id: str,
proceeding_id: str,
page: int = 1,
items_per_page: int = 50,
) -> Papers:
"""Return a list of papers published in a confernce proceeding.
Args:
conference_id (str): ID of the conference.
proceeding_id (str): ID of the proceding.
page (int): Desired page.
items_per_page (int): Desired number of items per page.
Default: 50.
Returns:
Papers: Papers object.
"""
params = self.__params(page, items_per_page)
return self.__page(
self.http.get(
f"/conferences/{conference_id}/proceedings/{proceeding_id}"
f"/papers/",
params=params,
),
Papers,
)
@handler
def area_list(
self,
q: Optional[str] = None,
name: Optional[str] = None,
page: int = 1,
items_per_page: int = 50,
) -> Areas:
"""Return a paginated list of areas.
Args:
q (str, optional): Filter areas by querying the area name.
name (str, optional): Filter areas by part of the name.
page (int): Desired page.
items_per_page (int): Desired number of items per page.
Default: 50.
Returns:
Areas: Areas object.
"""
params = self.__params(page, items_per_page)
timeout = None
if q is not None:
params["q"] = q
timeout = 60
if name is not None:
params["name"] = name
return self.__page(
self.http.get("/areas/", params=params, timeout=timeout), Areas
)
@handler
def area_get(self, area_id: str) -> Area:
"""Return an area by it's ID.
Args:
area_id (str): ID of the area.
Returns:
Area: Area object.
"""
return Area(**self.http.get(f"/areas/{area_id}/"))
@handler
def area_task_list(
self, area_id: str, page: int = 1, items_per_page: int = 50
) -> Tasks:
"""Return a paginated list of tasks in an area.
Args:
area_id (str): ID of the area.
page (int): Desired page.
items_per_page (int): Desired number of items per page.
Default: 50.
Returns:
Tasks: Tasks object.
"""
params = self.__params(page, items_per_page)
return self.__page(
self.http.get(f"/areas/{area_id}/tasks/", params=params), Tasks
)
@handler
def task_list(
self,
q: Optional[str] = None,
name: Optional[str] = None,
page: int = 1,
items_per_page: int = 50,
) -> Tasks:
"""Return a paginated list of tasks.
Args:
q (str, optional): Filter tasks by querying the task name.
name (str, optional): Filter tasks by part of th name.
page (int): Desired page.
items_per_page (int): Desired number of items per page.
Default: 50.
Returns:
Tasks: Tasks object.
"""
params = self.__params(page, items_per_page)
timeout = None
if q is not None:
params["q"] = q
timeout = 60
if name is not None:
params["name"] = name
return self.__page(
self.http.get("/tasks/", params=params, timeout=timeout), Tasks
)
@handler
def task_get(self, task_id: str) -> Task:
"""Return a task by it's ID.
Args:
task_id (str): ID of the task.
Returns:
Task: Task object.
"""
return Task(**self.http.get(f"/tasks/{task_id}/"))
@handler
def task_add(self, task: TaskCreateRequest) -> Task:
"""Add a task.
Args:
task (TaskCreateRequest): Task create request.
Returns:
Task: Created task.
"""
return Task(**self.http.post("/tasks/", | |
<reponame>bruinxiong/pytorch3d<gh_stars>0
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""
Sanity checks for output images from the renderer.
"""
import os
import unittest
from collections import namedtuple
import numpy as np
import torch
from common_testing import (
TestCaseMixin,
get_pytorch3d_dir,
get_tests_dir,
load_rgb_image,
)
from PIL import Image
from pytorch3d.io import load_obj
from pytorch3d.renderer.cameras import (
FoVOrthographicCameras,
FoVPerspectiveCameras,
OrthographicCameras,
PerspectiveCameras,
look_at_view_transform,
)
from pytorch3d.renderer.lighting import AmbientLights, PointLights
from pytorch3d.renderer.materials import Materials
from pytorch3d.renderer.mesh import TexturesAtlas, TexturesUV, TexturesVertex
from pytorch3d.renderer.mesh.rasterizer import MeshRasterizer, RasterizationSettings
from pytorch3d.renderer.mesh.renderer import MeshRenderer, MeshRendererWithFragments
from pytorch3d.renderer.mesh.shader import (
BlendParams,
HardFlatShader,
HardGouraudShader,
HardPhongShader,
SoftPhongShader,
SoftSilhouetteShader,
TexturedSoftPhongShader,
)
from pytorch3d.structures.meshes import (
Meshes,
join_meshes_as_batch,
join_meshes_as_scene,
)
from pytorch3d.utils.ico_sphere import ico_sphere
from pytorch3d.utils.torus import torus
# If DEBUG=True, save out images generated in the tests for debugging.
# All saved images have prefix DEBUG_
DEBUG = False
DATA_DIR = get_tests_dir() / "data"
TUTORIAL_DATA_DIR = get_pytorch3d_dir() / "docs/tutorials/data"
ShaderTest = namedtuple("ShaderTest", ["shader", "reference_name", "debug_name"])
class TestRenderMeshes(TestCaseMixin, unittest.TestCase):
    def test_simple_sphere(self, elevated_camera=False, check_depth=False):
        """
        Test output of phong and gouraud shading matches a reference image using
        the default values for the light sources.

        Args:
            elevated_camera: Defines whether the camera observing the scene should
                have an elevation of 45 degrees.
            check_depth: If True, render via MeshRendererWithFragments and also
                verify the returned zbuf against the bare rasterizer and the
                alpha channel against the pix_to_face foreground mask.
        """
        device = torch.device("cuda:0")
        # Init mesh
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded()
        faces_padded = sphere_mesh.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)
        # Init rasterizer settings
        if elevated_camera:
            # Elevated and rotated camera
            R, T = look_at_view_transform(dist=2.7, elev=45.0, azim=45.0)
            postfix = "_elevated_"
            # If y axis is up, the spot of light should
            # be on the bottom left of the sphere.
        else:
            # No elevation or azimuth rotation
            R, T = look_at_view_transform(2.7, 0.0, 0.0)
            postfix = "_"
        # Exercise every supported camera type against its reference image.
        for cam_type in (
            FoVPerspectiveCameras,
            FoVOrthographicCameras,
            PerspectiveCameras,
            OrthographicCameras,
        ):
            cameras = cam_type(device=device, R=R, T=T)
            # Init shader settings
            materials = Materials(device=device)
            lights = PointLights(device=device)
            lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]
            raster_settings = RasterizationSettings(
                image_size=512, blur_radius=0.0, faces_per_pixel=1
            )
            rasterizer = MeshRasterizer(
                cameras=cameras, raster_settings=raster_settings
            )
            blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))
            # Test several shaders
            shader_tests = [
                ShaderTest(HardPhongShader, "phong", "hard_phong"),
                ShaderTest(HardGouraudShader, "gouraud", "hard_gouraud"),
                ShaderTest(HardFlatShader, "flat", "hard_flat"),
            ]
            for test in shader_tests:
                shader = test.shader(
                    lights=lights,
                    cameras=cameras,
                    materials=materials,
                    blend_params=blend_params,
                )
                if check_depth:
                    renderer = MeshRendererWithFragments(
                        rasterizer=rasterizer, shader=shader
                    )
                    images, fragments = renderer(sphere_mesh)
                    self.assertClose(fragments.zbuf, rasterizer(sphere_mesh).zbuf)
                    # Check the alpha channel is the mask
                    self.assertClose(
                        images[..., -1], (fragments.pix_to_face[..., 0] >= 0).float()
                    )
                else:
                    renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
                    images = renderer(sphere_mesh)
                # Compare RGB output against the stored reference image.
                rgb = images[0, ..., :3].squeeze().cpu()
                filename = "simple_sphere_light_%s%s%s.png" % (
                    test.reference_name,
                    postfix,
                    cam_type.__name__,
                )
                image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)
                self.assertClose(rgb, image_ref, atol=0.05)
                if DEBUG:
                    debug_filename = "simple_sphere_light_%s%s%s.png" % (
                        test.debug_name,
                        postfix,
                        cam_type.__name__,
                    )
                    filename = "DEBUG_%s" % debug_filename
                    Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                        DATA_DIR / filename
                    )
            ########################################################
            # Move the light to the +z axis in world space so it is
            # behind the sphere. Note that +Z is in, +Y up,
            # +X left for both world and camera space.
            ########################################################
            lights.location[..., 2] = -2.0
            phong_shader = HardPhongShader(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            if check_depth:
                phong_renderer = MeshRendererWithFragments(
                    rasterizer=rasterizer, shader=phong_shader
                )
                images, fragments = phong_renderer(sphere_mesh, lights=lights)
                self.assertClose(
                    fragments.zbuf, rasterizer(sphere_mesh, lights=lights).zbuf
                )
                # Check the alpha channel is the mask
                self.assertClose(
                    images[..., -1], (fragments.pix_to_face[..., 0] >= 0).float()
                )
            else:
                phong_renderer = MeshRenderer(
                    rasterizer=rasterizer, shader=phong_shader
                )
                images = phong_renderer(sphere_mesh, lights=lights)
            # The sphere should now be dark; compare with the "dark" reference.
            rgb = images[0, ..., :3].squeeze().cpu()
            if DEBUG:
                filename = "DEBUG_simple_sphere_dark%s%s.png" % (
                    postfix,
                    cam_type.__name__,
                )
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / filename
                )
            image_ref_phong_dark = load_rgb_image(
                "test_simple_sphere_dark%s%s.png" % (postfix, cam_type.__name__),
                DATA_DIR,
            )
            self.assertClose(rgb, image_ref_phong_dark, atol=0.05)
    def test_simple_sphere_elevated_camera(self):
        """
        Test output of phong and gouraud shading matches a reference image using
        the default values for the light sources.
        The rendering is performed with a camera that has non-zero elevation.
        """
        # Delegates to test_simple_sphere with an elevated/rotated camera.
        self.test_simple_sphere(elevated_camera=True)
    def test_simple_sphere_depth(self):
        """
        Test rendering with MeshRendererWithFragments: the RGB output must
        still match the reference images, the returned fragments' zbuf must
        match the bare rasterizer's, and the alpha channel must equal the
        foreground (pix_to_face) mask. Uses the default (non-elevated) camera.
        """
        # Delegates to test_simple_sphere with depth/mask checking enabled.
        self.test_simple_sphere(check_depth=True)
    def test_simple_sphere_screen(self):
        """
        Test output when rendering with PerspectiveCameras & OrthographicCameras
        in NDC vs screen space: cameras defined in screen space (pixel-valued
        principal point / focal length with an explicit image_size) must
        reproduce the same phong reference images as the NDC cameras.
        """
        device = torch.device("cuda:0")
        # Init mesh
        sphere_mesh = ico_sphere(5, device)
        verts_padded = sphere_mesh.verts_padded()
        faces_padded = sphere_mesh.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]
        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1
        )
        for cam_type in (PerspectiveCameras, OrthographicCameras):
            # Screen-space intrinsics: principal point at the image center of
            # a 512x512 image, focal length 256 pixels.
            cameras = cam_type(
                device=device,
                R=R,
                T=T,
                principal_point=((256.0, 256.0),),
                focal_length=((256.0, 256.0),),
                image_size=((512, 512),),
            )
            rasterizer = MeshRasterizer(
                cameras=cameras, raster_settings=raster_settings
            )
            blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))
            shader = HardPhongShader(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            images = renderer(sphere_mesh)
            rgb = images[0, ..., :3].squeeze().cpu()
            # Same reference images as the NDC-space phong renders.
            filename = "test_simple_sphere_light_phong_%s.png" % cam_type.__name__
            image_ref = load_rgb_image(filename, DATA_DIR)
            self.assertClose(rgb, image_ref, atol=0.05)
    def test_simple_sphere_batched(self):
        """
        Test a mesh with vertex textures can be extended to form a batch, and
        is rendered correctly with Phong, Gouraud and Flat Shaders with batched
        lighting and hard and soft blending. Every image in the batch must
        match the single-mesh reference image.
        """
        batch_size = 5
        device = torch.device("cuda:0")
        # Init mesh with vertex textures.
        sphere_meshes = ico_sphere(5, device).extend(batch_size)
        verts_padded = sphere_meshes.verts_padded()
        faces_padded = sphere_meshes.faces_padded()
        feats = torch.ones_like(verts_padded, device=device)
        textures = TexturesVertex(verts_features=feats)
        sphere_meshes = Meshes(
            verts=verts_padded, faces=faces_padded, textures=textures
        )
        # Init rasterizer settings: identical camera replicated batch_size times.
        dist = torch.tensor([2.7]).repeat(batch_size).to(device)
        elev = torch.zeros_like(dist)
        azim = torch.zeros_like(dist)
        R, T = look_at_view_transform(dist, elev, azim)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=4
        )
        # Init shader settings (one light per batch element, same location).
        materials = Materials(device=device)
        lights_location = torch.tensor([0.0, 0.0, +2.0], device=device)
        lights_location = lights_location[None].expand(batch_size, -1)
        lights = PointLights(device=device, location=lights_location)
        blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))
        # Init renderer
        rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
        shader_tests = [
            ShaderTest(HardPhongShader, "phong", "hard_phong"),
            ShaderTest(SoftPhongShader, "phong", "soft_phong"),
            ShaderTest(HardGouraudShader, "gouraud", "hard_gouraud"),
            ShaderTest(HardFlatShader, "flat", "hard_flat"),
        ]
        for test in shader_tests:
            reference_name = test.reference_name
            debug_name = test.debug_name
            shader = test.shader(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
            images = renderer(sphere_meshes)
            image_ref = load_rgb_image(
                "test_simple_sphere_light_%s_%s.png"
                % (reference_name, type(cameras).__name__),
                DATA_DIR,
            )
            # Every batch element should match the single-mesh reference.
            for i in range(batch_size):
                rgb = images[i, ..., :3].squeeze().cpu()
                if i == 0 and DEBUG:
                    filename = "DEBUG_simple_sphere_batched_%s_%s.png" % (
                        debug_name,
                        type(cameras).__name__,
                    )
                    Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                        DATA_DIR / filename
                    )
                self.assertClose(rgb, image_ref, atol=0.05)
def test_silhouette_with_grad(self):
    """
    Test silhouette blending. Also check that gradient calculation works.

    Renders an ico-sphere silhouette with each supported camera type,
    compares the alpha channel against a stored reference image, and then
    verifies that gradients flow back to the mesh vertices.
    """
    device = torch.device("cuda:0")
    sphere_mesh = ico_sphere(5, device)
    verts, faces = sphere_mesh.get_mesh_verts_faces(0)
    sphere_mesh = Meshes(verts=[verts], faces=[faces])
    blend_params = BlendParams(sigma=1e-4, gamma=1e-4)
    # blur_radius is tied to the blend sigma so the soft rasterization has
    # enough support; many faces_per_pixel are needed for soft blending
    raster_settings = RasterizationSettings(
        image_size=512,
        blur_radius=np.log(1.0 / 1e-4 - 1.0) * blend_params.sigma,
        faces_per_pixel=80,
        clip_barycentric_coords=True,
    )
    # Init rasterizer settings
    R, T = look_at_view_transform(2.7, 0, 0)
    for cam_type in (
        FoVPerspectiveCameras,
        FoVOrthographicCameras,
        PerspectiveCameras,
        OrthographicCameras,
    ):
        cameras = cam_type(device=device, R=R, T=T)
        # Init renderer
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras, raster_settings=raster_settings
            ),
            shader=SoftSilhouetteShader(blend_params=blend_params),
        )
        images = renderer(sphere_mesh)
        # the silhouette lives in the alpha channel (index 3)
        alpha = images[0, ..., 3].squeeze().cpu()
        if DEBUG:
            filename = os.path.join(
                DATA_DIR, "DEBUG_%s_silhouette.png" % (cam_type.__name__)
            )
            Image.fromarray((alpha.detach().numpy() * 255).astype(np.uint8)).save(
                filename
            )
        ref_filename = "test_%s_silhouette.png" % (cam_type.__name__)
        image_ref_filename = DATA_DIR / ref_filename
        with Image.open(image_ref_filename) as raw_image_ref:
            image_ref = torch.from_numpy(np.array(raw_image_ref))
        image_ref = image_ref.to(dtype=torch.float32) / 255.0
        self.assertClose(alpha, image_ref, atol=0.055)
        # Check grad exist
        # NOTE(review): the mesh is rebuilt so the autograd graph contains the
        # requires_grad leaf; verts.grad appears to accumulate across camera
        # types since it is never zeroed — the test only checks it is not None.
        verts.requires_grad = True
        sphere_mesh = Meshes(verts=[verts], faces=[faces])
        images = renderer(sphere_mesh)
        images[0, ...].sum().backward()
        self.assertIsNotNone(verts.grad)
def test_texture_map(self):
"""
Test a mesh with a texture map is loaded and rendered correctly.
The pupils in the eyes of the cow should always be looking to the left.
"""
device = torch.device("cuda:0")
obj_filename = TUTORIAL_DATA_DIR / "cow_mesh/cow.obj"
# Load mesh + texture
verts, faces, aux = load_obj(
obj_filename, device=device, load_textures=True, texture_wrap=None
)
tex_map = list(aux.texture_images.values())[0]
tex_map = tex_map[None, ...].to(faces.textures_idx.device)
textures = TexturesUV(
maps=tex_map, faces_uvs=[faces.textures_idx], verts_uvs=[aux.verts_uvs]
)
mesh = Meshes(verts=[verts], faces=[faces.verts_idx], textures=textures)
# Init rasterizer settings
R, T = look_at_view_transform(2.7, 0, 0)
cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
raster_settings = RasterizationSettings(
image_size=512, blur_radius=0.0, faces_per_pixel=1
)
# Init shader settings
materials = Materials(device=device)
lights = PointLights(device=device)
# Place light behind the cow in world space. The front of
# the cow is facing the -z direction.
lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]
blend_params = BlendParams(
sigma=1e-1,
gamma=1e-4,
background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
)
# Init renderer
renderer = MeshRenderer(
rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
shader=TexturedSoftPhongShader(
lights=lights,
cameras=cameras,
materials=materials,
blend_params=blend_params,
),
)
# Load reference image
image_ref = load_rgb_image("test_texture_map_back.png", DATA_DIR)
for bin_size in | |
<filename>ae.py<gh_stars>1-10
"""
@summary: Python attribute editor template extending Maya's
Some methods have been re-implemented to have better control
The coding convention deliberately follows Maya's camelCase
@run: import coop.ae as cae (suggested)
@license: MIT
@repository: https://github.com/artineering-io/maya-coop
"""
from __future__ import print_function
from __future__ import unicode_literals
import maya.cmds as cmds
import maya.mel as mel
from . import lib as clib
from . import qt as cqt
from . import logger as clog
from PySide2 import QtWidgets
from functools import partial
LOG = clog.logger("coop.ae")  # module-level logger for attribute-editor helpers
ATTR_WIDGETS = dict()  # Index of Custom Attribute Widgets
PLAIN_ATTR_DATA = dict()  # widget kwargs per attribute, used by the Maya < 2022 mel path
class AETemplate(object):
    """
    Base class for python Attribute Editor templates.
    Based on the Template object from:
    Python/Lib/site-packages/maya/internal/common/ae/template.py
    Extended for actual production usage
    """

    def __init__(self, node_name, extra_attributes=True):
        """
        Template constructor
        Args:
            node_name (unicode): Node name passed from mel template
            extra_attributes (bool): If 'Extra Attributes' should be automatically added
        """
        self.nodeName = node_name
        cmds.editorTemplate(beginScrollLayout=True)
        self.build_ui(node_name)
        if extra_attributes:
            cmds.editorTemplate(addExtraControls=True)
        cmds.editorTemplate(endScrollLayout=True)

    @staticmethod
    def suppress(control):
        """
        Suppress control (attribute) from appearing in the attribute editor
        Args:
            control (unicode): Name of control (attribute) to suppress
        """
        cmds.editorTemplate(suppress=control)

    @staticmethod
    def add_control(control, ann="", lab="", callback=None):
        """
        Adds a named control
        Args:
            control (unicode): Name of control (attribute) to add
            ann (unicode): Annotation to appear in the tooltip (if any)
            lab (unicode): Nice name of attribute (if any)
            callback (func): Function to call if something happens
        """
        if clib.get_maya_version() > 2020:
            control = clib.u_enlist(control)
            if callback:
                control.append(callback)
            if lab:
                cmds.editorTemplate(label=lab, ann=ann, addControl=control)
            else:
                cmds.editorTemplate(ann=ann, addControl=control)
        else:
            # mel as cmds.editorTemplate doesn't work below 2022
            cmd = 'editorTemplate -ann "{}"'.format(ann)
            if lab:
                cmd += ' -lab "{}"'.format(lab)
            cmd += ' -addControl "{}"'.format(control)
            if callback:
                clib.print_warning("Callbacks are not supported by add_control() on Maya < 2022")
                clib.print_info("Use custom_control(PlainAttrGrp()) instead")
            mel.eval(cmd)
        # Note: cmds.editorTemplate(queryName=[node, attr]) returns None until the
        # AE is actually shown, so the created control name can't be queried here.

    def add_controls(self, controls):
        """
        Adds a list of controls
        Args:
            controls (list): List of controls to add (string names)
        """
        for c in controls:
            self.add_control(c)

    @staticmethod
    def separator(add=True):
        """
        Adds a separator to the template.
        Args:
            add (bool): If separator should be added or not
        """
        if add:
            cmds.editorTemplate(addSeparator=True)

    def build_ui(self, node_name):
        """
        This method needs to be overriden to create the custom UI
        Args:
            node_name (unicode): Name of the node to build UI for
        """
        raise NotImplementedError("build_ui() has not been implemented")

    def suppress_caching_frozen_node_state(self):
        """ Suppresses the caching, frozen and nodeState attributes from appearing in the Attribute Editor """
        self.suppress("caching")
        self.suppress("frozen")
        self.suppress("nodeState")

    def call_template(self, template_name):
        """
        Appends an attribute editor template
        Args:
            template_name (unicode): Node name of the attribute editor template
        """
        mel.eval(u'AE{0}Template {1}'.format(template_name, self.nodeName))

    @staticmethod
    def call_custom(new_proc, replace_proc, module, *args):
        """
        If targeting only Maya 2022+, use custom_control() instead.
        Calls a custom command to generate custom UIs in the attribute editor.
        The callCustom flag of editorTemplate only works with mel commands, so this
        method creates a mel wrapper to call Python functions within the module.
        Args:
            new_proc (unicode): Procedure to add a new UI item
            replace_proc (unicode): Procedure to edit a UI item depending on selection
            module (unicode): Module where the python versions of the new and replace functions are
            *args (any): Arguments to pass onto the procedure
        """
        import_cmd = 'python("import {}");'.format(module)  # importing the module where the python functions are
        new_proc_cmd = 'global proc {}('.format(new_proc)
        replace_proc_cmd = 'global proc {}('.format(replace_proc)
        mel_cmd = 'editorTemplate -callCustom "{}" "{}" '.format(new_proc, replace_proc)
        py_args = ""
        mel_fmt_args = ""
        # build callCustom commands and procedures
        for i, arg in enumerate(args):
            mel_fmt_args += "-stringArg $arg{} ".format(i + 1)
            if clib.is_string(arg):
                mel_cmd += '"{}" '.format(arg)
                py_args += "'^{}s', ".format(i + 1)
                new_proc_cmd += "string $arg{}, ".format(i + 1)
                replace_proc_cmd += "string $arg{}, ".format(i + 1)
            else:
                mel_cmd += '{} '.format(arg)
                # BUGFIX: accumulate with '+=' (plain '=' discarded all
                # previously gathered python arguments)
                py_args += '{}, '.format(arg)
                # BUGFIX: index with i + 1 so the mel parameter names line up
                # with the string branch and never collide (e.g. $arg1 twice)
                if isinstance(arg, int):
                    new_proc_cmd += "int $arg{}, ".format(i + 1)
                    replace_proc_cmd += "int $arg{}, ".format(i + 1)
                elif isinstance(arg, float):
                    new_proc_cmd += "float $arg{}, ".format(i + 1)
                    replace_proc_cmd += "float $arg{}, ".format(i + 1)
                else:
                    cmds.error("Variable of type '{}' has not been implemented yet in call_custom".format(type(arg)))
        mel_cmd = mel_cmd[:-1] + ";"
        if args:
            # strip the trailing ", " of the procedure parameter lists
            new_proc_cmd = new_proc_cmd[:-2] + ') { python('
            replace_proc_cmd = replace_proc_cmd[:-2] + ') { python('
        else:
            # ROBUSTNESS: with no args there is no ", " to strip — slicing
            # [:-2] would have eaten into the procedure name
            new_proc_cmd += ') { python('
            replace_proc_cmd += ') { python('
        if mel_fmt_args:
            new_proc_cmd += "`format " + mel_fmt_args
            replace_proc_cmd += "`format " + mel_fmt_args
        new_proc_cmd += '"{}.{}('.format(module, new_proc)
        replace_proc_cmd += '"{}.{}('.format(module, replace_proc)
        new_proc_cmd += py_args[:-2]
        replace_proc_cmd += py_args[:-2]
        new_proc_cmd += ')"'
        replace_proc_cmd += ')"'
        if mel_fmt_args:
            new_proc_cmd += '`'
            replace_proc_cmd += '`'
        new_proc_cmd += ');}'
        replace_proc_cmd += ');}'
        # evaluate mel commands
        mel.eval(import_cmd)
        mel.eval(new_proc_cmd)
        mel.eval(replace_proc_cmd)
        mel.eval(mel_cmd)

    @staticmethod
    def custom_control(custom_obj, attrs):
        """
        Adds a custom control to the template.
        Args:
            custom_obj (class): The custom control object,
                A class with buildControlUI() and replaceControlUI()
            attrs (unicode, list): The attributes that this control manages
        """
        if clib.get_maya_version() > 2020:
            def create(*args):
                custom_obj.on_create(args)  # calls build_control_ui()

            def replace(*args):
                custom_obj.on_replace(args)  # calls replace_control_ui()

            cmds.editorTemplate(attrs, callCustom=[create, replace])
        else:
            # mel wrapping it is because cmds.editorTemplate doesn't work properly prior Maya 2022
            global PLAIN_ATTR_DATA
            PLAIN_ATTR_DATA[attrs] = custom_obj.build_kwargs  # we store the widget format data in a global
            AETemplate.call_custom("_ae_plain_attr_new", "_ae_plain_attr_replace", __name__,
                                   attrs)

    class Layout:
        """
        Editor template layout which enables the use of:
        with self.Layout(name, collapse):
            pass
        """
        def __init__(self, name, collapse=False):
            self.collapse = collapse
            self.name = name

        def __enter__(self):
            cmds.editorTemplate(beginLayout=self.name, collapse=self.collapse)

        def __exit__(self, mytype, value, tb):
            cmds.editorTemplate(endLayout=True)
##################################################################################
class CustomControl(object):
    """
    Virtual base class for custom controls within the attribute editor.
    Based on the CustomControl object from:
    Python/Lib/site-packages/maya/internal/common/ae/common.py
    Extended for actual production usage.
    Subclasses override build_control_ui()/replace_control_ui() (and optionally
    on_close()/set_enable()) to implement the actual widget behavior.
    """

    def __init__(self, *args, **kwargs):
        """
        Store the construction arguments; the managed plug is only resolved
        later, when Maya invokes on_create()/on_replace().
        Args:
            *args: Positional arguments passed to the custom control
            **kwargs: Keyword arguments passed to the custom control
        """
        # the managed 'node.attr' is unknown until Maya hands it over
        self.node_name = None
        self.plug_name = None
        self.attr_name = None
        self.build_args = args
        self.build_kwargs = kwargs

    def _resolve_plug(self, control_args):
        """Cache the plug ('node.attr') this control currently manages"""
        self.plug_name = control_args[0] if control_args else ""
        self.node_name, self.attr_name = clib.split_node_attr(self.plug_name)

    def on_create(self, *args):
        """
        Run when the Custom Control is created
        Args:
            *args: Arguments passed to the control (usually the node.attr it manages)
        """
        self._resolve_plug(args[0])
        ae_parent = cmds.setParent(q=True)
        # tidy up once the AE layout hosting this control goes away
        cmds.scriptJob(uiDeleted=[ae_parent, self.on_close], runOnce=True)
        self.build_control_ui()

    def on_replace(self, *args):
        """
        Run when the Custom Control is replaced/updated as the context has changed
        Args:
            *args: Arguments passed to the control (usually the new node.attr it manages)
        """
        self._resolve_plug(args[0])
        self.replace_control_ui()

    def on_close(self):
        """ Override this class with the commands to 'close' the UI """
        pass

    def build_control_ui(self):
        """ Override this class with the commands to 'build' the UI """
        pass

    def replace_control_ui(self):
        """ Override this class with the commands to 'replace' the UI """
        pass

    def set_enable(self, enable):
        """ Override this class with the commands to 'enable/disable' the UI """
        pass
class PlainAttrGrp(CustomControl):
    """ Maya attribute controls created depending on the type of the attribute """

    def build_control_ui(self):
        """ Builds the widget for the managed attribute, if it exists """
        plug = self.plug_name
        node_name, attr_name = clib.split_node_attr(plug)
        if not cmds.attributeQuery(attr_name, n=node_name, ex=True):
            LOG.error("{} doesn't exist".format(plug))
            return
        # push/pop the AE UI template so the widget inherits the standard AE look
        cmds.setUITemplate("attributeEditorTemplate", pushTemplate=True)
        try:
            _plain_attr_widget(plug, self.build_kwargs)
        finally:
            cmds.setUITemplate(popTemplate=True)

    def replace_control_ui(self):
        """ Updates/replaces the widget when the AE shows a different node """
        callback = self.build_kwargs.get('callback', None)
        _plain_attr_widget_update(self.plug_name, callback)
def _plain_attr_widget(node_attr, kwargs):
"""
Creates a plain attribute widget depending on the type of attribute
Args:
node_attr (unicode): The plug name in the form of 'node.attr'
kwargs (dict): Keyword arguments that were passed to the custom control
"""
global ATTR_WIDGETS # keeps track of the created controls for the different node attributes
node, attr = clib.split_node_attr(node_attr)
lab = kwargs.get('lab', cmds.attributeQuery(attr, n=node, niceName=True))
ann = kwargs.get('ann', "")
callback = kwargs.get('callback', None)
if callback:
callback = partial(callback, node) # better than using lambdas
obj_type = cmds.objectType(node)
widget_name = "{}{}".format(obj_type, attr)
_check_attr_widgets(widget_name)
attr_type = cmds.attributeQuery(attr, n=node, attributeType=True)
ctrl = ""
if attr_type == "float":
if "map" not in | |
<gh_stars>0
from py42._compat import str
from py42._compat import UserDict
from py42._compat import UserList
from py42.clients.settings import check_lock
from py42.clients.settings import SettingProperty
from py42.clients.settings import show_change
from py42.clients.settings._converters import bool_to_str
from py42.clients.settings._converters import days_to_minutes
from py42.clients.settings._converters import minutes_to_days
from py42.clients.settings._converters import str_to_bool
from py42.exceptions import Py42Error
# Shared error instances raised by the settings classes below.
# NOTE(review): these are module-level Py42Error *instances* (not classes),
# so every raise re-uses the same exception object.
invalid_destination_error = Py42Error(
    u"Invalid destination guid or destination not offered to device's Org."
)
destination_not_added_error = Py42Error(
    u"Destination is not added to device, unable to lock."
)
class DeviceSettingsDefaults(UserDict, object):
    """Class used for managing an Organization's Device Default settings. Also acts as a
    base class for `DeviceSettings` to manage individual device settings."""

    def __init__(self, device_dict, org_settings):
        # NOTE: mutates `device_dict` in place (re-nests serviceBackupConfig
        # under a "settings" key) and shares org_settings' change tracker, so
        # edits recorded here surface on the org settings object.
        self.data = device_dict
        self._org_settings = org_settings
        self.changes = org_settings.changes
        self._destinations = org_settings.data[u"settings"][u"destinations"]
        self.data[u"settings"] = {
            u"serviceBackupConfig": self.data[u"serviceBackupConfig"]
        }
        bs = self.data[u"serviceBackupConfig"][u"backupConfig"][u"backupSets"]
        self.backup_sets = self._extract_backup_sets(bs)

    def _extract_backup_sets(self, backup_sets):
        """Normalize the raw "backupSets" payload into a list of BackupSet objects.
        The value is a dict when the number of sets is locked, a single dict when
        only one set is configured, and a list otherwise."""
        if isinstance(backup_sets, dict):  # number of sets are locked
            backup_sets = backup_sets["backupSet"]
            if isinstance(backup_sets, dict):  # there's only one set configured
                return [BackupSet(self, backup_sets)]
            elif isinstance(backup_sets, list):
                return [BackupSet(self, bs) for bs in backup_sets]
            else:
                raise Py42Error("Unable to extract backup sets: {}".format(backup_sets))
        else:
            return [BackupSet(self, bs) for bs in backup_sets]

    @property
    def available_destinations(self):
        """Returns a dict of destinations available to be used by devices. Dict keys are
        destination guids and values are destination names.
        """
        return {d[u"guid"]: d[u"destinationName"] for d in self._destinations}

    # SettingProperty descriptors below read/write nested keys of `self.data`,
    # converting between the API's string representations and python values.
    warning_email_enabled = SettingProperty(
        name=u"warning_email_enabled",
        location=[u"settings", u"serviceBackupConfig", u"warningEmailEnabled"],
        get_converter=str_to_bool,
        set_converter=bool_to_str,
    )
    """Determines if backup "warning" threshold email alerts are configured for this device."""

    critical_email_enabled = SettingProperty(
        name=u"critical_email_enabled",
        location=[u"settings", u"serviceBackupConfig", u"severeEmailEnabled"],
        get_converter=str_to_bool,
        set_converter=bool_to_str,
    )
    """Determines if backup "critical" threshold email alerts are configured for this device."""

    warning_alert_days = SettingProperty(
        name=u"warning_alert_days",
        location=[u"settings", u"serviceBackupConfig", u"minutesUntilWarning"],
        get_converter=minutes_to_days,
        set_converter=days_to_minutes,
    )
    """The number of days a device can go without any backup activity before
    "warning" alert threshold is passed.
    """

    critical_alert_days = SettingProperty(
        name=u"critical_alert_days",
        location=[u"settings", u"serviceBackupConfig", u"minutesUntilSevere"],
        get_converter=minutes_to_days,
        set_converter=days_to_minutes,
    )
    """The number of days a device can go without any backup activity before "critical"
    alert threshold is passed.
    """

    backup_status_email_enabled = SettingProperty(
        name=u"backup_status_email_enabled",
        location=[u"settings", u"serviceBackupConfig", u"backupStatusEmailEnabled"],
        get_converter=str_to_bool,
        set_converter=bool_to_str,
    )
    """Determines if the regularly scheduled backup status email is enabled."""

    backup_status_email_frequency_days = SettingProperty(
        name=u"backup_status_email_frequency_days",
        location=[
            u"settings",
            u"serviceBackupConfig",
            u"backupStatusEmailFreqInMinutes",
        ],
        get_converter=minutes_to_days,
        set_converter=days_to_minutes,
    )
    """Determines the frequency of the regularly scheduled backup status email."""

    def __repr__(self):
        return u"<DeviceSettingsDefaults: org_id: {}>".format(self._org_settings.org_id)
class DeviceSettings(DeviceSettingsDefaults):
    """Class used to manage an individual device's settings."""

    def __init__(self, device_dict):
        # NOTE: deliberately does not call the base __init__ — an individual
        # device tracks its own changes and reads destinations from the
        # device payload rather than from org settings.
        self.changes = {}
        self.data = device_dict
        self._destinations = device_dict[u"availableDestinations"]
        bs = self.data[u"settings"][u"serviceBackupConfig"][u"backupConfig"][
            u"backupSets"
        ]
        self.backup_sets = self._extract_backup_sets(bs)
        """List of :class:`BackupSet` objects used to manage this device's backup set configurations."""

    @property
    def computer_id(self):
        """Identifier of this device. Read-only."""
        return self.data[u"computerId"]

    @property
    def device_id(self):
        """Identifier of this device (alias of `.computer_id`). Read only."""
        return self.computer_id

    @property
    def guid(self):
        """Globally unique identifier of this device. Read-only."""
        return self.data[u"guid"]

    @property
    def org_id(self):
        """Identifier of the organization this device belongs to. Read-only."""
        return self.data[u"orgId"]

    @property
    def user_id(self):
        """Identifier of the user this device belongs to. Read-only."""
        return self.data[u"userId"]

    @property
    def version(self):
        """Latest reported Code42 client version number for this device. Read-only."""
        return self.data[u"version"]

    # Writable settings, tracked through SettingProperty descriptors.
    name = SettingProperty(name=u"name", location=[u"name"])
    """Name for this device."""

    external_reference = SettingProperty(
        name=u"external_reference", location=[u"computerExtRef"]
    )
    """External reference field for this device."""

    notes = SettingProperty(name=u"notes", location=[u"notes"])
    """Notes field for this device."""

    def __repr__(self):
        return u"<DeviceSettings: guid: {}, name: {}>".format(
            self.data[u"guid"], self.data[u"name"]
        )
class BackupSet(UserDict, object):
"""Helper class for managing device backup sets and Org device default backup sets."""
def __init__(self, settings_manager, backup_set_dict):
    # `settings_manager` is the owning DeviceSettings/DeviceSettingsDefaults;
    # its change tracker is shared so edits made here are recorded on it.
    self._manager = settings_manager
    self._changes = settings_manager.changes
    self.data = backup_set_dict
    includes, excludes = self._extract_file_selection_lists()
    regex_excludes = self._extract_regex_exclusions()
    # Tracked lists — presumably they record mutations into the shared
    # change dict (TODO confirm against TrackedFileSelectionList).
    self._included_files = TrackedFileSelectionList(
        self, u"included_files", includes, self._changes
    )
    self._excluded_files = TrackedFileSelectionList(
        self, u"excluded_files", excludes, self._changes
    )
    self._filename_exclusions = TrackedFileSelectionList(
        self, u"filename_exclusions", regex_excludes, self._changes
    )
    # snapshot of destinations at construction time for later comparison
    self._orig_destinations = self.destinations
def _extract_file_selection_lists(self):
    """Converts the file selection portion of the settings dict ("pathset") into two
    lists of just paths, `included` and `excluded`.
    The "pathset" object is a different shape depending on how many paths it
    contains and whether its locked or not:
    No paths: `[{"@cleared": "true", "@os": "Linux"}]`
    No paths locked: `{'@locked': 'true', 'paths': {'@cleared': 'true', '@os': 'Linux'}}`
    One path: `[{"path": {"@include": "C:/"}, "@os": "Linux"}]`
    One path locked: `{'@locked': 'true', 'paths': {'@os': 'Linux', 'path': {'@include': 'C:/'}}}`
    One+ paths: `[{"path": [{"@include": "C:/Users/"},{"@exclude": "C:/Users/Admin/"},],"@os": "Linux"}]`
    One+ paths locked: `{'@locked': 'true', 'paths': {'@os': 'Linux', 'path': [{'@include': 'C:/Users/'}, {'@exclude': 'C:/Users/Admin/'}]}}`
    """
    pathset = self.data[u"backupPaths"][u"pathset"]
    if isinstance(pathset, dict):  # pathset is locked
        path_list = pathset[u"paths"].get(u"path")
    else:
        path_list = pathset[0].get(u"path")
    # no paths selected
    if path_list is None:
        return [], []
    # one path selected
    if isinstance(path_list, dict):
        path_list = [path_list]
    # each path entry is tagged with either "@include" or "@exclude"
    includes = [p[u"@include"] for p in path_list if u"@include" in p]
    excludes = [p[u"@exclude"] for p in path_list if u"@exclude" in p]
    return includes, excludes
def _extract_regex_exclusions(self):
    """Converts the filename exclusion portion ("excludeUser") of the settings dict
    into a simple list of regex patterns.
    The "excludeUser" object is a different shape based on the number of exclusion
    patterns present and whether the setting is locked or not:
    No exclusions: `[{"windows": [], "linux": [], "macintosh": []}]`
    No exclusions locked: `{'@locked': 'true', 'patternList': {'windows': [], 'macintosh': [], 'linux': []}}`
    One exclusion: `[{"windows": [], "pattern": {"@regex": ".*"}, "linux": [], "macintosh": []}]`
    One exclusion locked: `{'@locked': 'true', 'patternList': {'pattern': {'@regex': '.*'}, 'windows': [], 'macintosh': [], 'linux': []}}`
    One+ exclusions: `[{"windows": [], "pattern": [{"@regex": ".*1"}, {"@regex": ".*2"}],"linux": [],"macintosh": []}]
    One+ exclusion locked: `{'@locked': 'true', 'patternList': {'pattern': [{'@regex': '.*1'}, {'@regex': '.*2'}], 'windows': [], 'macintosh': [], 'linux': []}}`
    """
    exclude_user = self.data[u"backupPaths"][u"excludeUser"]
    if isinstance(exclude_user, dict):  # exclusions are locked
        pattern_list = exclude_user[u"patternList"].get(u"pattern")
    else:
        pattern_list = exclude_user[0].get(u"pattern")
    if not pattern_list:
        return []
    # a single exclusion arrives as a bare dict — normalize to a list
    if isinstance(pattern_list, dict):
        pattern_list = [pattern_list]
    return [p[u"@regex"] for p in pattern_list]
def _build_file_selection(self):
    """Rebuild the api's "pathset" object from the user-friendly include/exclude
    path lists. Called whenever one of the file selection property lists
    (`.included_files`, `.excluded_files`) is modified.
    """
    selection = {u"@os": u"Linux", u"path": []}
    if self._included_files:
        entries = [{u"@include": p, u"@und": u"false"} for p in self._included_files]
        entries.extend({u"@exclude": p, u"@und": u"false"} for p in self._excluded_files)
        selection[u"path"] = entries
        selection[u"@cleared"] = u"false"
    else:
        # excluded values are meaningless when nothing is included
        selection[u"@cleared"] = u"true"
    self.data[u"backupPaths"][u"pathset"] = {u"paths": selection}
def _build_regex_exclusions(self):
    """Rebuild the api's "excludeUser" object from the `.filename_exclusions`
    regex list. Called whenever that property list is modified.
    """
    pattern_entries = [{u"@regex": pattern} for pattern in self._filename_exclusions]
    self.data[u"backupPaths"][u"excludeUser"] = {
        u"patternList": {
            u"pattern": pattern_entries,
            u"windows": {u"pattern": []},
            u"macintosh": {u"pattern": []},
            u"linux": {u"pattern": []},
        }
    }
@property
def locked(self):
    """Indicates whether the backup set as a whole is locked. If True, individual
    settings for this backup set (except for Destination settings), cannot be modified.
    """
    # "@locked" is an optional string flag ("true"/"false") on the raw dict
    return u"@locked" in self.data and str_to_bool(self.data[u"@locked"])
@property
def included_files(self):
    """The files/folders included in the backup selection. Mutate via normal
    list methods, or assign a new list/tuple to replace the selection wholesale.
    """
    return self._included_files

@included_files.setter
def included_files(self, value):
    if not isinstance(value, (list, tuple)):
        raise AttributeError(u"included files must be a list/tuple.")
    # swap contents in place rather than rebinding the tracked list object
    self._included_files.clear()
    self._included_files.extend(value)
@property
def excluded_files(self):
    """The files/folders excluded from the backup selection. Mutate via normal
    list methods, or assign a new list/tuple to replace the exclusions wholesale.
    """
    return self._excluded_files

@excluded_files.setter
def excluded_files(self, value):
    if not isinstance(value, (list, tuple)):
        raise AttributeError(u"excluded files must be a list/tuple.")
    # swap contents in place rather than rebinding the tracked list object
    self._excluded_files.clear()
    self._excluded_files.extend(value)
@property
def filename_exclusions(self):
    """The regex patterns used to exclude file paths from the backup selection.
    Mutate via normal list methods, or assign a new list/tuple to replace the
    patterns wholesale.
    """
    return self._filename_exclusions

@filename_exclusions.setter
def filename_exclusions(self, value):
    if not isinstance(value, (list, tuple)):
        raise AttributeError(u"filename exclusions must be a list/tuple.")
    # swap contents in place rather than rebinding the tracked list object
    self._filename_exclusions.clear()
    self._filename_exclusions.extend(value)
@property
def destinations(self):
"""Returns a dict of the destinations used for backup for the backup set. Dict
keys are the destination guids, values are the destination names.
"""
destination_dict = {}
if u"@cleared" in self.data[u"destinations"]:
return destination_dict
for d in self.data[u"destinations"]:
guid = d[u"@id"]
| |
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2020 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Tests for Mir class.
Performs a series of tests for the Mir class, which inherits from UVData. Note that
there is a separate test module for the MirParser class (mir_parser.py), which is
what is used to read the raw binary data into something that the Mir class can
manipulate into a UVData object.
"""
import os
import pytest
import numpy as np
from ... import UVData
from ...data import DATA_PATH
from ...uvdata.mir import mir_parser
@pytest.fixture
def mir_data_object():
    """Yield a fully-loaded MirParser pointed at the small SMA test dataset."""
    parser = mir_parser.MirParser(
        os.path.join(DATA_PATH, "sma_test.mir"),
        load_vis=True,
        load_raw=True,
        load_auto=True,
    )
    yield parser
    # cleanup
    del parser
@pytest.fixture
def uv_in_ms(tmp_path):
    """Yield (UVData read from the SMA test file, empty UVData, MS output path)."""
    source_path = os.path.join(DATA_PATH, "sma_test.mir")
    target_path = os.path.join(tmp_path, "outtest_mir.ms")
    reader = UVData()
    # Currently only one source is supported.
    reader.read(source_path)
    writer = UVData()
    yield reader, writer, target_path
    # cleanup
    del reader, writer
@pytest.fixture
def uv_in_uvfits(tmp_path):
    """Yield (UVData read from the SMA test file, empty UVData, uvfits output path)."""
    source_path = os.path.join(DATA_PATH, "sma_test.mir/")
    target_path = os.path.join(tmp_path, "outtest_mir.uvfits")
    reader = UVData()
    # Currently only one source is supported.
    reader.read(source_path, pseudo_cont=True)
    writer = UVData()
    yield reader, writer, target_path
    # cleanup
    del reader, writer
@pytest.fixture
def uv_in_uvh5(tmp_path):
    """Yield (UVData read from the SMA test file, empty UVData, uvh5 output path)."""
    source_path = os.path.join(DATA_PATH, "sma_test.mir")
    target_path = os.path.join(tmp_path, "outtest_mir.uvh5")
    reader = UVData()
    # Currently only one source is supported.
    reader.read(source_path)
    writer = UVData()
    yield reader, writer, target_path
    # cleanup
    del reader, writer
@pytest.mark.filterwarnings("ignore:LST values stored in this file are not ")
@pytest.mark.parametrize("future_shapes", [True, False])
def test_read_mir_write_uvfits(uv_in_uvfits, future_shapes):
    """
    Mir to uvfits loopback test.
    Read in Mir files, write out as uvfits, read back in and check for
    object equality.
    """
    mir_uv, uvfits_uv, testfile = uv_in_uvfits
    if future_shapes:
        mir_uv.use_future_array_shapes()
    mir_uv.write_uvfits(testfile, spoof_nonessential=True)
    uvfits_uv.read_uvfits(testfile)
    if future_shapes:
        uvfits_uv.use_future_array_shapes()
    # UVFITS doesn't allow for numbering of spectral windows like MIR does, so
    # we need an extra bit of handling here
    assert len(np.unique(mir_uv.spw_array)) == len(np.unique(uvfits_uv.spw_array))
    # map each uvfits spw number back to its mir counterpart and verify that
    # the per-channel window ids agree under that mapping
    spw_dict = {idx: jdx for idx, jdx in zip(uvfits_uv.spw_array, mir_uv.spw_array)}
    assert np.all(
        [
            idx == spw_dict[jdx]
            for idx, jdx in zip(mir_uv.flex_spw_id_array, uvfits_uv.flex_spw_id_array,)
        ]
    )
    # Now that we've checked, set this things as equivalent
    uvfits_uv.spw_array = mir_uv.spw_array
    uvfits_uv.flex_spw_id_array = mir_uv.flex_spw_id_array
    # Check the history first via find
    assert 0 == uvfits_uv.history.find(
        mir_uv.history + " Read/written with pyuvdata version:"
    )
    mir_uv.history = uvfits_uv.history
    # We have to do a bit of special handling for the phase_center_catalog, because
    # _very_ small errors (like last bit in the mantissa) creep in when passing through
    # the util function transform_sidereal_coords (for mutli-phase-ctr datasets). Verify
    # the two match up in terms of their coordinates
    for cat_name in mir_uv.phase_center_catalog.keys():
        assert np.isclose(
            mir_uv.phase_center_catalog[cat_name]["cat_lat"],
            uvfits_uv.phase_center_catalog[cat_name]["cat_lat"],
        )
        assert np.isclose(
            mir_uv.phase_center_catalog[cat_name]["cat_lon"],
            uvfits_uv.phase_center_catalog[cat_name]["cat_lon"],
        )
    uvfits_uv.phase_center_catalog = mir_uv.phase_center_catalog
    # There's a minor difference between what SMA calculates online for app coords
    # and what pyuvdata calculates, to the tune of ~1 arcsec. Check those values here,
    # then set them equal to one another.
    assert np.all(
        np.abs(mir_uv.phase_center_app_ra - uvfits_uv.phase_center_app_ra) < 1e-5
    )
    assert np.all(
        np.abs(mir_uv.phase_center_app_dec - uvfits_uv.phase_center_app_dec) < 1e-5
    )
    mir_uv._set_app_coords_helper()
    uvfits_uv._set_app_coords_helper()
    # make sure filenames are what we expect
    assert mir_uv.filename == ["sma_test.mir"]
    assert uvfits_uv.filename == ["outtest_mir.uvfits"]
    mir_uv.filename = uvfits_uv.filename
    # with all known differences reconciled, the objects should match exactly
    assert mir_uv == uvfits_uv
    # Since mir is mutli-phase-ctr by default, this should effectively be a no-op
    mir_uv._set_multi_phase_center()
    assert mir_uv == uvfits_uv
@pytest.mark.filterwarnings("ignore:LST values stored in this file are not ")
@pytest.mark.parametrize("future_shapes", [True, False])
def test_read_mir_write_ms(uv_in_ms, future_shapes):
    """
    Mir to MS loopback test.
    Read in Mir files, write out as ms, read back in and check for
    object equality.
    """
    pytest.importorskip("casacore")
    mir_uv, ms_uv, testfile = uv_in_ms
    if future_shapes:
        mir_uv.use_future_array_shapes()
    mir_uv.write_ms(testfile, clobber=True)
    ms_uv.read(testfile)
    if future_shapes:
        ms_uv.use_future_array_shapes()
    # There are some minor differences between the values stored by MIR and that
    # calculated by UVData. Since MS format requires these to be calculated on the fly,
    # we calculate them here just to verify that everything is looking okay.
    mir_uv.set_lsts_from_time_array()
    mir_uv._set_app_coords_helper()
    # These reorderings just make sure that data from the two formats are lined up
    # correctly.
    mir_uv.reorder_freqs(spw_order="number")
    ms_uv.reorder_blts()
    # MS doesn't have the concept of an "instrument" name like FITS does, and instead
    # defaults to the telescope name. Make sure that checks out here.
    assert mir_uv.instrument == "SWARM"
    assert ms_uv.instrument == "SMA"
    mir_uv.instrument = ms_uv.instrument
    # Quick check for history here
    assert ms_uv.history != mir_uv.history
    ms_uv.history = mir_uv.history
    # Only MS has extra keywords, verify those look as expected.
    assert ms_uv.extra_keywords == {"DATA_COL": "DATA", "observer": "SMA"}
    assert mir_uv.extra_keywords == {}
    mir_uv.extra_keywords = ms_uv.extra_keywords
    # Make sure the filenames line up as expected.
    assert mir_uv.filename == ["sma_test.mir"]
    assert ms_uv.filename == ["outtest_mir.ms"]
    mir_uv.filename = ms_uv.filename = None
    # Finally, with all exceptions handled, check for equality.
    assert ms_uv == mir_uv
@pytest.mark.filterwarnings("ignore:LST values stored ")
def test_read_mir_write_uvh5(uv_in_uvh5):
"""
Mir to uvfits loopback test.
Read in Mir files, write out as uvfits, read back in and check for
object equality.
"""
mir_uv, uvh5_uv, testfile = uv_in_uvh5
mir_uv.write_uvh5(testfile)
uvh5_uv.read_uvh5(testfile)
# Check the history first via find
assert 0 == uvh5_uv.history.find(
mir_uv.history + " Read/written with pyuvdata version:"
)
# test fails because of updated history, so this is our workaround for now.
mir_uv.history = uvh5_uv.history
# make sure filenames are what we expect
assert mir_uv.filename == ["sma_test.mir"]
assert uvh5_uv.filename == ["outtest_mir.uvh5"]
mir_uv.filename = uvh5_uv.filename
assert mir_uv == uvh5_uv
def test_write_mir(uv_in_uvfits, err_type=NotImplementedError):
    """
    Mir writer test.

    Check and make sure that attempts to use the (unimplemented) Mir writer
    raise a 'not implemented' error.
    """
    # Only the Mir object is needed here; the uvfits object and temp path
    # provided by the fixture are unused, so don't bind them to names.
    mir_uv, _, _ = uv_in_uvfits
    # Check and see if the correct error is raised
    with pytest.raises(err_type):
        mir_uv.write_mir("dummy.mir")
def test_multi_nchan_spw_read(tmp_path):
    """
    Mir to uvfits error test for spws of different sizes.

    Reading multiple correlator chunks produces spectral windows with
    differing channel counts, which the uvfits writer cannot represent;
    confirm it raises IndexError.
    """
    uv_data = UVData()
    uv_data.read_mir(
        os.path.join(DATA_PATH, "sma_test.mir"), corrchunk=[0, 1, 2, 3, 4]
    )
    outpath = os.path.join(tmp_path, "dummy.mirtest.uvfits")
    with pytest.raises(IndexError):
        uv_data.write_uvfits(outpath, spoof_nonessential=True)
def test_read_mir_no_records():
    """
    Mir no-records check.

    Make sure that mir correctly raises IndexError for each selection
    keyword that yields no matching records.
    """
    testfile = os.path.join(DATA_PATH, "sma_test.mir")
    uv_in = UVData()
    # Each entry pairs selection kwargs with the error message they trigger.
    bad_selections = (
        ({"isource": -1}, "No valid sources selected!"),
        ({"irec": -1}, "No valid records matching those selections!"),
        ({"isb": []}, "No valid sidebands selected!"),
        ({"isb": [-156]}, "isb values contain invalid entries"),
    )
    for select_kwargs, err_msg in bad_selections:
        with pytest.raises(IndexError, match=err_msg):
            uv_in.read_mir(testfile, **select_kwargs)
def test_read_mir_sideband_select():
    """
    Mir sideband read check.

    Read the lower and upper sidebands out of a MIR file individually,
    combine them, and verify the result matches a double-sideband read.
    """
    testfile = os.path.join(DATA_PATH, "sma_test.mir")
    dual_sideband = UVData()
    dual_sideband.read(testfile)
    # Re-order here so that we can more easily compare the two
    dual_sideband.reorder_freqs(channel_order="freq", spw_order="freq")
    # Drop the history
    dual_sideband.history = ""
    lower = UVData()
    lower.read(testfile, isb=[0])
    upper = UVData()
    upper.read(testfile, isb=[1])
    recombined = lower + upper
    # Re-order here so that we can more easily compare the two
    recombined.reorder_freqs(spw_order="freq", channel_order="freq")
    # Drop the history
    recombined.history = ""
    assert dual_sideband == recombined
def test_mir_auto_read(
    err_type=IndexError, err_msg="Could not determine auto-correlation record size!"
):
    """
    Mir read tester.

    Make sure that Mir autocorrelations are read correctly, and that scanning
    with a bogus chunk count raises the expected error.
    """
    testfile = os.path.join(DATA_PATH, "sma_test.mir")
    mir_data = mir_parser.MirParser(testfile, has_auto=True)
    # scan_auto_data must raise here, so there is no result to capture;
    # the previous `ac_data = ...` assignment inside this block was dead code.
    with pytest.raises(err_type, match=err_msg):
        mir_data.scan_auto_data(testfile, nchunks=999)
    ac_data = mir_data.scan_auto_data(testfile)
    assert np.all(ac_data["nchunks"] == 8)
    mir_data.load_data(load_vis=False, load_auto=True)
    # Select the relevant auto records, which should be for spwin 0-3
    auto_data = mir_data.read_auto_data(testfile, ac_data)[:, 0:4, :, :]
    # Entries must either match exactly or both be NaN (NaN != NaN, so the
    # plain equality check alone would reject matching NaN positions).
    assert np.all(
        np.logical_or(
            auto_data == mir_data.auto_data,
            np.logical_and(np.isnan(auto_data), np.isnan(mir_data.auto_data)),
        )
    )
    mir_data.unload_data()
# Below are a series of checks that are designed to check to make sure that the
# MirParser class is able to produce consistent values from an engineering data
# set (originally stored in /data/engineering/mir_data/200724_16:35:14), to make
# sure that we haven't broken the ability of the reader to handle the data. Since
# this file is the basis for the above checks, we've put this here rather than in
# test_mir_parser.py
def test_mir_remember_me_record_lengths(mir_data_object):
"""
Mir record length checker
Make sure the test file containts the right number of records
"""
mir_data = mir_data_object
# Check to make sure we've got the right number of records everywhere
# ac_read only exists if has_auto=True
if mir_data.ac_read is not None:
assert len(mir_data.ac_read) == 2
else:
# This should only occur when has_auto=False
assert not mir_data._has_auto
assert len(mir_data.bl_read) == 4
assert len(mir_data.codes_read) == 99
assert len(mir_data.eng_read) | |
'S', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'N', 'W', 'W', 'N', 'N', 'E', 'E', 'N', 'N', 'E', 'N']
# E7 F5 G5 H5
# Score 4 for path: ['E', 'S', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'N', 'W', 'W', 'N', 'N', 'E', 'E', 'N', 'E', 'N', 'N']
# E7 F5 G5 H5
# Score 4 for path: ['E', 'S', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'N', 'W', 'W', 'N', 'N', 'E', 'E', 'E', 'N', 'N', 'N']
# E7 F5 G5 H5
# Score 4 for path: ['E', 'S', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'N', 'N', 'W', 'N', 'E', 'E', 'N', 'N', 'E', 'N']
# E7 F5 G5 H7
# Score 4 for path: ['E', 'S', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'N', 'N', 'W', 'N', 'E', 'E', 'N', 'E', 'N', 'N']
# E7 F5 G5 H7
# Score 4 for path: ['E', 'S', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'N', 'E', 'N', 'W', 'N', 'E', 'N', 'N', 'E', 'N']
# D4 E7 F5 H7
# Score 4 for path: ['E', 'S', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'N', 'E', 'N', 'W', 'N', 'E', 'N', 'E', 'N', 'N']
# D4 E7 F5 H7
# Score 4 for path: ['E', 'S', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'N', 'W', 'N', 'N', 'E', 'E', 'N', 'N', 'E', 'N']
# E7 F5 G5 H5
# Score 4 for path: ['E', 'S', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'N', 'W', 'N', 'N', 'E', 'E', 'N', 'E', 'N', 'N']
# E7 F5 G5 H5
# Score 4 for path: ['E', 'S', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'N', 'W', 'N', 'N', 'E', 'E', 'E', 'N', 'N', 'N']
# E7 F5 G5 H5
# Score 4 for path: ['E', 'S', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'W', 'N', 'N', 'N', 'E', 'E', 'N', 'N', 'E', 'N']
# E7 F5 G5 H5
# Score 4 for path: ['E', 'S', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'W', 'N', 'N', 'N', 'E', 'E', 'N', 'E', 'N', 'N']
# E7 F5 G5 H5
# Score 4 for path: ['E', 'S', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'W', 'N', 'N', 'N', 'E', 'E', 'E', 'N', 'N', 'N']
# E7 F5 G5 H5
# Score 4 for path: ['E', 'E', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'N', 'E', 'N', 'W', 'N', 'E', 'N', 'N', 'N', 'E']
# D4 D5 F5 H7
# Score 4 for path: ['E', 'E', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'N', 'E', 'N', 'W', 'N', 'E', 'N', 'N', 'E', 'N']
# D4 D5 F5 H7
# Score 4 for path: ['W', 'S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'N', 'W', 'N', 'N', 'E', 'E', 'N', 'N', 'E', 'N']
# E7 F7 G5 H5
# Score 4 for path: ['W', 'S', 'E', 'E', 'S', 'S', 'W', 'W', 'W', 'W', 'W', 'N', 'N', 'N', 'E', 'E', 'N', 'N', 'E', 'N']
# E7 F7 G5 H5
# Score 4 for path: ['N', 'N', 'N', 'E', 'N', 'N', 'N', 'E', 'S', 'S', 'E', 'S', 'S', 'E', 'E', 'S', 'S', 'W', 'W', 'W']
# A7 C8 D8 E10
# Score 4 for path: ['N', 'N', 'N', 'E', 'N', 'N', 'N', 'E', 'S', 'S', 'E', 'S', 'E', 'S', 'E', 'S', 'S', 'W', 'W', 'W']
# A7 C8 D8 E10
# Score 4 for path: ['N', 'N', 'N', 'E', 'N', 'N', 'N', 'E', 'S', 'S', 'E', 'S', 'E', 'E', 'S', 'S', 'S', 'W', 'W', 'W']
# A7 C8 D8 E10
# Score 4 for path: ['N', 'N', 'N', 'E', 'N', 'N', 'N', 'E', 'S', 'E', 'S', 'S', 'S', 'E', 'E', 'S', 'S', 'W', 'W', 'W']
# A7 C8 D8 E10
# Score 4 for path: ['N', 'N', 'N', 'E', 'N', 'N', 'N', 'E', 'S', 'E', 'S', 'S', 'E', 'S', 'E', 'S', 'S', 'W', 'W', 'W']
# A7 C8 D8 E10
# Score 5 for path: ['N', 'N', 'N', 'E', 'N', 'N', 'N', 'E', 'S', 'E', 'S', 'S', 'E', 'E', 'S', 'S', 'S', 'W', 'W', 'W']
# A7 A8 C8 D8 E10
# Score 4 for path: ['N', 'N', 'N', 'E', 'N', 'N', 'N', 'E', 'S', 'E', 'S', 'S', 'E', 'E', 'S', 'S', 'W', 'S', 'W', 'W']
# A8 C9 D8 E10
# Score 4 for path: ['N', 'N', 'N', 'E', 'N', 'N', 'N', 'E', 'E', 'S', 'S', 'S', 'S', 'W', 'S', 'E', 'E', 'S', 'W', 'W']
# A8 C9 C10 F7
# Score 4 for path: ['N', 'N', 'N', 'E', 'S', 'S', 'E', 'S', 'E', 'N', 'N', 'N', 'N', 'E', 'N', 'W', 'W', 'W', 'N', 'E']
# A7 A8 C8 C9
# Score 4 for path: ['N', 'N', 'N', 'E', 'S', 'S', 'E', 'S', 'E', 'N', 'N', 'N', 'E', 'N', 'N', 'W', 'W', 'W', 'N', 'E']
# A7 A8 C8 C9
# Score 4 for path: ['N', 'N', 'N', 'E', 'S', 'S', 'E', 'S', 'E', 'N', 'N', 'N', 'E', 'N', 'W', 'N', 'E', 'N', 'W', 'W']
# A8 C8 C9 F7
# Score 4 for path: ['N', 'N', 'N', 'E', 'S', 'S', 'E', 'S', 'E', 'N', 'N', 'N', 'E', 'N', 'W', 'N', 'W', 'W', 'N', 'E']
# A7 A8 C8 C9
# Score 4 for path: ['N', 'N', 'N', 'E', 'S', 'S', 'E', 'S', 'E', 'N', 'N', 'N', 'E', 'N', 'W', 'W', 'W', 'N', 'N', 'E']
# A7 C8 C9 F7
# Score 4 for path: ['N', 'N', 'N', 'E', 'S', 'E', 'S', 'S', 'E', 'N', 'N', 'N', 'N', 'E', 'N', 'W', 'W', 'W', 'N', 'E']
# A7 A8 C9 C10
# Score 4 for path: ['N', 'N', 'E', 'N', 'N', 'N', 'N', 'E', 'S', 'E', 'S', 'S', 'E', 'E', 'S', 'S', 'S', 'W', 'W', 'W']
# A7 A8 C8 E10
# Score 4 for path: ['N', 'N', 'E', 'N', 'N', 'N', 'N', 'E', 'E', 'S', 'S', 'S', 'S', 'W', 'S', 'E', 'E', 'S', 'W', 'W']
# A8 C9 C10 F7
# Score 4 for path: ['N', 'E', 'N', 'N', 'N', 'N', 'N', 'E', 'S', 'E', 'S', 'S', 'E', 'E', 'S', 'S', 'S', 'W', 'W', 'W']
# A7 A8 C8 E10
# Score 4 for path: ['E', 'N', 'N', 'N', 'N', 'N', 'N', 'E', 'S', 'E', 'S', 'S', 'E', 'E', 'S', 'S', 'S', 'W', 'W', 'W']
# A7 A8 C8 E10
# Score 4 for path: ['S', 'S', 'S', 'E', 'S', 'S', 'S', 'E', 'E', 'N', 'N', 'N', 'E', 'N', 'E', 'N', 'W', 'W', 'W', 'N']
# A1 A2 B4 E2
# Score 4 for path: ['S', 'S', 'E', 'S', 'S', 'S', 'S', 'E', 'E', 'N', 'N', 'N', 'E', 'N', 'E', 'N', 'W', 'W', 'W', 'N']
# A1 A2 B4 E2
# Score 4 for path: ['S', 'E', 'S', 'S', 'S', 'S', 'S', 'E', 'E', 'N', 'N', 'N', 'E', 'N', 'E', 'N', 'W', 'W', 'W', 'N']
# A1 A2 B4 E2
# Score 4 for path: ['E', 'S', 'S', 'S', 'S', 'S', 'E', 'S', 'E', 'N', 'N', 'N', 'E', 'N', 'W', 'N', 'E', 'N', 'W', 'W']
# A2 C2 C3 F1
# Score 4 for path: ['E', 'N', 'W', 'W', 'N', 'W', 'N', 'E', 'N', 'N', 'N', 'E', 'S', 'E', 'S', 'S', 'E', 'N', 'N', 'N']
# C7 C8 E8 E9
# Score 4 for path: ['E', 'N', 'W', 'W', 'N', 'W', 'N', 'E', 'N', 'N', 'N', 'E', 'E', 'S', 'S', 'S', 'E', 'N', 'N', 'N']
# C8 E8 E9 H7
# Score 4 for path: ['E', 'N', 'W', 'W', 'W', 'N', 'N', 'E', 'N', 'N', 'N', 'E', 'S', 'E', 'S', 'S', 'E', 'N', 'N', 'N']
# C7 C8 E8 E9
# Score 4 for path: ['E', 'N', 'W', 'W', 'W', 'N', 'N', 'E', 'N', 'N', 'N', 'E', 'E', 'S', 'S', 'S', 'E', 'N', 'N', 'N']
# C8 E8 E9 H7
# Score 4 for path: ['E', 'N', 'W', 'W', 'W', 'N', 'E', 'N', 'N', 'N', 'N', 'E', 'S', 'E', 'S', 'S', 'E', 'N', 'N', 'N']
# C7 C8 E8 E9
# Score 4 for path: ['E', 'N', 'W', 'W', 'W', 'N', 'E', 'N', 'N', 'N', 'N', 'E', 'E', 'S', 'S', 'S', 'E', 'N', 'N', 'N']
# C8 E8 E9 H7
# Score 4 for path: ['W', 'W', 'N', 'N', 'E', 'E', 'N', 'W', 'N', 'N', 'N', 'E', 'E', 'S', 'S', 'S', 'S', 'S', 'E', 'S']
# | |
<filename>projects/hog/tests/10.py
test = {
'name': 'Question 10',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> bacon_strategy(0, 9, cutoff=8, num_rolls=5)
26f5762c932a578994ea1c8fc7fa6c02
# locked
""",
'hidden': False,
'locked': True
},
{
'code': r"""
>>> bacon_strategy(9, 0, cutoff=6, num_rolls=5)
962aea5f59fc55bd65ccacf4603c8f22
# locked
""",
'hidden': False,
'locked': True
},
{
'code': r"""
>>> bacon_strategy(50, 2, cutoff=9, num_rolls=5)
26f5762c932a578994ea1c8fc7fa6c02
# locked
""",
'hidden': False,
'locked': True
},
{
'code': r"""
>>> bacon_strategy(32, 0, cutoff=5, num_rolls=4)
962aea5f59fc55bd65ccacf4603c8f22
# locked
""",
'hidden': False,
'locked': True
},
{
'code': r"""
>>> bacon_strategy(20, 1, cutoff=1, num_rolls=4)
962aea5f59fc55bd65ccacf4603c8f22
# locked
""",
'hidden': False,
'locked': True
},
{
'code': r"""
>>> from tests.check_strategy import check_strategy
>>> check_strategy(bacon_strategy)
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> from hog import *
""",
'teardown': '',
'type': 'doctest'
},
{
'cases': [
{
'code': r"""
>>> bacon_strategy(44, 47, 0, 4)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(37, 12, 8, 10)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(40, 15, 13, 9)
9
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(24, 3, 8, 1)
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(46, 55, 5, 2)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(99, 78, 15, 7)
7
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(10, 73, 3, 5)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(47, 68, 3, 4)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(67, 84, 17, 10)
10
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(92, 54, 1, 7)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(9, 15, 0, 2)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(25, 63, 16, 2)
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(75, 27, 6, 2)
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(82, 48, 10, 1)
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(88, 12, 7, 10)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(72, 12, 5, 8)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(41, 69, 1, 5)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(15, 6, 16, 7)
7
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(42, 19, 5, 2)
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(93, 98, 8, 4)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(99, 90, 15, 10)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(73, 79, 4, 1)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(4, 44, 0, 5)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(83, 40, 9, 7)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(34, 3, 0, 8)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(4, 62, 15, 7)
7
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(53, 62, 6, 1)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(19, 56, 8, 9)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(1, 5, 0, 4)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(85, 34, 8, 1)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(37, 37, 13, 5)
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(82, 87, 16, 3)
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(87, 43, 5, 7)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(20, 7, 2, 3)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(33, 85, 4, 4)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(73, 15, 12, 8)
8
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(5, 98, 8, 2)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(15, 76, 3, 4)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(33, 75, 19, 5)
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(9, 41, 0, 5)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(70, 91, 7, 6)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(64, 35, 12, 3)
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(51, 92, 14, 8)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(68, 64, 17, 6)
6
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(20, 35, 17, 4)
4
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(75, 30, 3, 1)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(61, 69, 8, 5)
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(7, 6, 7, 9)
9
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(0, 51, 17, 5)
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(42, 45, 8, 6)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(48, 96, 11, 2)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(57, 96, 9, 6)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(28, 11, 13, 8)
8
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(25, 29, 5, 7)
7
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(69, 2, 15, 8)
8
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(77, 26, 7, 9)
9
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(85, 15, 0, 3)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(79, 86, 5, 7)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(35, 32, 14, 6)
6
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(49, 44, 13, 8)
8
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(77, 65, 6, 4)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(99, 18, 2, 1)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(18, 24, 17, 10)
10
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(44, 11, 18, 1)
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(68, 38, 17, 5)
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(46, 63, 8, 6)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(20, 60, 19, 6)
6
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(67, 53, 10, 6)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(63, 39, 4, 1)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(54, 75, 9, 8)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(78, 86, 18, 9)
9
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(45, 11, 8, 9)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(88, 19, 14, 6)
6
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(22, 18, 14, 1)
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(30, 91, 9, 10)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(19, 81, 8, 1)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> bacon_strategy(33, 7, 0, | |
<reponame>IFinners/ligrarian
#!/usr/bin/env python3
"""Automatically update Goodreads and local Spreadsheet with book read info.
Args:
Three operational modes (g)ui, (s)earch or (u)rl
gui arguments:
None
search arguments:
Title of Book: Enclosed in double quotation marks
Author of Book: Enclosed in double quotation marks
Format: (p)aperback, (h)ardcover, (k)indle or (e)book
Read Date: (t)oday, (y)esterday or a date formatted DD/MM/YYYY
Rating: Number between 1 and 5
Review (Optional): Enclosed in double quotation marks
url arguments:
URL: Goodreads URL for the book
Read Date: (t)oday, (y)esterday or a date formatted DD/MM/YYYY
Rating: Number between 1 and 5
Review (Optional): Enclosed in double quotation marks
"""
import argparse
import configparser
from datetime import datetime as dt
from datetime import timedelta
import sys
import tkinter as tk
from tkinter import messagebox
import bs4
import openpyxl
import requests
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
class Gui:
    """Acts as the base of the GUI and contains the associated methods."""

    def __init__(self, master, settings):
        """Gui class constructor to initialise Gui object.

        Args:
            master (obj): tkinter TK object - base object for the GUI.
            settings (dict): User settings used to pre-fill the widgets
                (email, password, and the default format/rating).
        """
        self.master = master
        self.master.title("Ligrarian")
        self.master.geometry('665x560')
        self.settings = settings
        # Populated by parse_input() when the user submits the form.
        self.info = {}
        # Labels
        login_label = tk.Label(self.master, text="Login")
        login_label.grid(row=2, column=1, sticky='W',
                         pady=(30, 5), padx=10)
        self.main_label = tk.Label(self.master, text='Search')
        self.main_label.grid(row=3, column=1, sticky='W', padx=10)
        date_label = tk.Label(self.master, text="Date")
        date_label.grid(row=5, column=1, sticky='W', padx=10)
        self.format_label = tk.Label(self.master, text="Format")
        self.format_label.grid(row=6, column=1, sticky='W', padx=10)
        rating_label = tk.Label(self.master, text="Rating")
        rating_label.grid(row=7, column=1, sticky='W', padx=10)
        review_label = tk.Label(
            self.master, text="Review\n (optional)", padx=10
        )
        review_label.grid(row=8, column=1, sticky='W')
        # Widgets
        # Email entry: pre-filled from settings, placeholder text otherwise.
        self.email = tk.Entry(self.master, width=20)
        email = self.settings['email']
        if email:
            self.email.insert(0, email)
        else:
            self.email.insert(0, 'Email')
        self.email.grid(row=2, column=2, columnspan=3,
                        sticky='W', pady=(30, 5))
        # Password entry: masked with asterisks when one is already saved.
        self.password = tk.Entry(self.master, width=20)
        password = self.settings['password']
        if password:
            self.password.insert(0, '********')
        else:
            self.password.insert(0, 'Password')
        self.password.grid(row=2, column=4, columnspan=3, sticky='W',
                           pady=(30, 5))
        self.save_choice = tk.IntVar()
        save_box = tk.Checkbutton(
            self.master, text='Save Password', variable=self.save_choice,
            onvalue=True, offvalue=False
        )
        save_box.grid(row=2, column=7, sticky='W', pady=(30, 5))
        # Pre-tick the save box if a password was already stored.
        if password:
            save_box.select()
        # 'main' holds either the search terms or the URL, depending on mode.
        self.main = tk.Entry(self.master, width=45)
        self.main.grid(row=3, column=2, columnspan=6, sticky='W', pady=10)
        self.mode = tk.IntVar()
        mode_button = tk.Checkbutton(
            self.master, text='URL Mode', variable=self.mode,
            onvalue=True, offvalue=False,
            command=self.mode_switch
        )
        mode_button.grid(row=3, column=7, sticky='W')
        formats = ("Paperback", "Hardback", "Kindle", "Ebook",)
        self.format = tk.StringVar()
        self.format.set(self.settings["format"])
        self.format_menu = tk.OptionMenu(self.master, self.format, *formats)
        self.format_menu.grid(row=6, column=2, columnspan=3,
                              sticky='W', pady=5)
        self.date = tk.Entry(self.master, width=10)
        self.date.insert(0, 'DD/MM/YYYY')
        self.date.grid(row=5, column=2, sticky='W', pady=10, ipady=3, ipadx=5)
        today_button = tk.Button(self.master, text="Today",
                                 command=self.set_date)
        today_button.grid(row=5, column=3, sticky='W', pady=10,)
        yesterday_button = tk.Button(self.master, text="Yesterday",
                                     command=lambda: self.set_date(True))
        yesterday_button.grid(row=5, column=4, sticky='W', pady=10)
        stars = ("1", "2", "3", "4", "5")
        self.rating = tk.StringVar()
        self.rating.set(self.settings["rating"])
        rating_menu = tk.OptionMenu(self.master, self.rating, *stars)
        rating_menu.grid(row=7, column=2, sticky='W', pady=5)
        self.review = tk.Text(self.master, height=15,
                              width=75, wrap=tk.WORD)
        self.review.grid(row=8, column=2, columnspan=7, sticky='W', pady=5)
        submit_button = tk.Button(self.master, text="Mark as Read",
                                  command=self.parse_input)
        submit_button.grid(row=12, column=7, columnspan=2, sticky='E', pady=15)

    def mode_switch(self):
        """Modify displayed widgets and edit label text depending on mode."""
        if self.mode.get():
            # URL mode: the edition's format is implied by the URL, so hide
            # the format widgets.
            self.format_label.grid_remove()
            self.format_menu.grid_remove()
            self.main_label.configure(text='URL')
        else:
            self.format_label.grid()
            self.format_menu.grid()
            self.main_label.configure(text='Search')

    def set_date(self, yesterday=False):
        """Set date widget to a new value.

        Args:
            yesterday (bool): Whether to set date to yesterday's date or not.
        """
        self.date.delete(0, 10)
        self.date.insert(0, get_date_str(yesterday))

    def parse_input(self):
        """Create input dictionary and test required info has been given."""
        # NOTE: replaces the IntVar with its int value; mode_switch would
        # break if called after this, but the window is destroyed on success.
        self.mode = self.mode.get()
        self.settings['email'] = self.email.get()
        password = self.password.get()
        if password == '********':
            # Entry still shows the mask, so fall back to the stored password.
            password = self.settings['password']
        if self.save_choice.get():
            self.settings['password'] = password
        # NOTE(review): if 'Save Password' is unchecked, a freshly typed
        # password is discarded here — confirm the caller re-prompts for it.
        self.info = {
            'main': self.main.get(),
            'date': self.date.get(),
            'format': self.format.get(),
            'rating': self.rating.get(),
            'review': self.review.get('1.0', 'end-1c'),
        }
        try:
            # Required fields must differ from their placeholder text.
            assert self.settings['email'] != 'Email'
            assert self.settings['password'] != 'Password'
            assert self.info['main']
            if not self.mode:
                assert self.info['format']
            self.master.destroy()
        except AssertionError:
            messagebox.showwarning(message="Complete all non-optional "
                                   "fields before marking as read."
                                   )
def retrieve_settings():
    """Retrieve settings from settings.ini and return values as dictionary.

    Section names are discarded; all options are merged into one flat dict.
    The 'prompt' and 'headless' options are converted from their stored
    'True'/'False' strings into real booleans.

    Returns:
        settings (dict): Dictionary with option: value format.
    """
    config = configparser.ConfigParser()
    config.read('settings.ini')
    settings = {}
    for section in config.sections():
        for key, value in config.items(section):
            if key in ('prompt', 'headless'):
                # A comparison already yields a bool; the old bool(...) wrapper
                # around it was redundant.
                value = value == "True"
            settings[key] = value
    return settings
def create_gui(settings_dict):
    """Create GUI instance and return it.

    Blocks in the tkinter mainloop until the window is destroyed (either by
    a successful submit in Gui.parse_input or by the user closing it).

    Args:
        settings_dict (dict): User settings passed through to the Gui.

    Returns:
        Gui: The Gui instance, with its .info/.mode/.settings populated.
    """
    root = tk.Tk()
    # Treat closing the window as quitting the program.
    root.protocol("WM_DELETE_WINDOW", exit)
    gui = Gui(root, settings_dict)
    root.mainloop()
    return gui
def gui_mode_details_edits(gui):
    """Rekey the gui's 'main' entry according to the gui's mode.

    In URL mode the 'main' value becomes 'url'; otherwise it becomes
    'search'. The 'main' key is removed either way.

    Args:
        gui (obj): Instance of GUI class.

    Returns:
        dict: The gui's info dictionary with the mode-appropriate key.
    """
    target_key = 'url' if gui.mode else 'search'
    gui.info[target_key] = gui.info.pop('main')
    return gui.info
def get_date_str(yesterday=False):
    """Return today's or yesterday's date as a 'DD/MM/YYYY' string.

    Args:
        yesterday (bool): Whether to get yesterday's date or not.

    Returns:
        Strftime datetime string formatted 'DD/MM/YYYY'.
    """
    offset = timedelta(1) if yesterday else timedelta(0)
    return (dt.now() - offset).strftime('%d/%m/%Y')
def parse_arguments():
    """Set up parsers/subparsers and parse command line arguments.

    Returns:
        Dictionary of parsed arguments.
    """
    parser = argparse.ArgumentParser(description="Goodreads updater")
    subparsers = parser.add_subparsers(help="Choose (u)rl, (s)earch or (g)ui")

    def add_common_arguments(subparser):
        # date, rating, and review are shared by the url and search modes;
        # they must be added after each mode's own positionals.
        subparser.add_argument('date', help=("(t)oday, (y)esterday or "
                                             "date formatted DD/MM/YYYY"))
        subparser.add_argument('rating', type=int, metavar='rating',
                               choices=[1, 2, 3, 4, 5],
                               help="A number 1 through 5")
        subparser.add_argument('review', nargs='?', metavar="'review'",
                               help="Review enclosed in quotes")

    url_mode = subparsers.add_parser("url", aliases=['u'])
    url_mode.add_argument('url', metavar="url",
                          help="Book's Goodreads URL within quotes")
    add_common_arguments(url_mode)

    search_mode = subparsers.add_parser('search', aliases=['s'])
    search_mode.add_argument('search', metavar="'search terms'",
                             help="Search terms to use e.g. Book title "
                                  "and Author")
    search_mode.add_argument('format', metavar='format',
                             choices=['e', 'h', 'k', 'p'],
                             help="(p)aperback, (h)ardcover, "
                                  "(k)indle, (e)book")
    add_common_arguments(search_mode)

    gui_mode = subparsers.add_parser("gui", aliases=['g'])
    gui_mode.add_argument('gui', action='store_true',
                          help="Invoke GUI (Defaults to True)")
    return vars(parser.parse_args())
def create_driver(run_headless):
    """Create and return the appropriate Firefox driver for the session.

    Args:
        run_headless (bool): Run in headless mode or not
    """
    if not run_headless:
        print('Opening a computer controlled browser and updating Goodreads')
        return webdriver.Firefox()
    print(('Opening a headless computer controlled browser and updating '
           'Goodreads'))
    headless_options = Options()
    headless_options.headless = True
    return webdriver.Firefox(options=headless_options)
def check_and_prompt_for_email_password(settings_dict):
    """Assess if email and password are missing and if so prompt for them.

    Prompts on stdin for any missing credential; optionally stores the
    password back into the settings and toggles the save-password prompt.

    Args:
        settings_dict (dict): Dictionary of user settings; mutated in place.
    """
    if not settings_dict['email']:
        settings_dict['email'] = input('Email: ')
    if not settings_dict['password']:
        password = input('Password: ')
        if settings_dict['prompt']:
            save = input("Save Password?(y/n): ")
            if save.lower() == 'y':
                settings_dict['password'] = password
            elif save.lower() == 'n':
                disable = input("Disable save Password prompt?(y/n): ")
                if disable.lower() == 'y':
                    settings_dict['prompt'] = False
                else:
                    settings_dict['prompt'] = True
        # NOTE(review): when 'prompt' is disabled (or the save answer is
        # neither y nor n) the typed password is never written into
        # settings_dict — confirm the caller uses the local value it needs.
def write_initial_config():
    """Write the initial, default settings.ini file."""
    config = configparser.ConfigParser()
    # Populate all three sections in one go with the shipped defaults.
    config.read_dict({
        'user': {'email': '',
                 'password': ''},
        'settings': {'prompt': 'False',
                     'path': './Ligrarian.xlsx',
                     'headless': 'False'},
        'defaults': {'format': 'Paperback',
                     'rating': '3'},
    })
    with open('settings.ini', 'w') as configfile:
        config.write(configfile)
def write_config(email, password, prompt):
    """Update credentials and prompt setting in the configuration file.

    Args:
        email (str): Email address to write to the config file.
        password (str): Password to write to the config file.
        prompt (bool): Save-prompt flag; stored as its string representation.
    """
    config = configparser.ConfigParser()
    config.read('settings.ini')
    updates = (
        ('user', 'email', email),
        ('user', 'password', password),
        ('settings', 'prompt', str(prompt)),
    )
    for section, option, value in updates:
        config.set(section, option, value)
    with open('settings.ini', 'w') as configfile:
        config.write(configfile)
def goodreads_login(driver, email, password):
    """Log in to a Goodreads account from the sign-in page.

    Exits the program (after closing the driver) if the login fails.

    Args:
        driver: Selenium webdriver to act upon.
        email (str): Email address to be entered.
        password (str): Password to be entered.
    """
    driver.get('https://goodreads.com/user/sign_in')
    email_elem = driver.find_element_by_name('user[email]')
    email_elem.send_keys(email)
    driver.find_element_by_name('user[password]').send_keys(
        password, Keys.ENTER)
    # The personal site header only exists for an authenticated session.
    try:
        driver.find_element_by_class_name('siteHeader__personal')
    except NoSuchElementException:
        print('Failed to login - Email and/or Password probably incorrect.')
        driver.close()
        sys.exit()
def goodreads_find(driver, terms):
 """Search Goodreads for the book and open its 'all editions' page.

 Args:
  driver: Selenium webdriver to act upon.
  terms (str): Terms to be used in the Goodreads search.

 Raises:
  SystemExit: Via sys.exit() when the search yields no results.
 """
 search_box = driver.find_element_by_class_name('searchBox__input')
 search_box.send_keys(terms, Keys.ENTER)
 try:
  editions_link = driver.find_element_by_partial_link_text('edition')
  editions_link.click()
 except NoSuchElementException:
  print("Failed to find book using those search terms.")
  driver.close()
  sys.exit()
def goodreads_filter(driver, book_format):
 """Filter the editions list by format and open the top result.

 Args:
  driver: Selenium webdriver to act upon.
  book_format (str): The format of the book.

 Returns:
  The URL of the book page the driver lands on.
 """
 unfiltered_url = driver.current_url
 # Apply the format filter.
 format_filter = driver.find_element_by_name('filter_by_format')
 format_filter.click()
 format_filter.send_keys(book_format, Keys.ENTER)
 # Wait until the filtered page has loaded before clicking the top book.
 WebDriverWait(driver, 10).until(EC.url_changes(unfiltered_url))
 driver.find_element_by_class_name('bookTitle').click()
 return driver.current_url
def goodreads_get_shelves(driver, rating):
"""Find and return list of 'Top Shelves' on Goodreads book page.
Args:
driver: Selenium webdriver to act upon.
rating (str): String representation of a number 1-5.
Returns:
list of strings of the 'shelve' categories on the current driver page.
"""
shelves_elems = driver.find_elements_by_class_name('actionLinkLite.'
'bookPageGenreLink')
shelves = []
for shelf in shelves_elems:
if ' | |
not list else sorted(set([lb for lbs in lb_df for lb in lbs]))
self.binlb = OrderedDict([(lb, i) for i, lb in enumerate(labels)])
self.binlbr = OrderedDict([(i, lb) for i, lb in enumerate(labels)])
self.mltl = True
elif (binlb is None):
lb_df = self.df[self.df[self.label_col].notnull()][self.label_col]
labels = sorted(set(lb_df)) if type(lb_df.iloc[0]) is not list else sorted(set([lb for lbs in lb_df for lb in lbs]))
self.binlb = OrderedDict([(lb, i) for i, lb in enumerate(labels)])
self.binlbr = OrderedDict([(i, lb) for i, lb in enumerate(labels)])
else:
self.binlb = binlb
self.binlbr = OrderedDict([(i, lb) for lb, i in binlb.items()])
self.encode_func = encode_func
self.tokenizer = tokenizer
if hasattr(tokenizer, 'vocab'):
self.vocab_size = len(tokenizer.vocab)
elif hasattr(tokenizer, 'vocab_size'):
self.vocab_size = tokenizer.vocab_size
self.transforms = transforms
self.transforms_args = transforms_args
self.transforms_kwargs = transforms_kwargs
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
record = self.df.iloc[idx]
sample = self.encode_func(record[self.text_col], self.tokenizer), record[self.label_col]
sample = self._transform_chain(sample)
return self.df.index[idx], (sample[0] if type(sample[0]) is str or type(sample[0][0]) is str else torch.tensor(sample[0])), torch.tensor(sample[1])
 def _transform_chain(self, sample):
  """Apply the configured transform(s) to one (X, y) sample, in order."""
  if self.transforms:
   # Normalise the transform spec and its kwargs into parallel lists; the
   # normalised form is written back so later calls skip this step.
   self.transforms = self.transforms if type(self.transforms) is list else [self.transforms]
   self.transforms_kwargs = self.transforms_kwargs if type(self.transforms_kwargs) is list else [self.transforms_kwargs]
   for transform, transform_kwargs in zip(self.transforms, self.transforms_kwargs):
    # NOTE(review): update() mutates the stored kwargs dict in place, so
    # transforms_args entries persist across calls — confirm intended.
    transform_kwargs.update(self.transforms_args)
    # A transform is either a callable or the name of a method on self.
    sample = transform(sample, **transform_kwargs) if callable(transform) else getattr(self, transform)(sample, **transform_kwargs)
  return sample
def _nmt_transform(self, sample, options=None, binlb={}):
if (len(binlb) > 0): self.binlb = binlb
return sample[0], [self.binlb.setdefault(y, len(self.binlb)) for y in sample[1]]
def _mltc_transform(self, sample, options=None, binlb={}):
if (len(binlb) > 0): self.binlb = binlb
return sample[0], self.binlb.setdefault(sample[1], len(self.binlb))
def _mltl_transform(self, sample, options=None, binlb={}, get_lb=lambda x: x.split(SC)):
if (len(binlb) > 0): self.binlb = binlb
labels = get_lb(sample[1])
return sample[0], [1 if lb in labels else 0 for lb in self.binlb.keys()]
 def fill_labels(self, lbs, binlb=True, index=None, saved_path=None, **kwargs):
  """Write predicted labels back into a copy of the pristine dataframe.

  Args:
   lbs: predicted labels, one per row (or one per `index` entry).
   binlb: when True, map numeric ids back to label strings via binlbr.
   index: optional row labels to assign into; otherwise the whole column.
   saved_path: optional CSV path; extra kwargs are passed to to_csv.

  Returns:
   The filled (deduplicated) copy of the raw dataframe.
  """
  if binlb and self.binlbr is not None:
   # Iterable label: multi-label -> ';'-joined active label names;
   # otherwise 'index_value' pairs joined with ','. Scalar label: reverse lookup.
   # NOTE(review): self.mltl is only assigned on one __init__ path — confirm
   # it is always set before iterable labels reach this point.
   lbs = [(';'.join([self.binlbr[l] for l in np.where(lb == 1)[0]]) if self.mltl else ','.join(['_'.join([str(i), str(l)]) for i, l in enumerate(lb)])) if hasattr(lb, '__iter__') else self.binlbr[lb] for lb in lbs]
  filled_df = self._df.copy(deep=True)[~self._df.index.duplicated(keep='first')]
  try:
   # NOTE(review): `if index:` uses truthiness — an empty list behaves like
   # None and some index types may raise here; confirm callers' types.
   if index:
    filled_df.loc[index, self.label_col] = lbs
   else:
    filled_df[self.label_col] = lbs
  except Exception as e:
   # Dump state for post-mortem debugging before re-raising.
   print(e)
   with open('pred_lbs.tmp', 'wb') as fd:
    pickle.dump((filled_df, index, self.label_col, lbs), fd)
   raise e
  if (saved_path is not None):
   filled_df.to_csv(saved_path, **kwargs)
  return filled_df
def rebalance(self):
if (self.binlb is None): return
task_cols, task_trsfm, task_extparms = TASK_COL_MAP[opts.task], TASK_TRSFM[opts.task], TASK_EXT_PARAMS[opts.task]
lb_trsfm = [x['get_lb'] for x in task_trsfm[1] if 'get_lb' in x]
self.df = self._df
if len(lb_trsfm) > 0:
lb_df = self.df[task_cols['y']].apply(lb_trsfm[0])
else:
lb_df = self.df[task_cols['y']]
if (type(lb_df.iloc[0]) is list):
lb_df[:] = [self._mltl_transform((None, SC.join(lbs)))[1] for lbs in lb_df]
max_lb_df = lb_df.loc[[idx for idx, lbs in lb_df.iteritems() if np.sum(list(map(int, lbs))) == 0]]
max_num, avg_num = max_lb_df.shape[0], 1.0 * lb_df[~lb_df.index.isin(max_lb_df.index)].shape[0] / len(lb_df.iloc[0])
else:
class_count = np.array([[1 if lb in y else 0 for lb in self.binlb.keys()] for y in lb_df if y is not None]).sum(axis=0)
max_num, max_lb_bin = class_count.max(), class_count.argmax()
max_lb_df = lb_df[lb_df == self.binlbr[max_lb_bin]]
avg_num = np.mean([class_count[x] for x in range(len(class_count)) if x != max_lb_bin])
removed_idx = max_lb_df.sample(n=int(max_num-avg_num), random_state=1).index
self.df = self.df.loc[list(set(self.df.index)-set(removed_idx))]
def remove_mostfrqlb(self):
if (self.binlb is None or self.binlb == 'rgrsn'): return
task_cols, task_trsfm, task_extparms = TASK_COL_MAP[opts.task], TASK_TRSFM[opts.task], TASK_EXT_PARAMS[opts.task]
lb_trsfm = [x['get_lb'] for x in task_trsfm[1] if 'get_lb' in x]
self.df = self._df
if len(lb_trsfm) > 0:
lb_df = self.df[task_cols['y']].apply(lb_trsfm[0])
else:
lb_df = self.df[task_cols['y']]
class_count = np.array([[1 if lb in y else 0 for lb in self.binlb.keys()] for y in lb_df if y]).sum(axis=0)
max_num, max_lb_bin = class_count.max(), class_count.argmax()
max_lb_df = lb_df[lb_df == self.binlbr[max_lb_bin]]
self.df = self.df.loc[list(set(self.df.index)-set(max_lb_df.index))]
class SentSimDataset(BaseDataset):
 """Sentence Similarity task dataset class"""
 def __getitem__(self, idx):
  """Return (row index, X, y) with the gold 0-5 score rescaled to [0, 1]."""
  record = self.df.iloc[idx]
  # text_col names two sentence columns; each is encoded separately.
  sample = [self.encode_func(record[sent_idx], self.tokenizer) for sent_idx in self.text_col], record[self.label_col]
  sample = self._transform_chain(sample)
  # NOTE(review): `sample[1] is np.nan` is an identity test and will not
  # match NaN floats created elsewhere (e.g. parsed by pandas) — confirm.
  return self.df.index[idx], (sample[0] if type(sample[0][0]) is str or type(sample[0][0][0]) is str else torch.tensor(sample[0])), torch.tensor(0 if sample[1] is np.nan else float(sample[1]) / 5.0)
 def fill_labels(self, lbs, index=None, saved_path=None, **kwargs):
  """Write predictions (rescaled back to the 0-5 range) into a copy of the raw dataframe."""
  lbs = 5.0 * lbs
  filled_df = self._df.copy(deep=True)[~self._df.index.duplicated(keep='first')]
  # NOTE(review): `if index:` uses truthiness; an empty list behaves like
  # None — confirm callers' types.
  if index:
   filled_df.loc[index, self.label_col] = lbs
  else:
   filled_df[self.label_col] = lbs
  if (saved_path is not None):
   filled_df.to_csv(saved_path, **kwargs)
  return filled_df
class EntlmntDataset(BaseDataset):
 """Entailment task dataset class"""
 def __getitem__(self, idx):
  """Return (row index, X, y) where X encodes the premise/hypothesis pair."""
  record = self.df.iloc[idx]
  # text_col names two sentence columns; each is encoded separately.
  sample = [self.encode_func(record[sent_idx], self.tokenizer) for sent_idx in self.text_col], record[self.label_col]
  sample = self._transform_chain(sample)
  # Keep X untensorised while it is still raw text (str or list of str).
  return self.df.index[idx], (sample[0] if type(sample[0][0]) is str or (type(sample[0][0]) is list and type(sample[0][0][0]) is str) else torch.tensor(sample[0])), torch.tensor(sample[1])
class NERDataset(BaseDataset):
 """NER task dataset class"""
 def __init__(self, csv_file, text_col, label_col, encode_func, tokenizer, sep='\t', binlb=None, transforms=[], transforms_args={}, transforms_kwargs=[], **kwargs):
  """Load a token-per-row file and precompute sentence boundaries.

  Blank lines are kept (skip_blank_lines=False) and nothing is coerced
  to NaN, so row positions stay aligned with the raw file.
  """
  super(NERDataset, self).__init__(csv_file, text_col, label_col, encode_func, tokenizer, sep=sep, header=None, skip_blank_lines=False, keep_default_na=False, na_values=[], binlb=binlb, transforms=transforms, transforms_args=transforms_args, transforms_kwargs=transforms_kwargs, **kwargs)
  # A '.' token marks a sentence boundary; force one at the final row.
  sep_selector = self.df[self.text_col].apply(lambda x: True if x=='.' else False)
  sep_selector.iloc[-1] = True
  int_idx = pd.DataFrame(np.arange(self.df.shape[0]), index=self.df.index)
  # Positional start offsets of each sentence: 0 plus each boundary + 1.
  self.boundaries = [0] + list(itertools.chain.from_iterable((int_idx[sep_selector.values].values+1).tolist()))
 def __len__(self):
  # One sample per span between consecutive boundaries.
  return len(self.boundaries) - 1
 def __getitem__(self, idx):
  """Return one encoded sentence: (row ids, X, y, per-token sub-token offsets)."""
  record = self.df.iloc[self.boundaries[idx]:self.boundaries[idx+1]].dropna()
  sample = self.encode_func(record[self.text_col].values.tolist(), self.tokenizer), record[self.label_col].values.tolist()
  # Sub-token count per original token (1 when not sub-tokenised).
  num_samples = [len(x) for x in sample[0]] if (len(sample[0]) > 0 and type(sample[0][0]) is list) else [1] * len(sample[0])
  record_idx = [0] + np.cumsum(num_samples).tolist()
  is_empty = (type(sample[0]) is list and len(sample[0]) == 0) or (type(sample[0]) is list and len(sample[0]) > 0 and all([type(x) is list and len(x) == 0 for x in sample[0]]))
  # Degenerate sentence: emit placeholders so downstream batching stays uniform.
  if (is_empty): return SC.join(map(str, record.index.values.tolist())), '' if self.encode_func == _tokenize else torch.LongTensor([-1]*opts.maxlen), '' if self.encode_func == _tokenize else torch.LongTensor([-1]*opts.maxlen), SC.join(map(str, record_idx))
  is_encoded = (type(sample[0]) is list and type(sample[0][0]) is int) or (type(sample[0]) is list and len(sample[0]) > 0 and type(sample[0][0]) is list and len(sample[0][0]) > 0 and type(sample[0][0][0]) is int)
  # Flatten sub-tokens and repeat each label across its sub-tokens.
  sample = list(itertools.chain.from_iterable(sample[0])) if is_encoded else sample[0], list(itertools.chain.from_iterable([[x] * ns for x, ns in zip(sample[1], num_samples)]))
  sample = self._transform_chain(sample)
  return SC.join(map(str, record.index.values.tolist())), (torch.tensor(sample[0]) if is_encoded else SC.join(sample[0])), (torch.tensor(sample[1]) if is_encoded else SC.join(map(str, sample[1]))), SC.join(map(str, record_idx))
 def fill_labels(self, lbs, saved_path=None, binlb=True, index=None, **kwargs):
  """Write predicted token labels back into a copy of the raw dataframe."""
  if binlb and self.binlbr is not None:
   lbs = [self.binlbr[lb] for lb in lbs]
  filled_df = self._df.copy(deep=True)[~self._df.index.duplicated(keep='first')]
  # NOTE(review): `if index:` uses truthiness; an empty list behaves like
  # None — confirm callers' types.
  if index:
   filled_df[self.label_col] = ''
   filled_df.loc[index, self.label_col] = lbs
  else:
   filled_df[self.label_col] = lbs
  if (saved_path is not None):
   filled_df.to_csv(saved_path, sep='\t', header=None, index=None, **kwargs)
  return filled_df
def _sentclf_transform(sample, options=None, start_tknids=[], clf_tknids=[]):
X, y = sample
X = [start_tknids + x + clf_tknids for x in X] if hasattr(X, '__iter__') and len(X) > 0 and type(X[0]) is not str and hasattr(X[0], '__iter__') else start_tknids + X + clf_tknids
return X, y
def _entlmnt_transform(sample, options=None, start_tknids=[], clf_tknids=[], delim_tknids=[]):
X, y = sample
X = start_tknids + X[0] + delim_tknids + X[1] + clf_tknids
return X, y
def _sentsim_transform(sample, options=None, start_tknids=[], clf_tknids=[], delim_tknids=[]):
X, y = sample
X = [start_tknids + X[0] + delim_tknids + X[1] + clf_tknids, start_tknids + X[1] + delim_tknids + X[0] + clf_tknids]
return X, y
def _padtrim_transform(sample, options=None, seqlen=32, xpad_val=0, ypad_val=None):
X, y = sample
X = [x[:min(seqlen, len(x))] + [xpad_val] * (seqlen - len(x)) for x in X] if hasattr(X, '__iter__') and len(X) > 0 and type(X[0]) is not str and hasattr(X[0], '__iter__') else X[:min(seqlen, len(X))] + [xpad_val] * (seqlen - len(X))
if ypad_val is not None: y = [x[:min(seqlen, len(x))] + [ypad_val] * (seqlen - len(x)) for x in y] if hasattr(y, '__iter__') and len(y) > 0 and type(y[0]) is not str and hasattr(y[0], '__iter__') else y[:min(seqlen, len(y))] + [ypad_val] * (seqlen - len(y))
return X, y
def _trim_transform(sample, options=None, seqlen=32, trimlbs=False, special_tkns={}):
seqlen -= sum([len(v) for v in special_tkns.values()])
X, y = sample
X = [x[:min(seqlen, len(x))] for x in X] if hasattr(X, '__iter__') and len(X) > 0 and type(X[0]) is not str and hasattr(X[0], '__iter__') else X[:min(seqlen, len(X))]
if trimlbs: y = [x[:min(seqlen, len(x))] for x in y] if hasattr(y, '__iter__') and len(y) > 0 and type(y[0]) is not str and hasattr(y[0], '__iter__') else y[:min(seqlen, len(y))]
return X, y
def _pad_transform(sample, options=None, seqlen=32, xpad_val=0, ypad_val=None):
X, y = sample
X = [x + [xpad_val] * (seqlen - len(x)) for x in X] if hasattr(X, '__iter__') and len(X) > 0 and type(X[0]) is not str and hasattr(X[0], '__iter__') else X + [xpad_val] * (seqlen - len(X))
if ypad_val is not None: y = [x + [ypad_val] * (seqlen - len(x)) for x in y] if hasattr(y, '__iter__') and len(y) > 0 and type(y[0]) is not str and hasattr(y[0], '__iter__') else y + [ypad_val] * (seqlen - len(y))
return X, y
def _adjust_encoder(mdl_name, tokenizer, extra_tokens=[], ret_list=False):
return [[tkn] if ret_list else tkn | |
'proceeding_paper',
'flight_readiness_report',
'post_flight_performance_report',
'education_outreach',
'verified_budget',
'final_motor_selection',
'close_out_finance_document',
'invoice_q1',
'invoice_q2',
'invoice_q3',
'invoice_q4',
'charges_certification',
'institutional_w9',
'url1',
'url2',
'url3',
'team_photo',
'team_biography',
'virtual_cdr',
'virtual_pdr',
'virtual_frr',
)
def __init__(self, *args, **kwargs):
"""Override of the initialization method to obtain the request object."""
self.request = kwargs.pop('request', None)
super(RocketLaunchTeamForm, self).__init__(*args, **kwargs)
def clean(self):
"""Deal with the auto populate fields."""
cd = self.cleaned_data
cid = cd.get('co_advisor')
gid = cd.get('grants_officer')
lid = cd.get('leader')
uid = str(self.request.user.id)
# Assign a User object to grants officer
if gid:
if gid == uid:
self.add_error(
'grants_officer',
"You cannot also be an authorized user",
)
cd['grants_officer'] = None
else:
try:
user = User.objects.get(pk=gid)
if user.profile:
cd['grants_officer'] = user
self.request.session['grants_officer_name'] = '{0}, {1}'.format(
user.last_name, user.first_name,
)
else:
self.add_error(
'grants_officer',
"This user does not have a complete profile",
)
cd['grants_officer'] = None
except Exception:
self.add_error(
'grants_officer',
"That User does not exist in the system",
)
else:
cd['grants_officer'] = None
# Assign a User object to co-advisor
if cid:
if cid == uid:
self.add_error('co_advisor', "You cannot also be a co-advisor")
cd['co_advisor'] = None
elif cid == lid:
self.add_error(
'co_advisor',
"Co-advisor and Team Lead cannot be the same person.",
)
cd['co_advisor'] = None
elif cid == gid:
self.add_error(
'co_advisor',
"Co-advisor and Authorized User cannot be the same person.",
)
cd['leader'] = None
else:
try:
user = User.objects.get(pk=cid)
cd['co_advisor'] = user
self.request.session['co_advisor_name'] = '{0}, {1}'.format(
user.last_name, user.first_name,
)
except Exception:
self.add_error(
'co_advisor', "That User does not exist in the system",
)
else:
cd['co_advisor'] = None
# Assign a User object to team leader
if lid:
if lid == uid:
self.add_error('leader', "You cannot also be a team lead.")
cd['leader'] = None
elif lid == gid:
self.add_error(
'leader',
"Authorized user and team lead cannot be the same person.",
)
cd['leader'] = None
elif lid == cid:
self.add_error(
'leader',
"Team Lead and co-adivsor cannot be the same person.",
)
cd['leader'] = None
else:
try:
user = User.objects.get(pk=lid)
cd['leader'] = user
self.request.session['leader_name'] = '{0}, {1}'.format(
user.last_name, user.first_name,
)
except Exception:
self.add_error(
'leader', "The team leader does not exist in the system",
)
else:
cd['leader'] = None
return cd
class RocketLaunchTeamUploadsForm(forms.ModelForm):
 """Rocket Launch Team uploads form."""
 class Meta:
  """Attributes about the form and options."""
  model = RocketLaunchTeam
  # Only the file-upload / report fields are exposed here; the rest of
  # the team data is managed by RocketLaunchTeamForm.
  fields = (
   'award_acceptance',
   'interim_progress_report',
   'preliminary_design_report',
   'final_design_report',
   'flight_demo',
   'lodging_list',
   'openrocketrocksim',
   'openrocketrocksim2',
   'openrocketrocksim3',
   'openrocketrocksim4',
   'patch_contest',
   'other_file',
   'other_file2',
   'other_file3',
   'critical_design_report',
   'oral_presentation',
   'post_flight_performance_report',
   'education_outreach',
   'flight_readiness_report',
   'proceeding_paper',
   'proposal',
   'budget',
   'verified_budget',
   'close_out_finance_document',
   'flysheet_1',
   'flysheet_2',
   'flysheet_3',
   'flysheet_4',
   'invoice_q1',
   'invoice_q2',
   'invoice_q3',
   'invoice_q4',
   'charges_certification',
   'institutional_w9',
   'virtual_cdr',
   'virtual_pdr',
   'virtual_frr',
   'team_photo',
   'team_biography',
  )
class FirstNationsRocketCompetitionForm(forms.ModelForm):
 """First Nations Rocket Competition form."""
 # Extra (non-model-derived widget) questions about prior WSGC funding.
 past_funding = forms.TypedChoiceField(
  label="Have you received WSGC funding within the past five years?",
  choices=BINARY_CHOICES,
  widget=forms.RadioSelect(),
 )
 past_funding_year = forms.CharField(
  label="If 'Yes', what year?",
  widget=forms.Select(choices=PAST_FUNDING_YEAR_CHOICES),
  required=False,
 )
 class Meta:
  """Attributes about the form and options."""
  model = FirstNationsRocketCompetition
  # Administrative/status and upload fields are handled elsewhere.
  exclude = (
   'complete',
   'user',
   'status',
   'funded_code',
   'funds_authorized',
   'authorized_match',
   'award_acceptance',
   'interim_report',
   'final_report',
   'other_file',
   'other_file2',
   'other_file3',
   'url1',
   'url2',
   'url3',
  )
 def __init__(self, *args, **kwargs):
  """Override of the initialization method to set team choices."""
  super(FirstNationsRocketCompetitionForm, self).__init__(*args, **kwargs)
  # Restrict team choices to current-season First Nations teams.
  self.fields['team'].queryset = RocketLaunchTeam.objects.filter(
   competition__contains="First Nations",
  ).filter(date_created__gte=get_start_date()).order_by("name")
class FirstNationsRocketCompetitionUploadsForm(forms.ModelForm):
 """
 WSGC have removed the requirement for Award Acceptance letter.
 We will keep this form class in place for when they decide to go
 back to requiring it.
 """
 class Meta:
  """Attributes about the form and options."""
  model = FirstNationsRocketCompetition
  # Upload slots only; all other data comes from the main form.
  fields = (
   'award_acceptance', 'other_file', 'other_file2', 'other_file3',
  )
class MidwestHighPoweredRocketCompetitionForm(forms.ModelForm):
 """Midwest High Powered Rocket Competition form."""
 # Eligibility questions rendered as radio/select widgets.
 past_funding = forms.TypedChoiceField(
  label="Have you received WSGC funding within the past five years?",
  choices=BINARY_CHOICES,
  widget=forms.RadioSelect(),
 )
 past_funding_year = forms.CharField(
  label="If 'Yes', what year?",
  widget=forms.Select(choices=PAST_FUNDING_YEAR_CHOICES),
  required=False,
 )
 other_fellowship = forms.TypedChoiceField(
  label="""
   Do you currently hold another Federal fellowship or traineeship?
  """,
  choices=BINARY_CHOICES,
  widget=forms.RadioSelect(),
 )
 past_participation = forms.TypedChoiceField(
  label="Have you previously participated in Collegiate Rocket Launch?",
  choices=BINARY_CHOICES,
  widget=forms.RadioSelect(),
 )
 class Meta:
  """Attributes about the form and options."""
  model = MidwestHighPoweredRocketCompetition
  # Administrative/status and upload fields are handled elsewhere.
  exclude = (
   'complete',
   'user',
   'status',
   'funded_code',
   'funds_authorized',
   'authorized_match',
   'award_acceptance',
   'interim_report',
   'final_report',
   'other_file',
   'other_file2',
   'other_file3',
   'url1',
   'url2',
   'url3',
  )
 def __init__(self, *args, **kwargs):
  """Override of the initialization method to set team choices."""
  super(MidwestHighPoweredRocketCompetitionForm, self).__init__(*args, **kwargs)
  # Current-season Midwest teams that still have open member slots.
  self.fields['team'].queryset = RocketLaunchTeam.objects.annotate(
   count=Count('members'),
  ).filter(
   competition__in=['Midwest High Powered Rocket Competition'],
  ).filter(
   date_created__gte=get_start_date(),
  ).exclude(
   count__gte=settings.ROCKET_LAUNCH_COMPETITION_TEAM_LIMIT,
  ).order_by("name")
class MidwestHighPoweredRocketCompetitionUploadsForm(forms.ModelForm):
 """Midwest High Powered Rocket Competition uploads form."""
 class Meta:
  """Attributes about the form and options."""
  model = MidwestHighPoweredRocketCompetition
  # Upload slots only; all other data comes from the main form.
  fields = (
   'award_acceptance', 'other_file', 'other_file2', 'other_file3',
  )
class CollegiateRocketCompetitionForm(forms.ModelForm):
 """Collegiate Rocket Competition form."""
 # Eligibility questions rendered as radio/select widgets.
 past_funding = forms.TypedChoiceField(
  label="Have you received WSGC funding within the past five years?",
  choices=BINARY_CHOICES,
  widget=forms.RadioSelect(),
 )
 past_funding_year = forms.CharField(
  label="If 'Yes', what year?",
  widget=forms.Select(choices=PAST_FUNDING_YEAR_CHOICES),
  required=False,
 )
 other_fellowship = forms.TypedChoiceField(
  label="""
   Do you currently hold another Federal fellowship or traineeship?
  """,
  choices=BINARY_CHOICES,
  widget=forms.RadioSelect(),
 )
 class Meta:
  """Attributes about the form and options."""
  model = CollegiateRocketCompetition
  # Administrative/status and upload fields are handled elsewhere.
  exclude = (
   'complete',
   'user',
   'status',
   'funded_code',
   'funds_authorized',
   'authorized_match',
   'award_acceptance',
   'interim_report',
   'final_report',
   'other_file',
   'other_file2',
   'other_file3',
   'url1',
   'url2',
   'url3',
  )
 def __init__(self, *args, **kwargs):
  """Override of the initialization method to set team choices."""
  super(CollegiateRocketCompetitionForm, self).__init__(*args, **kwargs)
  # Current-season Collegiate teams that still have open member slots.
  self.fields['team'].queryset = RocketLaunchTeam.objects.annotate(
   count=Count('members'),
  ).filter(competition__in=["Collegiate Rocket Competition"]).filter(
   date_created__gte=get_start_date(),
  ).exclude(
   count__gte=settings.ROCKET_LAUNCH_COMPETITION_TEAM_LIMIT,
  ).order_by("name")
class CollegiateRocketCompetitionUploadsForm(forms.ModelForm):
 """Collegiate Rocket Competition uploads form."""
 class Meta:
  """Attributes about the form and options."""
  model = CollegiateRocketCompetition
  # Upload slots only; all other data comes from the main form.
  fields = (
   'award_acceptance', 'other_file', 'other_file2', 'other_file3',
  )
class NasaCompetitionForm(forms.ModelForm):
 """NASA Competition form."""
 # Eligibility and acceptance questions rendered as radio/select widgets.
 past_funding = forms.TypedChoiceField(
  label="Have you received WSGC funding within the past five years?",
  choices=BINARY_CHOICES,
  widget=forms.RadioSelect(),
 )
 past_funding_year = forms.CharField(
  label="If 'Yes', what year?",
  widget=forms.Select(choices=PAST_FUNDING_YEAR_CHOICES),
  required=False,
 )
 program_acceptance = forms.TypedChoiceField(
  label="Has your team applied and been accepted into the program?",
  choices=BINARY_CHOICES,
  widget=forms.RadioSelect(),
 )
 # Posted as a raw user id; clean() resolves it to a User object.
 grants_officer = forms.CharField(
  label="Authorized User",
  required=False,
  help_text="""
   I authorize the individual listed above to submit
   the required documents associated with this proposal on my behalf.
   (NOTE: In order to choose an Authorized User, the individual must be
   registered with WSGC prior to submitting this application.)
  """,
 )
 def __init__(self, *args, **kwargs):
  """Override of the initialization method to obtain the request object."""
  self.request = kwargs.pop('request', None)
  super(NasaCompetitionForm, self).__init__(*args, **kwargs)
 class Meta:
  """Attributes about the form and options."""
  model = NasaCompetition
  # Administrative, reporting, and upload fields are handled elsewhere.
  exclude = (
   'complete',
   'user',
   'status',
   'funded_code',
   'funds_authorized',
   'authorized_match',
   'award_acceptance',
   'final_report',
   'other_file',
   'other_file2',
   'other_file3',
   'interim_report',
   'invoice_q1',
   'invoice_q2',
   'invoice_q3',
   'invoice_q4',
   'institutional_w9',
   'photos_overview',
   'publications_overview',
   'budget_modification',
   'performance_modification',
   'scope_modification',
   'no_cost_extension',
   'intended_program_match',
   'close_out_finance_document',
   'url1',
   'url2',
   'url3',
   'team_photo',
   'team_biography',
  )
 def clean(self):
  """Validate 'other' free-text fields and resolve the grants officer.

  Returns:
   dict: cleaned_data with grants_officer resolved to a User or None.
  """
  cd = self.cleaned_data
  gid = cd.get('grants_officer')
  uid = str(self.request.user.id)
  # 'Other' selections require their companion free-text field.
  if cd.get("competition_type") == "Other":
   if cd.get("competition_type_other") == "":
    self.add_error('competition_type_other', "Required field")
  if cd.get("facility_name") == "Other":
   if cd.get("facility_name_other") == "":
    self.add_error('facility_name_other', "Required field")
  # Assign a User object to grants officer
  if gid:
   if gid == uid:
    self.add_error(
     'grants_officer',
     "You cannot also be an authorized user",
    )
    cd['grants_officer'] = None
   else:
    try:
     user = User.objects.get(pk=gid)
     if user.profile:
      cd['grants_officer'] = user
      self.request.session['grants_officer_name'] = '{0}, {1}'.format(
       user.last_name, user.first_name,
      )
     else:
      self.add_error(
       'grants_officer',
       "This user does not have a complete profile",
      )
      cd['grants_officer'] = None
    except Exception:
     self.add_error(
      'grants_officer',
      "That User does not exist in the system",
     )
  else:
   cd['grants_officer'] = None
  # Bug fix: clean() must hand cleaned_data back (as the sibling
  # RocketLaunchTeamForm.clean does); previously nothing was returned.
  return cd
class NasaCompetitionUploadsForm(forms.ModelForm):
 """NASA Competition uploads form."""
 class Meta:
  """Attributes about the form and options."""
  model = NasaCompetition
  # Only the file-upload / report fields are exposed here; the rest of
  # the application data is managed by NasaCompetitionForm.
  fields = (
   'award_acceptance',
   'final_report',
   'interim_report',
   'invoice_q1',
   'invoice_q2',
   'invoice_q3',
   'invoice_q4',
   'institutional_w9',
   'photos_overview',
   'publications_overview',
   'budget_modification',
   'performance_modification',
   'scope_modification',
   'no_cost_extension',
   'intended_program_match',
   'close_out_finance_document',
   'other_file',
   'other_file2',
   'other_file3',
   'team_photo',
   'team_biography',
  )
class IndustryInternshipForm(forms.ModelForm):
 """Industry Internship form."""
 # Eligibility questions rendered as radio/select widgets.
 past_funding = forms.TypedChoiceField(
  label="Have you received WSGC funding within the past five years?",
  choices=BINARY_CHOICES,
  widget=forms.RadioSelect(),
 )
 past_funding_year = forms.CharField(
  label="If 'Yes', what year?",
  widget=forms.Select(choices=PAST_FUNDING_YEAR_CHOICES),
  required=False,
 )
 # Posted as a raw user id; clean() resolves it to a User object.
 grants_officer = forms.CharField(
  label="Authorized User",
  required=False,
  help_text="""
   I authorize the individual listed above to submit
   the required documents associated with this proposal on my behalf.
   (NOTE: In order to choose an Authorized User, the individual must be
   registered with WSGC prior to submitting this application.)
  """,
 )
 def __init__(self, *args, **kwargs):
  """Override of the initialization method to obtain the request object."""
  self.request = kwargs.pop('request', None)
  super(IndustryInternshipForm, self).__init__(*args, **kwargs)
 class Meta:
  """Attributes about the form and options."""
  model = IndustryInternship
  # Administrative, reporting, and upload fields are handled elsewhere.
  exclude = (
   'complete',
   'user',
   'status',
   'funded_code',
   'work_plan',
   'authorized_match',
   'award_acceptance',
   'final_report',
   'other_file',
   'other_file2',
   'other_file3',
   'interim_report',
   'invoice_q1',
   'invoice_q2',
   'invoice_q3',
   'invoice_q4',
   'intended_program_match',
   'close_out_finance_document',
   'funds_authorized',
   'url1',
   'url2',
   'url3',
  )
def clean(self):
"""Deal with grants officer."""
cd = self.cleaned_data
gid = cd.get('grants_officer')
uid = str(self.request.user.id)
# Assign | |
# scripts/maint/updateFallbackDirs.py
#!/usr/bin/env python
# Usage:
#
# Regenerate the list:
# scripts/maint/updateFallbackDirs.py > src/or/fallback_dirs.inc 2> fallback_dirs.log
#
# Check the existing list:
# scripts/maint/updateFallbackDirs.py check_existing > fallback_dirs.inc.ok 2> fallback_dirs.log
# mv fallback_dirs.inc.ok src/or/fallback_dirs.inc
#
# This script should be run from a stable, reliable network connection,
# with no other network activity (and not over tor).
# If this is not possible, please disable:
# PERFORM_IPV4_DIRPORT_CHECKS and PERFORM_IPV6_DIRPORT_CHECKS
#
# Needs dateutil, stem, and potentially other python packages.
# Optionally uses ipaddress (python 3 builtin) or py2-ipaddress (package)
# for netblock analysis.
#
# Then read the logs to make sure the fallbacks aren't dominated by a single
# netblock or port.
# Script by weasel, April 2015
# Portions by gsathya & karsten, 2013
# https://trac.torproject.org/projects/tor/attachment/ticket/8374/dir_list.2.py
# Modifications by teor, 2015
import StringIO
import string
import re
import datetime
import gzip
import os.path
import json
import math
import sys
import urllib
import urllib2
import hashlib
import dateutil.parser
# bson_lazy provides bson
#from bson import json_util
import copy
import re
from stem.descriptor import DocumentHandler
from stem.descriptor.remote import get_consensus, get_server_descriptors, MAX_FINGERPRINTS
import logging
logging.root.name = ''
HAVE_IPADDRESS = False
try:
# python 3 builtin, or install package py2-ipaddress
# there are several ipaddress implementations for python 2
# with slightly different semantics with str typed text
# fortunately, all our IP addresses are in unicode
import ipaddress
HAVE_IPADDRESS = True
except ImportError:
# if this happens, we avoid doing netblock analysis
logging.warning('Unable to import ipaddress, please install py2-ipaddress.' +
' A fallback list will be created, but optional netblock' +
' analysis will not be performed.')
## Top-Level Configuration
# We use semantic versioning: https://semver.org
# In particular:
# * major changes include removing a mandatory field, or anything else that
# would break an appropriately tolerant parser,
# * minor changes include adding a field,
# * patch changes include changing header comments or other unstructured
# content
FALLBACK_FORMAT_VERSION = '2.0.0'
SECTION_SEPARATOR_BASE = '====='
SECTION_SEPARATOR_COMMENT = '/* ' + SECTION_SEPARATOR_BASE + ' */'
# Output all candidate fallbacks, or only output selected fallbacks?
OUTPUT_CANDIDATES = False
# Perform DirPort checks over IPv4?
# Change this to False if IPv4 doesn't work for you, or if you don't want to
# download a consensus for each fallback
# Don't check ~1000 candidates when OUTPUT_CANDIDATES is True
PERFORM_IPV4_DIRPORT_CHECKS = False if OUTPUT_CANDIDATES else True
# Perform DirPort checks over IPv6?
# If you know IPv6 works for you, set this to True
# This will exclude IPv6 relays without an IPv6 DirPort configured
# So it's best left at False until #18394 is implemented
# Don't check ~1000 candidates when OUTPUT_CANDIDATES is True
# (Deliberately False in both modes: the old
# "False if OUTPUT_CANDIDATES else False" conditional was dead code.)
PERFORM_IPV6_DIRPORT_CHECKS = False
# Must relays be running now?
MUST_BE_RUNNING_NOW = (PERFORM_IPV4_DIRPORT_CHECKS
                       or PERFORM_IPV6_DIRPORT_CHECKS)
# Clients have been using microdesc consensuses by default for a while now
DOWNLOAD_MICRODESC_CONSENSUS = True
# If a relay delivers an expired consensus, if it expired less than this many
# seconds ago, we still allow the relay. This should never be less than -90,
# as all directory mirrors should have downloaded a consensus 90 minutes
# before it expires. It should never be more than 24 hours, because clients
# reject consensuses that are older than REASONABLY_LIVE_TIME.
# For the consensus expiry check to be accurate, the machine running this
# script needs an accurate clock.
#
# Relays on 0.3.0 and later return a 404 when they are about to serve an
# expired consensus. This makes them fail the download check.
# We use a tolerance of 0, so that 0.2.x series relays also fail the download
# check if they serve an expired consensus.
CONSENSUS_EXPIRY_TOLERANCE = 0
# Output fallback name, flags, bandwidth, and ContactInfo in a C comment?
OUTPUT_COMMENTS = bool(OUTPUT_CANDIDATES)
# Output matching ContactInfo in fallbacks list or the blacklist?
# Useful if you're trying to contact operators
CONTACT_COUNT = bool(OUTPUT_CANDIDATES)
CONTACT_BLACKLIST_COUNT = bool(OUTPUT_CANDIDATES)
# How the list should be sorted:
# fingerprint: is useful for stable diffs of fallback lists
# measured_bandwidth: is useful when pruning the list based on bandwidth
# contact: is useful for contacting operators once the list has been pruned
OUTPUT_SORT_FIELD = 'contact' if OUTPUT_CANDIDATES else 'fingerprint'
## OnionOO Settings
ONIONOO = 'https://onionoo.torproject.org/'
#ONIONOO = 'https://onionoo.thecthulhu.com/'
# Don't bother going out to the Internet, just use the files available locally,
# even if they're very old
LOCAL_FILES_ONLY = False
## Whitelist / Blacklist Filter Settings
# The whitelist contains entries that are included if all attributes match
# (IPv4, dirport, orport, id, and optionally IPv6 and IPv6 orport)
# The blacklist contains (partial) entries that are excluded if any
# sufficiently specific group of attributes matches:
# IPv4 & DirPort
# IPv4 & ORPort
# ID
# IPv6 & DirPort
# IPv6 & IPv6 ORPort
# If neither port is included in the blacklist, the entire IP address is
# blacklisted.
# What happens to entries in neither list?
# When True, they are included, when False, they are excluded
INCLUDE_UNLISTED_ENTRIES = bool(OUTPUT_CANDIDATES)
# If an entry is in both lists, what happens?
# When True, it is excluded, when False, it is included
BLACKLIST_EXCLUDES_WHITELIST_ENTRIES = True
WHITELIST_FILE_NAME = 'scripts/maint/fallback.whitelist'
BLACKLIST_FILE_NAME = 'scripts/maint/fallback.blacklist'
FALLBACK_FILE_NAME = 'src/or/fallback_dirs.inc'
# The number of bytes we'll read from a filter file before giving up
MAX_LIST_FILE_SIZE = 1024 * 1024
## Eligibility Settings
# Require fallbacks to have the same address and port for a set amount of time
# We used to have this at 1 week, but that caused many fallback failures, which
# meant that we had to rebuild the list more often. We want fallbacks to be
# stable for 2 years, so we set it to a few months.
#
# There was a bug in Tor 0.2.8.1-alpha and earlier where a relay temporarily
# submits a 0 DirPort when restarted.
# This causes OnionOO to (correctly) reset its stability timer.
# Affected relays should upgrade to Tor 0.2.9 or later, which has a fix
# for this issue.
#
# If a relay changes address or port, that's it, it's not useful any more,
# because clients can't find it
ADDRESS_AND_PORT_STABLE_DAYS = 90
# We ignore relays that have been down for more than this period
MAX_DOWNTIME_DAYS = 0 if MUST_BE_RUNNING_NOW else 7
# FallbackDirs must have a time-weighted-fraction that is greater than or
# equal to:
# Mirrors that are down half the time are still useful half the time
CUTOFF_RUNNING = .50
CUTOFF_V2DIR = .50
# Guard flags are removed for some time after a relay restarts, so we ignore
# the guard flag.
CUTOFF_GUARD = .00
# FallbackDirs must have a time-weighted-fraction that is less than or equal
# to:
# .00 means no bad exits
PERMITTED_BADEXIT = .00
# older entries' weights are adjusted with ALPHA^(age in days)
AGE_ALPHA = 0.99
# this factor is used to scale OnionOO entries to [0,1]
ONIONOO_SCALE_ONE = 999.
## Fallback Count Limits
# The target for these parameters is 20% of the guards in the network
# This is around 200 as of October 2015
_FB_POG = 0.2
FALLBACK_PROPORTION_OF_GUARDS = None if OUTPUT_CANDIDATES else _FB_POG
# Limit the number of fallbacks (eliminating lowest by advertised bandwidth)
MAX_FALLBACK_COUNT = None if OUTPUT_CANDIDATES else 200
# Emit a C #error if the number of fallbacks is less than expected
MIN_FALLBACK_COUNT = 0 if OUTPUT_CANDIDATES else MAX_FALLBACK_COUNT*0.5
# The maximum number of fallbacks on the same address, contact, or family
#
# With 150 fallbacks, this means each operator sees 5% of client bootstraps.
# For comparison:
# - We try to limit guard and exit operators to 5% of the network
# - The directory authorities used to see 11% of client bootstraps each
#
# We also don't want too much of the list to go down if a single operator
# has to move all their relays.
MAX_FALLBACKS_PER_IP = 1
MAX_FALLBACKS_PER_IPV4 = MAX_FALLBACKS_PER_IP
MAX_FALLBACKS_PER_IPV6 = MAX_FALLBACKS_PER_IP
MAX_FALLBACKS_PER_CONTACT = 7
MAX_FALLBACKS_PER_FAMILY = 7
## Fallback Bandwidth Requirements
# Any fallback with the Exit flag has its bandwidth multipled by this fraction
# to make sure we aren't further overloading exits
# (Set to 1.0, because we asked that only lightly loaded exits opt-in,
# and the extra load really isn't that much for large relays.)
EXIT_BANDWIDTH_FRACTION = 1.0
# If a single fallback's bandwidth is too low, it's pointless adding it
# We expect fallbacks to handle an extra 10 kilobytes per second of traffic
# Make sure they can support fifty times the expected extra load
#
# We convert this to a consensus weight before applying the filter,
# because all the bandwidth amounts are specified by the relay
MIN_BANDWIDTH = 50.0 * 10.0 * 1024.0
# Clients will time out after 30 seconds trying to download a | |
temp <<= 1
if (self.flags & FC):
temp |= 1 # aka FC
if (temp & 0x100):
self.flags |= FC
else:
self.flags &= (~FC & 0xff)
temp &= 0xff
self.assign_then_set_flags(operand_ref, OperandRef(BYTE_VAL, temp))
# #define ROR(data) \
# { \
# temp = data; \
# if (flags & FC) temp |= 0x100; \
# if (temp & 1) flags |= FC; \
# else flags &= ~FC; \
# temp >>= 1; \
# ASSIGNSETFLAGS(data, temp); \
# }
def ROR(self, operand_ref):
temp = operand_ref.get_byte(self)
if (self.flags & FC):
temp |= 0x100
if (temp & 1):
self.flags |= FC
else:
self.flags &= (~FC & 0xff)
temp >>= 1
self.assign_then_set_flags(operand_ref, OperandRef(BYTE_VAL, temp))
# #define DEC(data) \
# { \
# temp = data - 1; \
# ASSIGNSETFLAGS(data, temp); \
# }
def DEC(self, operand_ref):
temp = operand_ref.get_byte(self) - 1
temp &= 0xff
self.assign_then_set_flags(operand_ref, OperandRef(BYTE_VAL, temp))
# #define INC(data) \
# { \
# temp = data + 1; \
# ASSIGNSETFLAGS(data, temp); \
# }
def INC(self, operand_ref):
temp = operand_ref.get_byte(self) + 1
temp &= 0xff
self.assign_then_set_flags(operand_ref, OperandRef(BYTE_VAL, temp))
# #define EOR(data) \
# { \
# a ^= data; \
# SETFLAGS(a); \
# }
def EOR(self, operand_ref):
self.a ^= operand_ref.get_byte(self)
self.set_flags(self.a)
# #define ORA(data) \
# { \
# a |= data; \
# SETFLAGS(a); \
# }
def ORA(self, operand_ref):
self.a |= operand_ref.get_byte(self)
self.set_flags(self.a)
# #define AND(data) \
# { \
# a &= data; \
# SETFLAGS(a) \
# }
def AND(self, operand_ref):
self.a &= operand_ref.get_byte(self)
self.set_flags(self.a)
# #define BIT(data) \
# { \
# flags = (flags & ~(FN|FV)) | \
# (data & (FN|FV)); \
# if (!(data & a)) flags |= FZ; \
# else flags &= ~FZ; \
# }
def BIT(self, operand_ref):
temp = operand_ref.get_byte(self)
self.flags = (self.flags & ~(FN | FV) & 0xff) | (temp & (FN | FV))
if not (temp & self.a):
self.flags |= FZ
else:
self.flags &= (~FZ & 0xff)
# void initcpu(unsigned short newpc, unsigned char newa, unsigned char newx, unsigned char newy)
# {
# pc = newpc;
# a = newa;
# x = newx;
# y = newy;
# flags = 0;
# sp = 0xff;
# cpucycles = 0;
# }
def init_cpu(self, newpc, newa=0, newx=0, newy=0, flags=FU):
self.pc = newpc
self.a = newa
self.x = newx
self.y = newy
self.flags = flags
self.sp = 0xff
self.cpucycles = 0
self.invocationCount = -1
# ---------------------------------------------------------------------------
# int runcpu(void)
# {
# unsigned temp;
#
# unsigned char op = FETCH();
# /* printf("PC: %04x OP: %02x A:%02x X:%02x Y:%02x\n", pc-1, op, a, x, y); */
# cpucycles += cpucycles_table[op];
# switch(op)
# {
def runcpu(self):
# execute instruction.
# If RTS/RTI (when stack empty) or BRK, return 0, else return 1
# Throw exception on the not-yet-implemented pseduo-op codes
if self.debug:
self.invocationCount += 1
output_str = "{:08d},PC=${:04x},A=${:02x},X=${:02x},Y=${:02x},SP=${:02x},P=%{:08b}" \
.format(self.cpucycles, self.pc, self.a, self.x, self.y, self.sp, self.flags)
print(output_str)
# Useful for some of the Wolfgang Lorenz tests:
"""
print("data b a r ${:02x} ${:02x} ${:02x}".format(self.memory[0x08ec], self.memory[0x08f2], self.memory[0x08f8]))
print("accum b a r ${:02x} ${:02x} ${:02x}".format(self.memory[0x08ed], self.memory[0x08f3], self.memory[0x08f9]))
print("x b a r ${:02x} ${:02x} ${:02x}".format(self.memory[0x08ee], self.memory[0x08f4], self.memory[0x08fa]))
print("y b a r ${:02x} ${:02x} ${:02x}".format(self.memory[0x08ef], self.memory[0x08f5], self.memory[0x08fb]))
print("flags b a r ${:08b} ${:08b} ${:08b}".format(self.memory[0x08f0], self.memory[0x08f6], self.memory[0x08fc]))
print("stackptr b a r ${:08b} ${:08b} ${:08b}".format(self.memory[0x08f1], self.memory[0x08f7], self.memory[0x08fd]))
"""
instruction = self.fetch()
self.last_instruction = instruction
self.cpucycles += cpucycles_table[instruction]
# Had converted the C case statement to a bunch of elif statements. However, pylint can't handle
# that many ("Maximum recursion depth exceeded"), so converted all to if statements with a return
# after each one.
# case 0x69:
# ADC(IMMEDIATE());
# pc++;
# break;
#
# case 0x65:
# ADC(MEM(ZEROPAGE()));
# pc++;
# break;
#
# case 0x75:
# ADC(MEM(ZEROPAGEX()));
# pc++;
# break;
#
# case 0x6d:
# ADC(MEM(ABSOLUTE()));
# pc += 2;
# break;
#
# case 0x7d:
# cpucycles += EVALPAGECROSSING_ABSOLUTEX();
# ADC(MEM(ABSOLUTEX()));
# pc += 2;
# break;
#
# case 0x79:
# cpucycles += EVALPAGECROSSING_ABSOLUTEY();
# ADC(MEM(ABSOLUTEY()));
# pc += 2;
# break;
#
# case 0x61:
# ADC(MEM(INDIRECTX()));
# pc++;
# break;
#
# case 0x71:
# cpucycles += EVALPAGECROSSING_INDIRECTY();
# ADC(MEM(INDIRECTY()));
# pc++;
# break;
# ADC instructions
if instruction == 0x69: # $69/105 ADC #n
self.ADC(OperandRef(BYTE_VAL, self.immediate()))
self.pc += 1
return 1
if instruction == 0x65: # $65/101 ADC zp
self.ADC(OperandRef(LOC_VAL, self.zeropage()))
self.pc += 1
return 1
if instruction == 0x75: # $75/117 ADC zp,X
self.ADC(OperandRef(LOC_VAL, self.zeropage_x()))
self.pc += 1
return 1
if instruction == 0x6d: # $6D/109 ADC abs
self.ADC(OperandRef(LOC_VAL, self.absolute()))
self.pc += 2
return 1
if instruction == 0x7d: # $7D/125 ADC abs,X
self.cpucycles += self.eval_page_crossing_absolute_x()
self.ADC(OperandRef(LOC_VAL, self.absolute_x()))
self.pc += 2
return 1
if instruction == 0x79: # $79/121 ADC abs,Y
self.cpucycles += self.eval_page_crossing_absolute_y()
self.ADC(OperandRef(LOC_VAL, self.absolute_y()))
self.pc += 2
return 1
if instruction == 0x61: # $61/97 ADC (zp,X)
self.ADC(OperandRef(LOC_VAL, self.indirect_x()))
self.pc += 1
return 1
if instruction == 0x71: # $71/113 ADC (zp),Y
self.cpucycles += self.eval_page_crossing_indirect_y()
self.ADC(OperandRef(LOC_VAL, self.indirect_y()))
self.pc += 1
return 1
# case 0x29:
# AND(IMMEDIATE());
# pc++;
# break;
#
# case 0x25:
# AND(MEM(ZEROPAGE()));
# pc++;
# break;
#
# case 0x35:
# AND(MEM(ZEROPAGEX()));
# pc++;
# break;
#
# case 0x2d:
# AND(MEM(ABSOLUTE()));
# pc += 2;
# break;
#
# case 0x3d:
# cpucycles += EVALPAGECROSSING_ABSOLUTEX();
# AND(MEM(ABSOLUTEX()));
# pc += 2;
# break;
#
# case 0x39:
# cpucycles += EVALPAGECROSSING_ABSOLUTEY();
# AND(MEM(ABSOLUTEY()));
# pc += 2;
# break;
#
# case 0x21:
# AND(MEM(INDIRECTX()));
# pc++;
# break;
#
# case 0x31:
# cpucycles += EVALPAGECROSSING_INDIRECTY();
# AND(MEM(INDIRECTY()));
# pc++;
# break;
# AND instructions
if instruction == 0x29: # $29/41 AND #n
self.AND(OperandRef(BYTE_VAL, self.immediate()))
self.pc += 1
return 1
if instruction == 0x25: # $25/37 AND zp
self.AND(OperandRef(LOC_VAL, self.zeropage()))
self.pc += 1
return 1
if instruction == 0x35: # $35/53 AND zp,X
self.AND(OperandRef(LOC_VAL, self.zeropage_x()))
self.pc += 1
return 1
if instruction == 0x2d: # $2D/45 AND abs
self.AND(OperandRef(LOC_VAL, self.absolute()))
self.pc += 2
return 1
if instruction == 0x3d: # $3D/61 AND abs,X
self.cpucycles += self.eval_page_crossing_absolute_x()
self.AND(OperandRef(LOC_VAL, self.absolute_x()))
self.pc += 2
return 1
if instruction == 0x39: # $39/57 AND abs,Y
self.cpucycles += self.eval_page_crossing_absolute_y()
self.AND(OperandRef(LOC_VAL, self.absolute_y()))
self.pc += 2
return 1
if instruction == 0x21: # $21/33 AND (zp,X)
self.AND(OperandRef(LOC_VAL, self.indirect_x()))
self.pc += 1
return 1
if instruction == 0x31: # $31/49 AND (zp),Y
self.cpucycles += self.eval_page_crossing_indirect_y()
self.AND(OperandRef(LOC_VAL, self.indirect_y()))
self.pc += 1
return 1
# case 0x0a:
# ASL(a);
# break;
#
# case 0x06:
# ASL(MEM(ZEROPAGE()));
# pc++;
# break;
#
# case 0x16:
# ASL(MEM(ZEROPAGEX()));
# pc++;
# break;
#
# case 0x0e:
# ASL(MEM(ABSOLUTE()));
# pc += 2;
# break;
#
# case 0x1e:
# ASL(MEM(ABSOLUTEX()));
# pc += 2;
# break;
# ASL instructions
if instruction == 0x0a: # $0A/10 ASL A
self.ASL(A_OPREF)
return 1
if instruction == 0x06: # $06/6 ASL zp
self.ASL(OperandRef(LOC_VAL, self.zeropage()))
self.pc += 1
return 1
if instruction == 0x16: # $16/22 ASL zp,X
self.ASL(OperandRef(LOC_VAL, self.zeropage_x()))
self.pc += 1
return 1
if instruction == 0x0e: # $0E/14 ASL abs
self.ASL(OperandRef(LOC_VAL, self.absolute()))
self.pc += 2
return 1
if instruction == 0x1e: # $1E/30 ASL abs,X
self.ASL(OperandRef(LOC_VAL, self.absolute_x()))
self.pc += 2
return 1
# case 0x90:
# if (!(flags & FC)) BRANCH()
# else pc++;
# break;
# BCC instruction
if instruction == 0x90: # $90/144 BCC rel
if not (self.flags & FC):
self.branch()
else:
self.pc += 1
return 1
# case 0xb0:
# if (flags & FC) BRANCH()
# else pc++;
# break;
# BCS instruction
if instruction == 0xb0: # $B0/176 BCS rel
if (self.flags & FC):
self.branch()
else:
self.pc += 1
return 1
# case 0xf0:
# if (flags & FZ) BRANCH()
# else pc++;
# break;
# BEQ instruction
if instruction == 0xf0: # $F0/240 BEQ rel
if (self.flags & FZ):
self.branch()
else:
self.pc += 1
return 1
# case 0x24:
# BIT(MEM(ZEROPAGE()));
# pc++;
# break;
#
# case 0x2c:
# BIT(MEM(ABSOLUTE()));
# pc += | |
#!/usr/bin/python
# OpenGL 1.4 code
import math
import os
import numpy
from freetype import *
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import pygame, pygame.image
from pygame.locals import *
width = 1600  # window width in pixels
height = 960  # window height in pixels
arrIcons = []  # loaded textures; entries are (texId, w, h, lp, rp, tp, bp) tuples from loadTexture()
texIdB_P = None  # presumably the pressed-state button texture — set outside this chunk
texIdB_R = None  # presumably the released-state button texture — set outside this chunk
texIdNode = None  # node texture handle — set outside this chunk
texIdPane = None  # pane texture handle — set outside this chunk
video_flags = None  # pygame display flags — set outside this chunk
bUpdate = False  # redraw-needed flag
bCTRLpressed = False  # tracks the CTRL modifier key state
ftRegular = None  # customfont instance (regular weight) — created outside this chunk
ftBold = None  # customfont instance (bold weight) — created outside this chunk
#########################################################
'''
from pygame.locals import *
from ctypes import windll
user32 = windll.user32
ShowWindow = user32.ShowWindow
IsZoomed = user32.IsZoomed
SW_MAXIMIZE = 3
SW_RESTORE = 9
def getSDLWindow():
return pygame.display.get_wm_info()['window']
def SDL_Maximize():
return ShowWindow(getSDLWindow(), SW_MAXIMIZE)
def SDL_Restore():
return ShowWindow(getSDLWindow(), SW_RESTORE)
def SDL_IsMaximized():
return IsZoomed(getSDLWindow())
'''
#########################################################
def enableMouseMotion():
    """Re-allow MOUSEMOTION events on the pygame event queue."""
    pygame.event.set_allowed(MOUSEMOTION)
def disableMouseMotion():
    """Block MOUSEMOTION events from entering the pygame event queue."""
    pygame.event.set_blocked(MOUSEMOTION)
#########################################################
def loadTexture(pathImg, lp=0, rp=0, tp=0, bp=0):
    """Load an image file into an OpenGL RGBA texture.

    lp/rp/tp/bp are the left/right/top/bottom 9-slice insets in pixels;
    they are stored with the texture and consumed by
    uiglobals.renderTextureSlice9.

    Returns a (texId, width, height, lp, rp, tp, bp) tuple, the entry
    format of the arrIcons list.
    """
    bg_image = pygame.image.load(pathImg).convert_alpha()
    w = bg_image.get_width()
    h = bg_image.get_height()
    # Raw RGBA bytes for glTexImage2D.
    bg_data = pygame.image.tostring(bg_image,"RGBA", 0)
    glEnable(GL_TEXTURE_2D)
    texId = glGenTextures(1)
    glBindTexture(GL_TEXTURE_2D, texId)
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, bg_data)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
    return (texId, w, h, lp, rp, tp, bp)
#########################################################
class customfont:
    """Bitmap-font renderer.

    Rasterizes ASCII 32..127 of a TrueType face (via freetype) into a single
    GL_ALPHA texture atlas, then compiles one GL display list per character so
    strings can be drawn with glCallLists().
    """
    def __init__(self, pthFont, nSize):
        # Font file path and point size, used later by makefont().
        self.pthFont = pthFont
        self.nSize = nSize
        # First display-list id; list (base + ord(char)) draws that character.
        self.base = 0
        # GL texture id of the glyph atlas.
        self.texfont = 0
        # Cell metrics in pixels, filled in by makefont().
        self.fontheight = None
        self.fontwidth = None
        # Per-character pen advance, indexed by ord(char); set by makefont().
        self.advanceX = None
        self.advanceY = None
    def enableTexture(self):
        """Bind the glyph atlas and enable 2D texturing."""
        glBindTexture( GL_TEXTURE_2D, self.texfont)
        glEnable(GL_TEXTURE_2D)
    def disableTexture(self):
        """Unbind the atlas and disable 2D texturing."""
        glBindTexture( GL_TEXTURE_2D, 0)
        glDisable(GL_TEXTURE_2D)
    def renderChar(self, chardata):
        """Draw one character; the caller must have enabled the texture."""
        glCallList(self.base + ord(chardata) )
    def renderStr(self, text):
        """Draw a whole string at the current model-view position."""
        self.enableTexture()
        glListBase( self.base )
        glCallLists( [ord(c) for c in text] )
        self.disableTexture()
    def makefont(self):
        """Build the glyph atlas texture and the per-character display lists."""
        vpadding = 6
        hpadding = 7
        # Load font
        face = Face(self.pthFont)
        face.set_char_size( self.nSize*64 )  # freetype char sizes are in 1/64 pt
        self.advanceX = [0]*128
        self.advanceY = [0]*128
        bitmap_left = [0]*128
        bitmap_top = [0]*128
        width, left, height, ascender, descender = 0, 0, 0, 0, 0
        # First pass: gather metrics so every glyph fits one uniform cell.
        for k in range(32,128):
            face.load_char( chr(k), FT_LOAD_RENDER | FT_LOAD_FORCE_AUTOHINT )
            bitmap = face.glyph.bitmap
            self.advanceX[k] = face.glyph.advance.x >> 6
            self.advanceY[k] = face.glyph.advance.y >> 6
            bitmap_left[k] = face.glyph.bitmap_left
            bitmap_top[k] = face.glyph.bitmap_top
            width = max( width, bitmap.width)
            left = min( left, face.glyph.bitmap_left )
            ascender = max( ascender, face.glyph.bitmap_top )
            descender = max( descender, bitmap.rows-face.glyph.bitmap_top )
        self.fontheight = ascender + descender
        height = self.fontheight + vpadding
        self.fontwidth = width
        width = self.fontwidth - left + hpadding
        # Atlas is a 16x6 grid of cells covering ASCII 32..127.
        Z = numpy.zeros((height*6, width*16), dtype=numpy.ubyte)
        # Second pass: blit each rendered glyph into its grid cell.
        for j in range(6):
            for i in range(16):
                k = 32+j*16+i
                c = chr(k)
                face.load_char(c, FT_LOAD_RENDER | FT_LOAD_FORCE_AUTOHINT )
                bitmap = face.glyph.bitmap
                x = i*width
                y = j*height
                x_l = x - left + face.glyph.bitmap_left
                x_r = x_l + bitmap.width
                y_t = y + ascender - face.glyph.bitmap_top
                y_b = y_t + bitmap.rows
                Z[y_t:y_b,x_l:x_r].flat = bitmap.buffer
        # Bound texture
        self.texfont = glGenTextures(1)
        glBindTexture( GL_TEXTURE_2D, self.texfont )
        glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR )
        glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR )
        # Single-channel GL_ALPHA texture: glyph coverage becomes alpha.
        glTexImage2D( GL_TEXTURE_2D, 0, GL_ALPHA, Z.shape[1], Z.shape[0], 0, GL_ALPHA, GL_UNSIGNED_BYTE, Z )
        # Generate display lists
        dx = width/float(Z.shape[1])
        dy = height/float(Z.shape[0])
        sy = vpadding/float(Z.shape[0])
        self.base = glGenLists(8*16)
        for i in range(8*16):
            c = chr(i)
            x = i%16
            y = i//16-2  # atlas rows start at ASCII 32, i.e. list index row 2
            glNewList(self.base+i, GL_COMPILE)
            if (i >= 32):
                glBegin( GL_QUADS )
                glTexCoord2f( (x )*dx, (y)*dy ), glVertex( 0, 0 )
                glTexCoord2f( (x+1)*dx, (y)*dy ), glVertex( width, 0 )
                glTexCoord2f( (x+1)*dx, (y+1)*dy - sy), glVertex( width, height-vpadding)
                glTexCoord2f( (x )*dx, (y+1)*dy - sy), glVertex( 0, height-vpadding )
                glEnd( )
                glTranslatef( self.advanceX[i], self.advanceY[i], 0 )
            glEndList( )
        glBindTexture( GL_TEXTURE_2D, 0)
#########################################################
# Event types dispatched through widget.processEvent();
# EVT_TOUCH_DOWN..EVT_TOUCH_DBLCLICK are the positional (touch) range.
EVT_NONE = 0
EVT_TOUCH_DOWN = 1
EVT_TOUCH_UP = 2
EVT_TOUCH_MOTION = 3
EVT_TOUCH_CLICK = 4
EVT_TOUCH_DBLCLICK = 5
EVT_KEY_DOWN = 6
EVT_KEY_UP = 7
# Initial widget size used by widget.__init__ until setSize() is called.
DEFAULT_WIDTH = 10
DEFAULT_HEIGHT = 10
# Drag-and-drop modes; presumably consumed by code outside this chunk.
DND_NONE = 0
DND_NODE = 1
DND_CONNECTION = 2
class AnimEvent:
    """An animation-type/handler pair queued on a widget; bTriggered marks
    that the handler should run on the next updateAnimation() pass."""
    def __init__(self, animType, pHandler):
        self.animType, self.pHandler = animType, pHandler
        # Set when the event is posted, cleared when the handler reports done.
        self.bTriggered = False
class UiEvent:
    """A UI input event: its EVT_* type, pointer position, and button."""
    def __init__(self, eType, posX, posY, button):
        self.eType = eType
        self.posX, self.posY = posX, posY
        self.button = button
class uiglobals:
    """Namespace of static immediate-mode GL drawing helpers shared by all
    widgets. Coordinates passed in are widget-local; they are converted to
    screen space via widget.localToGlobal() when a widget is supplied."""
    def __init__(self):
        pass
    @staticmethod
    def renderTexture(widget, iTex, x, y, z, w, h):
        """Draw icon iTex (an index into arrIcons) as one textured quad.

        NOTE(review): the w and h parameters are immediately overwritten by
        the texture's own width/height below, so the quad always renders at
        the texture's native size — confirm this is intentional."""
        global arrIcons
        texData = arrIcons[iTex]
        texId = texData[0]
        w = texData[1]
        h = texData[2]
        offX = x
        offY = y
        if None != widget:
            offX, offY = widget.localToGlobal(offX, offY)
        #glDisable(GL_COLOR_MATERIAL)
        glEnable(GL_TEXTURE_2D)
        glBindTexture(GL_TEXTURE_2D, texId)
        glPushMatrix()
        glTranslate(offX, offY, z)
        glBegin(GL_QUADS)
        glTexCoord2f( 0, 0)
        glVertex3f( 0, 0, 0)
        glTexCoord2f( 1, 0)
        glVertex3f( w, 0, 0)
        glTexCoord2f( 1, 1)
        glVertex3f( w, h, 0)
        glTexCoord2f( 0, 1)
        glVertex3f( 0, h, 0)
        glEnd()
        glPopMatrix()
        glDisable( GL_TEXTURE_2D )
        #glEnable(GL_COLOR_MATERIAL)
    @staticmethod
    def renderTextureSlice9(widget, iTex, x, y, z, w, h):
        ''' Assumes 10 pixel slice on all 4 sides '''
        # 9-slice scaling: the corner insets (lp/rp/tp/bp stored with the
        # texture) keep their pixel size while the middle rows/columns
        # stretch to fill a w x h rectangle.
        global arrIcons
        texData = arrIcons[iTex]
        texId = texData[0]
        tw = texData[1]
        th = texData[2]
        lp = texData[3] # 10
        rp = texData[4] # 10
        tp = texData[5] # 10
        bp = texData[6] # 10
        # Destination x coordinates of the four vertical slice boundaries.
        x0 = 0
        x1 = lp
        x2 = w-rp
        x3 = w
        # Corresponding texture u coordinates (L/R pairs are identical).
        v0 = float(0)/tw
        v1L = float(lp)/tw
        v1R = float(lp)/tw
        v2L = float(tw-rp)/tw
        v2R = float(tw-rp)/tw
        v3 = float(tw)/tw
        # Destination y coordinates of the four horizontal slice boundaries.
        y0 = 0
        y1 = tp
        y2 = h-bp
        y3 = h
        # Corresponding texture v coordinates (T/B pairs are identical).
        h0 = float(0.0)/th
        h1T = float(tp)/th
        h1B = float(tp)/th
        h2T = float(th-bp)/th
        h2B = float(th-bp)/th
        h3 = float(th)/th
        offX = x
        offY = y
        if None != widget:
            offX, offY = widget.localToGlobal(offX, offY)
        #glDisable(GL_COLOR_MATERIAL)
        glEnable(GL_TEXTURE_2D)
        glBindTexture(GL_TEXTURE_2D, texId)
        glPushMatrix()
        glTranslate(offX, offY, z)
        glBegin(GL_QUADS)
        # TL
        glTexCoord2f( 0, 0)
        glVertex3f( x0, y0, 0)
        glTexCoord2f( v1L, 0)
        glVertex3f( x1, y0, 0)
        glTexCoord2f( v1L, h1T)
        glVertex3f( x1, y1, 0)
        glTexCoord2f( 0, h1T)
        glVertex3f( x0, y1, 0)
        glEnd()
        # TM
        glBegin(GL_QUADS)
        glTexCoord2f( v1R, 0)
        glVertex3f( x1, y0, 0)
        glTexCoord2f( v2L, 0)
        glVertex3f( x2, y0, 0)
        glTexCoord2f( v2L, h1T)
        glVertex3f( x2, y1, 0)
        glTexCoord2f( v1R, h1T)
        glVertex3f( x1, y1, 0)
        glEnd()
        #TR
        # NOTE(review): this glBegin spans all seven remaining quads
        # (TR..BR) before the single glEnd below — legal for GL_QUADS,
        # which consumes vertices four at a time.
        glBegin(GL_QUADS)
        glTexCoord2f( v2R, 0)
        glVertex3f( x2, y0, 0)
        glTexCoord2f( v3, 0)
        glVertex3f( x3, y0, 0)
        glTexCoord2f( v3, h1T)
        glVertex3f( x3, y1, 0)
        glTexCoord2f( v2R, h1T)
        glVertex3f( x2, y1, 0)
        # ML
        glTexCoord2f( 0, h1B)
        glVertex3f( x0, y1, 0)
        glTexCoord2f( v1L, h1B)
        glVertex3f( x1, y1, 0)
        glTexCoord2f( v1L, h2T)
        glVertex3f( x1, y2, 0)
        glTexCoord2f( 0, h2T)
        glVertex3f( x0, y2, 0)
        # MM
        glTexCoord2f( v1R, h1B)
        glVertex3f( x1, y1, 0)
        glTexCoord2f( v2L, h1B)
        glVertex3f( x2, y1, 0)
        glTexCoord2f( v2L, h2T)
        glVertex3f( x2, y2, 0)
        glTexCoord2f( v1R, h2T)
        glVertex3f( x1, y2, 0)
        # MR
        glTexCoord2f( v2R, h1B)
        glVertex3f( x2, y1, 0)
        glTexCoord2f( v3, h1B)
        glVertex3f( x3, y1, 0)
        glTexCoord2f( v3, h2T)
        glVertex3f( x3, y2, 0)
        glTexCoord2f( v2R, h2T)
        glVertex3f( x2, y2, 0)
        # BL
        glTexCoord2f( 0, h2B)
        glVertex3f( x0, y2, 0)
        glTexCoord2f( v1L, h2B)
        glVertex3f( x1, y2, 0)
        glTexCoord2f( v1L, h3)
        glVertex3f( x1, y3, 0)
        glTexCoord2f( 0, h3)
        glVertex3f( x0, y3, 0)
        # BM
        glTexCoord2f( v1R, h2B)
        glVertex3f( x1, y2, 0)
        glTexCoord2f( v2L, h2B)
        glVertex3f( x2, y2, 0)
        glTexCoord2f( v2L, h3)
        glVertex3f( x2, y3, 0)
        glTexCoord2f( v1R, h3)
        glVertex3f( x1, y3, 0)
        # BR
        glTexCoord2f( v2R, h2B)
        glVertex3f( x2, y2, 0)
        glTexCoord2f( v3, h2B)
        glVertex3f( x3, y2, 0)
        glTexCoord2f( v3, h3)
        glVertex3f( x3, y3, 0)
        glTexCoord2f( v2R, h3)
        glVertex3f( x2, y3, 0)
        glEnd()
        glPopMatrix()
        glDisable( GL_TEXTURE_2D )
        #glEnable(GL_COLOR_MATERIAL)
    @staticmethod
    def renderBkg(widget, x, y, z, w, h):
        """Fill a w x h rectangle at (x, y, z) with untextured solid white."""
        offX = x
        offY = y
        if None != widget:
            offX, offY = widget.localToGlobal(offX, offY)
        #print "offX, offY = ", offX, offY
        #glEnable(GL_COLOR_MATERIAL)
        glDisable( GL_TEXTURE_2D )
        glPushMatrix()
        glTranslate(offX, offY, z)
        glBegin(GL_QUADS)
        glColor4f(1.0, 1.0, 1.0, 1.0)
        glVertex3f( 0, 0, 0)
        glVertex3f( w, 0, 0)
        glVertex3f( w, h, 0)
        glVertex3f( 0, h, 0)
        glEnd()
        glPopMatrix()
        glColor4f(1.0, 1.0, 1.0, 1.0)
        glEnable(GL_TEXTURE_2D)
        #glDisable(GL_COLOR_MATERIAL)
class widget :
    """Base class for every UI element.

    A widget is a rectangle (m_x, m_y, m_w, m_h) positioned relative to its
    parent, with a z order, child widgets, per-event-type handler callbacks
    (see setHandler), and a queue of AnimEvent animation events.
    """
    def __init__(self):
        global DEFAULT_WIDTH
        global DEFAULT_HEIGHT
        self.name = "unnamed"
        # Geometry, relative to the parent widget.
        self.m_x = 0
        self.m_y = 0
        self.m_z = 0
        self.m_w = DEFAULT_WIDTH
        self.m_h = DEFAULT_HEIGHT
        # Visibility / interaction state.
        self.m_bShow = True
        self.m_bPressed = False
        self.m_bGrouped = False
        self.m_bAnimInProgress = False
        # Child widgets and the parent back-link.
        self.m_arrpChildren = []
        self.m_numWidgets = 0
        self.m_pParent = None
        # Event-type -> handler(widget, event) callbacks.
        self.m_arrpHandlers = {}
        # AnimEvent instances awaiting updateAnimation().
        self.m_arrEvent = []
        self.m_numEvents = 0
    def loadAssets(self):
        """Recursively let children load their assets."""
        for child in self.m_arrpChildren:
            child.loadAssets()
    def redraw(self):
        """Repaint all children; subclasses draw themselves first."""
        if self.m_bShow:
            for child in self.m_arrpChildren:
                child.redraw()
    def show(self, bShow):
        self.m_bShow = bShow
    def setPressed(self, bPressed):
        self.m_bPressed = bPressed
    def setPos(self, x, y):
        """Place the widget relative to its parent."""
        self.m_x = x
        self.m_y = y
    def moveByDelta(self, dx, dy):
        self.m_x = self.m_x + dx
        self.m_y = self.m_y + dy
    def setSize(self, w, h):
        self.m_w = w
        self.m_h = h
    def addChildWidget(self, child):
        """Append a child, set its parent link, return its index."""
        self.m_arrpChildren.append(child)
        nId = len(self.m_arrpChildren) - 1
        child.setParent(self)
        return nId
    def localToGlobal(self, x, y):
        """Convert widget-local coordinates to screen coordinates."""
        if self.m_pParent is not None:
            x, y = self.m_pParent.localToGlobal(x, y)
        x = x + self.m_x
        y = y + self.m_y
        return x, y
    def globalToLocal(self, x, y):
        """Convert screen coordinates to widget-local coordinates."""
        if self.m_pParent is not None:
            x, y = self.m_pParent.globalToLocal(x, y)
        x = x - self.m_x
        y = y - self.m_y
        return x, y
    def toLocal(self, x, y):
        """Convert parent-local coordinates into this widget's local space."""
        x = x - self.m_x
        y = y - self.m_y
        return x, y
    def toParent(self, parentName, x, y):
        """Convert local coordinates into the space of the named ancestor."""
        if parentName != self.name:
            x = x + self.m_x
            y = y + self.m_y
            if self.m_pParent is not None:
                x, y = self.m_pParent.toParent(parentName, x, y)
        return x, y
    def containsPoint(self, lx, ly):
        # Stub: the base widget never claims a point; subclasses may override.
        return False
    def getTopmostChildAt(self, lx, ly, belowChild=None):
        """Return the most recently added child whose rect contains (lx, ly).
        When belowChild is given, only children with a smaller z qualify."""
        for child in reversed(self.m_arrpChildren):
            bInside = (child.m_x <= lx <= child.m_x + child.m_w and
                       child.m_y <= ly <= child.m_y + child.m_h)
            if bInside and (belowChild is None or belowChild.m_z > child.m_z):
                return child
        return None
    def setParent(self, pParent):
        self.m_pParent = pParent
    def processEvent(self, pUiEvent):
        """Dispatch an event.

        Touch events are first offered to the topmost child under the
        pointer; an unconsumed event is then handled by this widget's own
        registered handler, bracketed by group pre/post hooks.
        Returns (bProcessed, bRedraw).
        """
        bProcessed = False
        bRedraw = False
        bNeedRedraw0 = False
        bNeedRedraw1 = False
        bNeedRedraw2 = False
        bNeedRedraw3 = False
        # Positional dispatch applies to touch events only.
        # BUG FIX: the original range test joined the bounds with `or`,
        # which is true for every event type, so key events were also
        # routed to whichever child sat under the stale pointer position.
        if EVT_TOUCH_DOWN <= pUiEvent.eType <= EVT_TOUCH_DBLCLICK:
            lx, ly = self.toLocal(pUiEvent.posX, pUiEvent.posY)
            child = self.getTopmostChildAt(lx, ly)
            if child is not None:
                pNewUiEvent = UiEvent(pUiEvent.eType, lx, ly, pUiEvent.button)
                bProcessed, bRedraw = child.processEvent(pNewUiEvent)
        if not bProcessed and self.m_bShow:
            pGroupParent = None
            if self.m_bGrouped and self.m_pParent is not None:
                pGroupParent = self.m_pParent
            if pGroupParent is not None:
                bNeedRedraw0 = pGroupParent.executePreHandler(self, pUiEvent.eType)
            bNeedRedraw1 = self.executePreHandler(self, pUiEvent.eType)
            # `in` replaces dict.has_key(), which was removed in Python 3.
            if pUiEvent.eType in self.m_arrpHandlers:
                pHandler = self.m_arrpHandlers[pUiEvent.eType]
                if pHandler is not None:
                    pHandler(self, pUiEvent)
                    bProcessed = True
            bNeedRedraw2 = self.executePostHandler(self, pUiEvent.eType)
            if pGroupParent is not None:
                bNeedRedraw3 = pGroupParent.executePostHandler(self, pUiEvent.eType)
        bRedraw = bRedraw or bNeedRedraw0 or bNeedRedraw1 or bNeedRedraw2 or bNeedRedraw3
        return bProcessed, bRedraw
    def setHandler(self, eType, pHandler):
        """Register the handler callback for one event type."""
        self.m_arrpHandlers[eType] = pHandler
    def executePreHandler(self, widget, event):
        # Hook: subclasses return True to request a redraw.
        return False
    def executePostHandler(self, widget, event):
        # Hook: subclasses return True to request a redraw.
        return False
    def setGrouped(self, bGrouped):
        self.m_bGrouped = bGrouped
    def postEvent(self, pEvent):
        """Trigger matching queued AnimEvents, or bubble up to the parent."""
        bPresentInEventList = False
        for pTmp in self.m_arrEvent:
            # BUG FIX: m_arrEvent holds AnimEvent objects, whose attribute is
            # animType, not eType — the original pTmp.eType comparison raised
            # AttributeError as soon as any event was queued.
            if pTmp.animType == pEvent.animType:
                pTmp.bTriggered = True
                bPresentInEventList = True
        if not bPresentInEventList:
            if self.m_pParent is not None:
                self.m_pParent.postEvent(pEvent)
    def addEventHandler(self, animType, pHandler):
        """Queue an AnimEvent of animType served by pHandler."""
        animevent = AnimEvent(animType, pHandler)
        self.m_arrEvent.append(animevent)
    def updateAnimation(self):
        """Run every triggered animation handler, then recurse into children.
        Returns True when anything ran (the scene needs a redraw)."""
        bUpdated = False
        for pAnimTmp in self.m_arrEvent:
            if pAnimTmp.bTriggered:
                bDone = pAnimTmp.pHandler(self)
                if bDone:
                    # The handler reports completion; stop re-running it.
                    pAnimTmp.bTriggered = False
                bUpdated = True
        # BUG FIX: the original loop bound `tmp` but the body read the
        # undefined name `pTmp`, raising NameError whenever children existed.
        for pChild in self.m_arrpChildren:
            if pChild is not None:
                bUpdated = bUpdated | pChild.updateAnimation()
        return bUpdated
class image(widget):
    """A widget that paints a 9-sliced background texture while visible."""
    def __init__(self):
        widget.__init__(self)
        # Icon index (into arrIcons) used when the widget is not pressed.
        self.texReleased = None
    def setReleasedImage(self, texReleased):
        """Choose the texture drawn in the released (not pressed) state."""
        self.texReleased = texReleased
    def redraw(self):
        """Paint the released texture when applicable, then repaint children."""
        if not self.m_bShow:
            return
        if not self.m_bPressed and self.texReleased is not None:
            uiglobals.renderTextureSlice9(self, self.texReleased, 0, 0, -1, self.m_w, self.m_h)
        widget.redraw(self)
class clickableimage(image):
def __init__(self):
image.__init__(self)
self.texReleased = None
def setPressedImage(self, texPressed):
self.texPressed = texPressed
def redraw(self):
if True == self.m_bShow:
if True == self.m_bPressed:
if None != self.texPressed:
uiglobals.renderTextureSlice9(self, self.texPressed, 0, 0, | |
import torch
import torch.nn as nn
import torch.nn.functional as F
class SupConUnet(nn.Module):
    """U-Net encoder followed by a projection or classification head.

    mode "cls" attaches a single 1x1 conv producing num_classes channels;
    mode "mlp" attaches a two-layer 1x1-conv projection head (SupCon style).
    forward() returns the head output on the final decoder feature map.
    """
    def __init__(self, num_classes, in_channels=1, initial_filter_size=64,
                 kernel_size=3, do_instancenorm=True, mode="cls"):
        super(SupConUnet, self).__init__()
        self.encoder = UNet(num_classes, in_channels, initial_filter_size, kernel_size, do_instancenorm)
        if mode == 'mlp':
            self.head = nn.Sequential(nn.Conv2d(initial_filter_size, 256, kernel_size=1),
                                      nn.Conv2d(256, num_classes, kernel_size=1))
        elif mode == "cls":
            self.head = nn.Conv2d(initial_filter_size, num_classes, kernel_size=1)
        else:
            # BUG FIX: `NotImplemented` is a built-in constant, not an
            # exception class; raising it fails with "TypeError: exceptions
            # must derive from BaseException" instead of reporting the mode.
            raise NotImplementedError("This mode is not supported yet")
    def forward(self, x):
        y = self.encoder(x)
        output = self.head(y)
        # output = F.normalize(self.head(y), dim=1)
        return output
class SupConUnetInfer(nn.Module):
    """U-Net encoder plus a 1x1-conv head; forward() returns both the
    decoder feature map and the head output as a (features, logits) pair."""
    def __init__(self, num_classes, in_channels=1, initial_filter_size=64,
                 kernel_size=3, do_instancenorm=True):
        super(SupConUnetInfer, self).__init__()
        self.encoder = UNet(num_classes, in_channels, initial_filter_size, kernel_size, do_instancenorm)
        self.head = nn.Conv2d(initial_filter_size, num_classes, kernel_size=1)
    def forward(self, x):
        features = self.encoder(x)
        logits = self.head(features)
        return features, logits
class UNet(nn.Module):
    """2D U-Net backbone: four contracting stages, a bottleneck, and four
    expanding stages with skip connections.

    There is no final classification layer here (`self.final` is commented
    out): forward() returns the last decoder feature map with
    `initial_filter_size` channels, and heads are attached by wrapper
    modules.
    """
    def __init__(self, num_classes, in_channels=1, initial_filter_size=64, kernel_size=3, do_instancenorm=True):
        # NOTE(review): num_classes is accepted but unused in this class
        # because the segmentation output layer below is commented out.
        super().__init__()
        # Contracting path: two convs per stage; channels double each stage.
        self.contr_1_1 = self.contract(in_channels, initial_filter_size, kernel_size, instancenorm=do_instancenorm)
        self.contr_1_2 = self.contract(initial_filter_size, initial_filter_size, kernel_size, instancenorm=do_instancenorm)
        # A single pooling module is reused after every contracting stage.
        self.pool = nn.MaxPool2d(2, stride=2)
        self.contr_2_1 = self.contract(initial_filter_size, initial_filter_size*2, kernel_size, instancenorm=do_instancenorm)
        self.contr_2_2 = self.contract(initial_filter_size*2, initial_filter_size*2, kernel_size, instancenorm=do_instancenorm)
        # self.pool2 = nn.MaxPool2d(2, stride=2)
        self.contr_3_1 = self.contract(initial_filter_size*2, initial_filter_size*2**2, kernel_size, instancenorm=do_instancenorm)
        self.contr_3_2 = self.contract(initial_filter_size*2**2, initial_filter_size*2**2, kernel_size, instancenorm=do_instancenorm)
        # self.pool3 = nn.MaxPool2d(2, stride=2)
        self.contr_4_1 = self.contract(initial_filter_size*2**2, initial_filter_size*2**3, kernel_size, instancenorm=do_instancenorm)
        self.contr_4_2 = self.contract(initial_filter_size*2**3, initial_filter_size*2**3, kernel_size, instancenorm=do_instancenorm)
        # self.pool4 = nn.MaxPool2d(2, stride=2)
        # Bottleneck: conv-conv, then a transposed conv upsampling 2x.
        self.center = nn.Sequential(
            nn.Conv2d(initial_filter_size*2**3, initial_filter_size*2**4, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(initial_filter_size*2**4, initial_filter_size*2**4, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(initial_filter_size*2**4, initial_filter_size*2**3, 2, stride=2),
            nn.ReLU(inplace=True),
        )
        # Expanding path: each stage consumes the [upsampled, skip] concat.
        self.expand_4_1 = self.expand(initial_filter_size*2**4, initial_filter_size*2**3)
        self.expand_4_2 = self.expand(initial_filter_size*2**3, initial_filter_size*2**3)
        self.upscale4 = nn.ConvTranspose2d(initial_filter_size*2**3, initial_filter_size*2**2, kernel_size=2, stride=2)
        self.expand_3_1 = self.expand(initial_filter_size*2**3, initial_filter_size*2**2)
        self.expand_3_2 = self.expand(initial_filter_size*2**2, initial_filter_size*2**2)
        self.upscale3 = nn.ConvTranspose2d(initial_filter_size*2**2, initial_filter_size*2, 2, stride=2)
        self.expand_2_1 = self.expand(initial_filter_size*2**2, initial_filter_size*2)
        self.expand_2_2 = self.expand(initial_filter_size*2, initial_filter_size*2)
        self.upscale2 = nn.ConvTranspose2d(initial_filter_size*2, initial_filter_size, 2, stride=2)
        self.expand_1_1 = self.expand(initial_filter_size*2, initial_filter_size)
        self.expand_1_2 = self.expand(initial_filter_size, initial_filter_size)
        # Output layer for segmentation
        # self.final = nn.Conv2d(initial_filter_size, num_classes, kernel_size=1) # kernel size for final layer = 1, see paper
        self.softmax = torch.nn.Softmax2d()
        # Output layer for "autoencoder-mode"
        self.output_reconstruction_map = nn.Conv2d(initial_filter_size, out_channels=1, kernel_size=1)
    @staticmethod
    def contract(in_channels, out_channels, kernel_size=3, instancenorm=True):
        """Conv + (optional InstanceNorm) + LeakyReLU block for the encoder."""
        if instancenorm:
            layer = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size, padding=1),
                nn.InstanceNorm2d(out_channels),
                nn.LeakyReLU(inplace=True))
        else:
            layer = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size, padding=1),
                nn.LeakyReLU(inplace=True))
        return layer
    @staticmethod
    def expand(in_channels, out_channels, kernel_size=3):
        """Conv + LeakyReLU block for the decoder (no normalization)."""
        layer = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size, padding=1),
            nn.LeakyReLU(inplace=True),
        )
        return layer
    @staticmethod
    def center_crop(layer, target_width, target_height):
        """Center-crop a (B, C, H, W) tensor to the target spatial size."""
        batch_size, n_channels, layer_width, layer_height = layer.size()
        xy1 = (layer_width - target_width) // 2
        xy2 = (layer_height - target_height) // 2
        return layer[:, :, xy1:(xy1 + target_width), xy2:(xy2 + target_height)]
    def forward(self, x, enable_concat=True):
        """Run the U-Net; with enable_concat=False the skip connections are
        multiplied by zero, reducing the model to a plain encoder-decoder."""
        concat_weight = 1
        if not enable_concat:
            concat_weight = 0
        contr_1 = self.contr_1_2(self.contr_1_1(x))
        pool = self.pool(contr_1)
        contr_2 = self.contr_2_2(self.contr_2_1(pool))
        pool = self.pool(contr_2)
        contr_3 = self.contr_3_2(self.contr_3_1(pool))
        pool = self.pool(contr_3)
        contr_4 = self.contr_4_2(self.contr_4_1(pool))
        pool = self.pool(contr_4)
        center = self.center(pool)
        # Each decoder stage: crop the skip tensor to the upsampled size,
        # concatenate along channels, then apply the two expand convs.
        crop = self.center_crop(contr_4, center.size()[2], center.size()[3])
        concat = torch.cat([center, crop*concat_weight], 1)
        expand = self.expand_4_2(self.expand_4_1(concat))
        upscale = self.upscale4(expand)
        crop = self.center_crop(contr_3, upscale.size()[2], upscale.size()[3])
        concat = torch.cat([upscale, crop*concat_weight], 1)
        expand = self.expand_3_2(self.expand_3_1(concat))
        upscale = self.upscale3(expand)
        crop = self.center_crop(contr_2, upscale.size()[2], upscale.size()[3])
        concat = torch.cat([upscale, crop*concat_weight], 1)
        expand = self.expand_2_2(self.expand_2_1(concat))
        upscale = self.upscale2(expand)
        crop = self.center_crop(contr_1, upscale.size()[2], upscale.size()[3])
        concat = torch.cat([upscale, crop*concat_weight], 1)
        expand = self.expand_1_2(self.expand_1_1(concat))
        return expand
class DownsampleUnet(nn.Module):
def __init__(self, in_channels=1, initial_filter_size=64, kernel_size=3, do_instancenorm=True):
super().__init__()
self.contr_1_1 = self.contract(in_channels, initial_filter_size, kernel_size, instancenorm=do_instancenorm)
self.contr_1_2 = self.contract(initial_filter_size, initial_filter_size, kernel_size, instancenorm=do_instancenorm)
self.pool = nn.MaxPool2d(2, stride=2)
self.contr_2_1 = self.contract(initial_filter_size, initial_filter_size*2, kernel_size, instancenorm=do_instancenorm)
self.contr_2_2 = self.contract(initial_filter_size*2, initial_filter_size*2, kernel_size, instancenorm=do_instancenorm)
# self.pool2 = nn.MaxPool2d(2, stride=2)
self.contr_3_1 = self.contract(initial_filter_size*2, initial_filter_size*2**2, kernel_size, instancenorm=do_instancenorm)
self.contr_3_2 = self.contract(initial_filter_size*2**2, initial_filter_size*2**2, kernel_size, instancenorm=do_instancenorm)
# self.pool3 = nn.MaxPool2d(2, stride=2)
self.contr_4_1 = self.contract(initial_filter_size*2**2, initial_filter_size*2**3, kernel_size, instancenorm=do_instancenorm)
self.contr_4_2 = self.contract(initial_filter_size*2**3, initial_filter_size*2**3, kernel_size, instancenorm=do_instancenorm)
# self.pool4 = nn.MaxPool2d(2, stride=2)
self.center = nn.Sequential(
nn.Conv2d(initial_filter_size*2**3, initial_filter_size*2**4, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(initial_filter_size*2**4, initial_filter_size*2**4, 3, padding=1),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(initial_filter_size*2**4, initial_filter_size*2**3, 2, stride=2),
nn.ReLU(inplace=True),
)
self.softmax = torch.nn.Softmax2d()
@staticmethod
def contract(in_channels, out_channels, kernel_size=3, instancenorm=True):
if instancenorm:
layer = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, padding=1),
nn.InstanceNorm2d(out_channels),
nn.LeakyReLU(inplace=True))
else:
layer = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, padding=1),
nn.LeakyReLU(inplace=True))
return layer
@staticmethod
def center_crop(layer, target_width, target_height):
batch_size, n_channels, layer_width, layer_height = layer.size()
xy1 = (layer_width - target_width) // 2
xy2 = (layer_height - target_height) // 2
return layer[:, :, xy1:(xy1 + target_width), xy2:(xy2 + target_height)]
def forward(self, x, enable_concat=True):
concat_weight = 1
if not enable_concat:
concat_weight = 0
contr_1 = self.contr_1_2(self.contr_1_1(x))
pool = self.pool(contr_1)
contr_2 = self.contr_2_2(self.contr_2_1(pool))
pool = self.pool(contr_2)
contr_3 = self.contr_3_2(self.contr_3_1(pool))
pool = self.pool(contr_3)
contr_4 = self.contr_4_2(self.contr_4_1(pool))
pool = self.pool(contr_4)
center = self.center(pool)
return center
class GlobalConUnet(nn.Module):
    """Encoder-only wrapper: maps an input image to bottleneck features."""

    def __init__(self, in_channels=1, initial_filter_size=64):
        super().__init__()
        # Reuse the contracting half of the U-Net as a feature encoder.
        self.encoder = DownsampleUnet(in_channels, initial_filter_size)

    def forward(self, x):
        return self.encoder(x)
class MLP(nn.Module):
    """Projection head: global average pooling followed by two linear layers.

    Maps (B, input_channels, H, W) feature maps to (B, num_class) embeddings.
    """

    def __init__(self, input_channels=512, num_class=128):
        super().__init__()
        self.gap = nn.AdaptiveAvgPool2d(1)  # (B, C, H, W) -> (B, C, 1, 1)
        self.f1 = nn.Linear(input_channels, input_channels)
        self.f2 = nn.Linear(input_channels, num_class)

    def forward(self, x):
        x = self.gap(x)
        # Flatten from dim 1 instead of squeeze(): squeeze() also dropped the
        # batch dimension when B == 1, yielding a (num_class,) output instead
        # of (1, num_class).
        y = self.f1(torch.flatten(x, 1))
        y = self.f2(y)
        return y
class UpsampleUnet2(nn.Module):
    """U-Net variant whose decoder stops after stage 2: the output carries
    initial_filter_size*2 channels at half of the input resolution."""

    def __init__(self, in_channels=1, initial_filter_size=64, kernel_size=3, do_instancenorm=True):
        super().__init__()
        f = initial_filter_size
        # Contracting path: channel width doubles at every stage.
        self.contr_1_1 = self.contract(in_channels, f, kernel_size, instancenorm=do_instancenorm)
        self.contr_1_2 = self.contract(f, f, kernel_size, instancenorm=do_instancenorm)
        self.pool = nn.MaxPool2d(2, stride=2)  # shared by all stages
        self.contr_2_1 = self.contract(f, f * 2, kernel_size, instancenorm=do_instancenorm)
        self.contr_2_2 = self.contract(f * 2, f * 2, kernel_size, instancenorm=do_instancenorm)
        self.contr_3_1 = self.contract(f * 2, f * 4, kernel_size, instancenorm=do_instancenorm)
        self.contr_3_2 = self.contract(f * 4, f * 4, kernel_size, instancenorm=do_instancenorm)
        self.contr_4_1 = self.contract(f * 4, f * 8, kernel_size, instancenorm=do_instancenorm)
        self.contr_4_2 = self.contract(f * 8, f * 8, kernel_size, instancenorm=do_instancenorm)
        # Bottleneck ending in a stride-2 transposed conv (2x upsample).
        self.center = nn.Sequential(
            nn.Conv2d(f * 8, f * 16, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(f * 16, f * 16, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(f * 16, f * 8, 2, stride=2),
            nn.ReLU(inplace=True),
        )
        # Expanding path, truncated after stage 2.
        self.expand_4_1 = self.expand(f * 16, f * 8)
        self.expand_4_2 = self.expand(f * 8, f * 8)
        self.upscale4 = nn.ConvTranspose2d(f * 8, f * 4, kernel_size=2, stride=2)
        self.expand_3_1 = self.expand(f * 8, f * 4)
        self.expand_3_2 = self.expand(f * 4, f * 4)
        self.upscale3 = nn.ConvTranspose2d(f * 4, f * 2, 2, stride=2)
        self.expand_2_1 = self.expand(f * 4, f * 2)
        self.expand_2_2 = self.expand(f * 2, f * 2)
        self.softmax = torch.nn.Softmax2d()  # kept for interface parity; unused in forward

    @staticmethod
    def contract(in_channels, out_channels, kernel_size=3, instancenorm=True):
        """One contracting block: Conv2d -> optional InstanceNorm2d -> LeakyReLU."""
        modules = [nn.Conv2d(in_channels, out_channels, kernel_size, padding=1)]
        if instancenorm:
            modules.append(nn.InstanceNorm2d(out_channels))
        modules.append(nn.LeakyReLU(inplace=True))
        return nn.Sequential(*modules)

    @staticmethod
    def expand(in_channels, out_channels, kernel_size=3):
        """One expanding block: Conv2d followed by LeakyReLU."""
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size, padding=1),
            nn.LeakyReLU(inplace=True),
        )

    @staticmethod
    def center_crop(layer, target_width, target_height):
        """Symmetrically crop a (B, C, H, W) tensor to the target spatial size."""
        _, _, size_h, size_w = layer.size()
        off_h = (size_h - target_width) // 2
        off_w = (size_w - target_height) // 2
        return layer[:, :, off_h:off_h + target_width, off_w:off_w + target_height]

    def forward(self, x, enable_concat=True):
        """Run the truncated U-Net; skip connections are multiplied by zero
        when `enable_concat` is False (shapes stay unchanged)."""
        skip_scale = 1 if enable_concat else 0
        down1 = self.contr_1_2(self.contr_1_1(x))
        down2 = self.contr_2_2(self.contr_2_1(self.pool(down1)))
        down3 = self.contr_3_2(self.contr_3_1(self.pool(down2)))
        down4 = self.contr_4_2(self.contr_4_1(self.pool(down3)))
        bottleneck = self.center(self.pool(down4))
        skip = self.center_crop(down4, bottleneck.size()[2], bottleneck.size()[3])
        up = self.expand_4_2(self.expand_4_1(torch.cat([bottleneck, skip * skip_scale], 1)))
        up = self.upscale4(up)
        skip = self.center_crop(down3, up.size()[2], up.size()[3])
        up = self.expand_3_2(self.expand_3_1(torch.cat([up, skip * skip_scale], 1)))
        up = self.upscale3(up)
        skip = self.center_crop(down2, up.size()[2], up.size()[3])
        up = self.expand_2_2(self.expand_2_1(torch.cat([up, skip * skip_scale], 1)))
        return up
class LocalConUnet2(nn.Module):
    """Truncated U-Net encoder plus an MLP projection head.

    NOTE(review): `forward` returns only the encoder features; `self.head`
    is never applied here -- presumably it is called separately by the
    training loop. Confirm before relying on this class end-to-end.
    """

    def __init__(self, num_classes, in_channels=1, initial_filter_size=64):
        super().__init__()
        self.encoder = UpsampleUnet2(in_channels, initial_filter_size)
        self.head = MLP(input_channels=initial_filter_size * 2, num_class=num_classes)

    def forward(self, x):
        return self.encoder(x)
class UpsampleUnet3(nn.Module):
def __init__(self, in_channels=1, initial_filter_size=64, kernel_size=3, do_instancenorm=True):
super().__init__()
self.contr_1_1 = self.contract(in_channels, initial_filter_size, kernel_size, instancenorm=do_instancenorm)
self.contr_1_2 = self.contract(initial_filter_size, initial_filter_size, kernel_size, instancenorm=do_instancenorm)
self.pool = nn.MaxPool2d(2, stride=2)
self.contr_2_1 = self.contract(initial_filter_size, initial_filter_size*2, kernel_size, instancenorm=do_instancenorm)
self.contr_2_2 = self.contract(initial_filter_size*2, initial_filter_size*2, kernel_size, instancenorm=do_instancenorm)
# self.pool2 = nn.MaxPool2d(2, stride=2)
self.contr_3_1 = self.contract(initial_filter_size*2, initial_filter_size*2**2, kernel_size, instancenorm=do_instancenorm)
self.contr_3_2 = self.contract(initial_filter_size*2**2, initial_filter_size*2**2, kernel_size, instancenorm=do_instancenorm)
# self.pool3 = nn.MaxPool2d(2, stride=2)
self.contr_4_1 = self.contract(initial_filter_size*2**2, initial_filter_size*2**3, kernel_size, instancenorm=do_instancenorm)
self.contr_4_2 = self.contract(initial_filter_size*2**3, initial_filter_size*2**3, kernel_size, instancenorm=do_instancenorm)
# self.pool4 = nn.MaxPool2d(2, stride=2)
self.center = nn.Sequential(
nn.Conv2d(initial_filter_size*2**3, initial_filter_size*2**4, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(initial_filter_size*2**4, initial_filter_size*2**4, 3, padding=1),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(initial_filter_size*2**4, initial_filter_size*2**3, 2, stride=2),
nn.ReLU(inplace=True),
)
self.expand_4_1 = self.expand(initial_filter_size * 2 ** 4, initial_filter_size * 2 ** 3)
self.expand_4_2 = self.expand(initial_filter_size * 2 ** 3, initial_filter_size * 2 ** 3)
self.upscale4 = nn.ConvTranspose2d(initial_filter_size * 2 ** 3, initial_filter_size * 2 ** 2, kernel_size=2,
stride=2)
self.expand_3_1 = self.expand(initial_filter_size * 2 ** 3, initial_filter_size * 2 ** 2)
self.expand_3_2 = self.expand(initial_filter_size * 2 ** 2, initial_filter_size * 2 ** 2)
self.softmax = torch.nn.Softmax2d()
@staticmethod
def contract(in_channels, out_channels, kernel_size=3, instancenorm=True):
if instancenorm:
layer = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, padding=1),
nn.InstanceNorm2d(out_channels),
nn.LeakyReLU(inplace=True))
else:
| |
"Implements ConstraintSet"
from collections import defaultdict, OrderedDict
from itertools import chain
import numpy as np
from ..small_classes import Numbers
from ..keydict import KeySet, KeyDict
from ..small_scripts import try_str_without
from ..repr_conventions import GPkitObject
from .single_equation import SingleEquationConstraint
def add_meq_bounds(bounded, meq_bounded):  #TODO: collapse with GP version?
    """Propagate bounds through monomial equalities until a fixed point.

    Mutates both arguments: bounds that are already in `bounded`, or whose
    conditions are met, are removed from `meq_bounded`; newly established
    bounds are added to `bounded`. Repeats until a full sweep changes nothing.
    """
    progress = True
    while progress:
        progress = False  # set again only if this sweep changes something
        for bound, conditions in list(meq_bounded.items()):
            if bound in bounded:  # already established by an inequality
                del meq_bounded[bound]
                continue
            if any(condition.issubset(bounded) for condition in conditions):
                # one of the bound's conditions is met: the bound holds
                del meq_bounded[bound]
                bounded.add(bound)
                progress = True
def _sort_by_name_and_idx(var):
"return tuple for Variable sorting"
return (var.key.str_without(["units", "idx"]), var.key.idx or ())
def _sort_constrs(item):
    """Sort key for (label, constraint) pairs: single equations first,
    then constraints without lineage, then alphabetically by label."""
    label, constraint = item
    is_compound = not isinstance(constraint, SingleEquationConstraint)
    has_lineage = hasattr(constraint, "lineage") and bool(constraint.lineage)
    return (is_compound, has_lineage, label)
class ConstraintSet(list, GPkitObject):
    "Recursive container for ConstraintSets and Inequalities"
    # class-level defaults; instances overwrite these in __init__
    # (subclasses may predefine unique_varkeys)
    unique_varkeys, idxlookup = frozenset(), {}
    varkeys = _name_collision_varkeys = None
    def __init__(self, constraints, substitutions=None): # pylint: disable=too-many-branches,too-many-statements
        # A lone ConstraintSet is wrapped one level down; a dict becomes a
        # named set whose keys are stored in idxlookup for string indexing.
        if isinstance(constraints, ConstraintSet):
            constraints = [constraints]  # put it one level down
        elif isinstance(constraints, dict):
            if isinstance(constraints, OrderedDict):
                items = constraints.items()
            else:
                items = sorted(list(constraints.items()), key=_sort_constrs)
            self.idxlookup = {k: i for i, (k, _) in enumerate(items)}
            constraints = list(zip(*items))[1]
        list.__init__(self, constraints)
        # initializations for attributes used elsewhere
        self.numpy_bools = False
        # get substitutions and convert all members to ConstraintSets
        self.varkeys = KeySet(self.unique_varkeys)
        self.substitutions = KeyDict({k: k.value for k in self.unique_varkeys
                                      if "value" in k.descr})
        self.substitutions.varkeys = self.varkeys
        self.bounded, self.meq_bounded = set(), defaultdict(set)
        for i, constraint in enumerate(self):
            if not isinstance(constraint, ConstraintSet):
                if hasattr(constraint, "__iter__"):
                    # plain iterables are recursively wrapped, in place
                    list.__setitem__(self, i, ConstraintSet(constraint))
                elif not hasattr(constraint, "as_hmapslt1"):
                    if not isinstance(constraint, np.bool_):
                        raise_badelement(self, i, constraint)
                    else: # allow NomialArray equalities (arr == "a", etc.)
                        self.numpy_bools = True # but mark them so
            elif not hasattr(constraint, "numpy_bools"): # we can catch them!
                raise ValueError("a ConstraintSet of type %s was included in"
                                 " another ConstraintSet before being"
                                 " initialized." % type(constraint))
            elif constraint.numpy_bools:
                raise_elementhasnumpybools(constraint)
            # hoist varkeys, substitutions, and bound info up from each member
            if hasattr(self[i], "varkeys"):
                self.varkeys.update(self[i].varkeys)
                if hasattr(self[i], "substitutions"):
                    self.substitutions.update(self[i].substitutions)
                else:
                    self.substitutions.update({k: k.value \
                        for k in self[i].varkeys if "value" in k.descr})
                self.bounded.update(self[i].bounded)
                for bound, solutionset in self[i].meq_bounded.items():
                    self.meq_bounded[bound].update(solutionset)
            if type(self[i]) is ConstraintSet: # pylint: disable=unidiomatic-typecheck
                # plain (non-subclassed) members have been fully hoisted,
                # so their caches are dropped to save memory
                del self[i].varkeys
                del self[i].substitutions
                del self[i].bounded
                del self[i].meq_bounded
        # TODO, speedup: make constraintset more and more a list;
        # don't turn every sub-element into its own dang set.
        # keep a flattened list of those with hmapslt1,
        # process_result, and Thats It.
        if substitutions:
            self.substitutions.update(substitutions)
        updated_veckeys = False # vector subs need to find each indexed varkey
        for subkey in self.substitutions:
            if not updated_veckeys and subkey.shape and not subkey.idx:
                # whole-vector substitution: register every indexed element
                # under its veckey before doing any lookups (done only once)
                for key in self.varkeys:
                    if key.veckey:
                        self.varkeys.keymap[key.veckey].add(key)
                updated_veckeys = True
            for key in self.varkeys[subkey]:
                # substituted keys count as bounded in both directions
                self.bounded.add((key, "upper"))
                self.bounded.add((key, "lower"))
                if key.value is not None and not key.constant:
                    # substitution overrides the key's own (non-constant) value
                    del key.descr["value"]
                    if key.veckey and key.veckey.value is not None:
                        del key.veckey.descr["value"]
        add_meq_bounds(self.bounded, self.meq_bounded)
    def __getitem__(self, key):
        """Index by position (int), constraint name, or variable name."""
        if key in self.idxlookup:
            key = self.idxlookup[key]
        if isinstance(key, int):
            return list.__getitem__(self, key)
        return self._choosevar(key, self.variables_byname(key))
    def _choosevar(self, key, variables):
        """Return the single variable (or full vector) matching `key`.

        Raises KeyError if nothing matches and ValueError if the name is
        ambiguous across distinct non-vector variables.
        """
        if not variables:
            raise KeyError(key)
        firstvar, *othervars = variables
        veckey = firstvar.key.veckey
        if veckey is None or any(v.key.veckey != veckey for v in othervars):
            if not othervars:
                return firstvar
            raise ValueError("multiple variables are called '%s'; show them"
                             " with `.variables_byname('%s')`" % (key, key))
        from ..nomials import NomialArray # all one vector!
        # reassemble the full vector variable from its indexed elements
        arr = NomialArray(np.full(veckey.shape, np.nan, dtype="object"))
        for v in variables:
            arr[v.key.idx] = v
        arr.key = veckey
        return arr
    def variables_byname(self, key):
        "Get all variables with a given name"
        from ..nomials import Variable
        return sorted([Variable(k) for k in self.varkeys[key]],
                      key=_sort_by_name_and_idx)
    def constrained_varkeys(self):
        "Return all varkeys in non-ConstraintSet constraints"
        constrained_varkeys = set()
        for constraint in self.flat():
            constrained_varkeys.update(constraint.varkeys)
        return constrained_varkeys
    def flat(self):
        "Yields leaf constraints, recursing into nested sets and iterables."
        for constraint in self:
            if isinstance(constraint, ConstraintSet):
                yield from constraint.flat()
            elif hasattr(constraint, "__iter__"):
                yield from constraint
            else:
                yield constraint
    def flathmaps(self, subs):
        "Yields hmaps<=1 from self.flat()"
        yield from chain(*(l.as_hmapslt1(subs) for l in self.flat()))
    def process_result(self, result):
        """Does arbitrary computation / manipulation of a program's result
        There's no guarantee what order different constraints will process
        results in, so any changes made to the program's result should be
        careful not to step on other constraint's toes.
        Potential Uses
        --------------
        - check that an inequality was tight
        - add values computed from solved variables
        """
        for constraint in self:
            if hasattr(constraint, "process_result"):
                constraint.process_result(result)
        # evaluate any post-solve ("evalfn") variables not already solved for
        for v in self.unique_varkeys:
            if not v.evalfn or v in result["variables"]:
                continue
            if v.veckey:
                v = v.veckey
            val = v.evalfn(result["variables"])
            result["variables"][v] = result["freevariables"][v] = val
    def __repr__(self):
        "Returns namespaced string."
        if not self:
            return "<gpkit.%s object>" % self.__class__.__name__
        return ("<gpkit.%s object containing %i top-level constraint(s)"
                " and %i variable(s)>" % (self.__class__.__name__,
                                          len(self), len(self.varkeys)))
    def name_collision_varkeys(self):
        "Returns the set of contained varkeys whose names are not unique"
        # computed lazily and cached on the instance
        if self._name_collision_varkeys is None:
            self._name_collision_varkeys = set()
            for key in self.varkeys:
                if len(self.varkeys[key.str_without(["lineage", "vec"])]) > 1:
                    self._name_collision_varkeys.add(key)
        return self._name_collision_varkeys
    def lines_without(self, excluded):
        "Lines representation of a ConstraintSet."
        root = "root" not in excluded
        rootlines, lines = [], []
        indent = " "*2 if (len(self) > 1
                           or getattr(self, "lineage", None)) else ""
        if root:
            excluded += ("root",)
            if "unnecessary lineage" in excluded:
                # temporarily mark colliding names so they print with lineage;
                # unmarked again at the end of this method
                for key in self.name_collision_varkeys():
                    key.descr["necessarylineage"] = True
            if hasattr(self, "_rootlines"):
                rootlines = self._rootlines(excluded) # pylint: disable=no-member
        if self.idxlookup:
            named_constraints = {v: k for k, v in self.idxlookup.items()}
        for i, constraint in enumerate(self):
            clines = try_str_without(constraint, excluded).split("\n")
            if (getattr(constraint, "lineage", None)
                and isinstance(constraint, ConstraintSet)):
                name, num = constraint.lineage[-1]
                if not any(clines):
                    clines = [indent + "(no constraints)"]
                if lines:
                    lines.append("")
                lines.append(name if not num else name + str(num))
            elif ("constraint names" not in excluded
                  and self.idxlookup and i in named_constraints):
                lines.append("\"%s\":" % named_constraints[i])
            for j, line in enumerate(clines):
                if clines[j][:len(indent)] != indent:
                    clines[j] = indent + line # must be indented
            lines.extend(clines)
        if root:
            indent = " "
            if "unnecessary lineage" in excluded:
                for key in self.name_collision_varkeys():
                    del key.descr["necessarylineage"]
        return rootlines + [indent+line for line in lines]
    def str_without(self, excluded=("unnecessary lineage", "units")):
        "String representation of a ConstraintSet."
        return "\n".join(self.lines_without(excluded))
    def latex(self, excluded=("units",)):
        "LaTeX representation of a ConstraintSet."
        lines = []
        root = "root" not in excluded
        if root:
            excluded += ("root",)
            lines.append("\\begin{array}{ll} \\text{}")
            if hasattr(self, "_rootlatex"):
                lines.append(self._rootlatex(excluded)) # pylint: disable=no-member
        for constraint in self:
            cstr = try_str_without(constraint, excluded, latex=True)
            if cstr[:6] != " & ": # require indentation
                cstr = " & " + cstr + " \\\\"
            lines.append(cstr)
        if root:
            lines.append("\\end{array}")
        return "\n".join(lines)
    def as_view(self):
        "Return a ConstraintSetView of this ConstraintSet."
        return ConstraintSetView(self)
class ConstraintSetView:
    "Class to access particular views on a set's variables"

    def __init__(self, constraintset, index=()):
        self.constraintset = constraintset
        try:
            self.index = tuple(index)
        except TypeError:  # a scalar index, e.g. a plain int
            self.index = (index,)

    def __getitem__(self, index):
        "Appends the index to its own and returns a new view."
        idx = index if isinstance(index, tuple) else (index,)
        # new indices go first, matching the Vectorize convention
        return ConstraintSetView(self.constraintset, idx + self.index)

    def __getattr__(self, attr):
        """Return the underlying set's attribute, viewed at this index.

        ConstraintSet attributes come back wrapped in a matching view;
        array attributes come back indexed at this view's index; anything
        else raises an error.
        """
        if not hasattr(self.constraintset, attr):
            raise AttributeError("the underlying object lacks `.%s`." % attr)
        value = getattr(self.constraintset, attr)
        if isinstance(value, ConstraintSet):
            return ConstraintSetView(value, self.index)
        if not hasattr(value, "shape"):
            raise ValueError("attribute %s with value %s did not have"
                             " a shape, so ConstraintSetView cannot"
                             " return an indexed view." % (attr, value))
        full_index = self.index
        extra_dims = len(value.shape) - len(full_index)
        if extra_dims > 0:  # indexes are put last to match Vectorize
            full_index = (slice(None),)*extra_dims + full_index
        return value[full_index]
def raise_badelement(cns, i, constraint):
"Identify the bad element and raise a ValueError"
cause = "" if not isinstance(constraint, bool) else (
" Did the constraint list contain an accidental equality?")
if len(cns) == 1:
loc = "as the only constraint"
elif i == 0:
loc = "at the start, before %s" % cns[i+1]
elif i == len(cns) - 1:
loc = "at the end, after %s" % cns[i-1]
else:
loc = "between %s and %s" % (cns[i-1], cns[i+1])
raise ValueError("%s was found %s.%s"
| |
import h5py
import os
import glob
import re
import numpy as np
from . import peano
import warnings
from scipy.integrate import quad
# Data access is configured entirely through the environment:
#   EAGLE_BASE_PATH   - root directory of the simulation outputs
#   EAGLE_ACCESS_TYPE - release type ('public' or 'ARI'; anything else makes
#                       Snapshot.__init__ raise)
# NOTE: these lookups raise KeyError at import time if the variables are unset.
base_path = os.environ['EAGLE_BASE_PATH']
release = os.environ['EAGLE_ACCESS_TYPE']
class Snapshot:
    """ Basic SnapShot superclass which finds the relevant files and gets relevant information
    regarding the snapshot specified.
    arguments:
    run - the run (e.g. L0012N0188)
    model - an EAGLE model (e.g. Ref)
    tag - a tag string specifying a snapshot output (e.g. 028_z000p000)
    history:
    written - Mackereth (UoB) - 22/11/2019
    """
    def __init__(self, run, model, tag, load_particles=False):
        #store the snapshot identity info
        self.run = run
        self.model = model
        self.tag = tag
        # `release` and `base_path` are module-level globals read from the
        # environment at import time; they decide the directory layout
        if release == 'public':
            self.simlabel = self.model+self.run
            self.snaplabel = 'snapshot_'+self.tag
            self.base_subfile = 'snap_'+self.tag
            self.path = os.path.join(base_path, self.simlabel, self.snaplabel)
        elif release == 'ARI':
            self.snaplabel = 'snapshot_'+self.tag
            self.base_subfile = 'snap_'+self.tag
            self.path = os.path.join(base_path, self.run, self.model, 'data', self.snaplabel)
        else:
            raise Exception('private/custom data access is not yet implemented!')
        if not os.path.exists(os.path.join(self.path, self.base_subfile+'.0.hdf5')):
            raise Exception('could not see snapshot data in directory: '+self.path)
        #get the files related to this snapshot and load some of their metadata
        self.files = natural_sort(glob.glob(os.path.join(self.path, self.base_subfile+'*.hdf5')))
        self.nfiles = len(self.files)
        self.header_dict = dict(h5py.File(self.files[0], 'r')['/Header'].attrs.items())
        self.abundance_dict = dict(h5py.File(self.files[0], 'r')['/Parameters/ChemicalElements'].attrs.items())
        self.elements = ['Hydrogen', 'Helium', 'Carbon', 'Nitrogen', 'Oxygen', 'Silicon', 'Sulphur', 'Magnesium', 'Iron']
        self.solar_abundances = dict([(self.elements[i],self.abundance_dict['SolarAbundance_%s' % self.elements[i]]) for i in range(len(self.elements))])
        # basic box / cosmology parameters taken from the first file's header
        self.BoxSize = self.header_dict['BoxSize']
        self.HubbleParam = self.header_dict['HubbleParam']
        self.Omega0, self.OmegaLambda, self.OmegaBaryon, self.a0 = self.header_dict['Omega0'], self.header_dict['OmegaLambda'], self.header_dict['OmegaBaryon'], self.header_dict['ExpansionFactor']
        self.NumPartTotal = self.header_dict['NumPart_Total']
        self.ParticleTypes = np.array([0,1,2,3,4,5])
        # which particle types exist globally, and which exist in each subfile
        self.ParticleTypePresent = self.NumPartTotal > 0
        self.ParticleTypePresent_file = np.zeros((len(self.files),len(self.NumPartTotal)), dtype=bool)
        for ii, file in enumerate(self.files):
            head = dict(h5py.File(file, 'r')['/Header'].attrs.items())
            self.ParticleTypePresent_file[ii, head['NumPart_ThisFile'] > 0] = True
        # maps a present particle type to its row index in firstkeys/lastkeys
        self._ptypeind = {self.ParticleTypes[self.ParticleTypePresent][i]:i for i in range(len(self.ParticleTypes[self.ParticleTypePresent]))}
        #get the Hash Table info for P-H key sorting
        self.HashBits = dict(h5py.File(self.files[0], 'r')['/HashTable'].attrs.items())['HashBits']
        self.HashGridSideLength = 2**self.HashBits
        self.HashGridCellSize = self.BoxSize/self.HashGridSideLength
        self.firstkeys = np.zeros((len(self.ParticleTypes[self.ParticleTypePresent]),self.nfiles))
        self.lastkeys = np.zeros((len(self.ParticleTypes[self.ParticleTypePresent]),self.nfiles))
        self.datasets = {}
        for ii,parttype in enumerate(self.ParticleTypes[self.ParticleTypePresent]):
            self.firstkeys[ii] = np.array(h5py.File(self.files[0], 'r')['/HashTable/PartType'+str(parttype)+'/FirstKeyInFile'])
            self.lastkeys[ii] = np.array(h5py.File(self.files[0], 'r')['/HashTable/PartType'+str(parttype)+'/LastKeyInFile'])
            #be sure we get a file with this parttype (only really an issue for when low N stars!!)
            ind = np.nonzero(h5py.File(self.files[0], 'r')['/HashTable/PartType'+str(parttype)+'/LastKeyInFile'][:])[0][0]
            self.datasets['PartType'+str(parttype)] = list(h5py.File(self.files[ind], 'r')['/PartType'+str(parttype)].keys())
        if load_particles:
            self._get_coordinates()
    def _get_coordinates(self):
        """ Load all the coordinates of the available particles
        """
        #load coordinates and velocities
        coordinates = []
        velocities = []
        for ii,type in enumerate(self.ParticleTypes[self.ParticleTypePresent]):
            #now load the coordinates in these files and save the indices for each particle type
            thistypecoord, thistypevels = self._get_parttype_indices(type, self.files)
            coordinates.append(thistypecoord)
            velocities.append(thistypevels)
        # lists ordered like ParticleTypes[ParticleTypePresent]
        self.velocities = velocities
        self.coordinates = coordinates
    def _get_parttype_indices(self, parttype, files):
        """get the coordinates and indices for a given particle type in a given region"""
        coords, velocities, indices = [], [], []
        for ii,file in enumerate(files):
            #check this particle type is present here
            if not _particle_type_present(parttype, file):
                # NOTE(review): this aborts the whole read on the first file
                # missing the type -- a `continue` (skip just this file) looks
                # like the intent; confirm with callers before changing.
                return None, None
            # load the file
            thisfilecoords = np.array(h5py.File(file, 'r')['/PartType'+str(parttype)+'/Coordinates'])
            thisfilevels = np.array(h5py.File(file, 'r')['/PartType'+str(parttype)+'/Velocity'])
            #store the coordinates and the indices of these particles in the file
            coords.append(thisfilecoords)
            velocities.append(thisfilevels)
        return np.concatenate(coords), np.concatenate(velocities)
    def _get_coords_vels(self, parttype, files):
        """get the coordinates and velocities for all particles of a certain type"""
        if not self.ParticleTypePresent[parttype]:
            warnings.warn('Particle type is not present, returning empty arrays...')
            return np.array([]), np.array([]), np.array([])
        coords, velocities, indices = [], [], []
        for file in files:
            # load the file
            thisfilecoords = np.array(h5py.File(file, 'r')['/PartType'+str(parttype)+'/Coordinates'])
            thisfilevels = np.array(h5py.File(file, 'r')['/PartType'+str(parttype)+'/Velocity'])
            #store the coordinates and the indices of these particles in the file
            coords.append(thisfilecoords)
            velocities.append(thisfilevels)
        return np.concatenate(coords), np.concatenate(velocities)
    def get_dataset(self, parttype, dataset, physical=False, cgs=False):
        """ get the data for a given entry in the HDF5 file for the given region
        keyword arguments:
        physical - apply the a/h exponents stored with the dataset
        cgs - additionally (or alone) apply the CGS conversion factor
        """
        if not self.ParticleTypePresent[parttype]:
            warnings.warn('Particle type is not present, returning empty arrays...')
            return np.array([])
        key = os.path.join('/PartType'+str(parttype),dataset)
        if physical:
            #find conversion factor
            factor = self._conversion_factor(key, self.a0, self.HubbleParam, cgs=cgs)
        elif not physical and cgs:
            factor = h5py.File(self.files[0], 'r')[key].attrs['CGSConversionFactor']
        else:
            #else just multiply by 1!
            factor = 1.
        out = []
        for ii,file in enumerate(self.files):
            # load this file and get the particles
            out.append(np.array(h5py.File(file, 'r')[key]) * factor)
        return np.concatenate(out)
    def _conversion_factor(self, key, a, h, cgs=False):
        # combine the dataset's a- and h-exponents (and optionally its CGS
        # factor) into a single multiplicative conversion
        aexp_scale, h_scale = self._get_conversion_factor_exponents(key)
        if cgs:
            cgs_factor = h5py.File(self.files[0], 'r')[key].attrs['CGSConversionFactor']
        else:
            cgs_factor = 1.
        return a**(aexp_scale)*h**(h_scale)*cgs_factor
    def _get_conversion_factor_exponents(self, key):
        # exponents are stored as attributes alongside each dataset
        aexp_scale = h5py.File(self.files[0], 'r')[key].attrs['aexp-scale-exponent']
        h_scale = h5py.File(self.files[0], 'r')[key].attrs['h-scale-exponent']
        return aexp_scale, h_scale
    def _single_X_H(self,X,H,element):
        # [X/H] abundance ratio relative to the solar values from the header
        solar = self.solar_abundances[element]
        solarH = self.solar_abundances['Hydrogen']
        return np.log10(X/H)-np.log10(solar/solarH)
    def abundance_ratios(self,gas=False,smoothed=True):
        """ Compute element abundance ratios for the region, returns a dict of [X/H] """
        if smoothed:
            e_key = 'SmoothedElementAbundance'
        else:
            e_key = 'ElementAbundance'
        # parttype 0 = gas, 4 = stars
        if gas:
            parttype = 0
        else:
            parttype = 4
        entries = []
        H = self.get_dataset(parttype,os.path.join(e_key,'Hydrogen'))
        for i in range(len(self.elements)):
            # Hydrogen is the reference; Sulphur is skipped
            if self.elements[i] == 'Hydrogen' or self.elements[i] == 'Sulphur':
                continue
            X = self.get_dataset(parttype,os.path.join(e_key,self.elements[i]))
            entries.append((self.elements[i],self._single_X_H(X,H,self.elements[i])))
        return dict(entries)
    def t_lookback(self,a):
        # integrand for the cosmic-time integrals below, in units of 1/H0;
        # NOTE(review): assumes a matter+Lambda cosmology (no radiation or
        # curvature terms) -- confirm this matches the simulations used
        return a / (np.sqrt(self.Omega0 * a + self.OmegaLambda * (a ** 4)))
    def z2age(self,z):
        """Time elapsed (Gyr) between redshift z and the snapshot's epoch a0."""
        a = 1 / (1 + z)
        t = np.array([quad(self.t_lookback, x, self.a0)[0] for x in a])
        # 1/(h*100 km/s/Mpc) -> Gyr: 3.086e19 km/Mpc over 3.1536e16 s/Gyr
        return (1 / (self.HubbleParam * 100)) * (3.086e19 / 3.1536e16) * t
    def a2age(self,a):
        """Time elapsed (Gyr) between expansion factor a and the snapshot's epoch a0."""
        t = np.array([quad(self.t_lookback, x, self.a0)[0] for x in a])
        return (1 / (self.HubbleParam * 100)) * (3.086e19 / 3.1536e16) * t
    def z2tau(self,z):
        """Cosmic time (Gyr) at redshift z: age at the snapshot minus the z->a0 interval."""
        t_em = quad(self.t_lookback, 0., self.a0)[0]
        t_em = (1 / (self.HubbleParam * 100)) * (3.086e19 / 3.1536e16) * t_em
        a = 1 / (1 + z)
        t = np.array([quad(self.t_lookback, x, self.a0)[0] for x in a])
        return t_em - ((1 / (self.HubbleParam * 100)) * (3.086e19 / 3.1536e16) * t)
    def a2tau(self,a):
        """Cosmic time (Gyr) at expansion factor a (see z2tau)."""
        t_em = quad(self.t_lookback, 0., self.a0)[0]
        t_em = (1 / (self.HubbleParam * 100)) * (3.086e19 / 3.1536e16) * t_em
        t = np.array([quad(self.t_lookback, x, self.a0)[0] for x in a])
        return t_em - ((1 / (self.HubbleParam * 100)) * (3.086e19 / 3.1536e16) * t)
class SnapshotRegion(Snapshot):
""" A class inheriting from SnapShot, which defines a region inside a larger simulation snapshot.
when initialised, this will read the files in that region, and get the indices of the particles inside the
desired region. The necessary datasets can then be loaded by using get_dataset.
arguments:
run - the run (e.g. L0012N0188)
model - an EAGLE model (e.g. Ref)
tag - a tag string specifying a snapshot output (e.g. 028_z000p000)
center - the center of the desired region
sidelength - the length of a side of the volume required
history:
written - Mackereth (UoB) - 22/11/2019
"""
def __init__(self, run, model, tag, center, sidelength, just_get_files=False):
#we want everything from SnapShot plus some extras
super().__init__(run, model, tag)
self.center = center
self.sidelength = sidelength
self.centered = False
self._index_region(self.center, self.sidelength, justfiles=just_get_files)
    def _index_region(self, center, side_length, phgrid_n=70, justfiles=False):
        """Load a region defined by a central coordinate and a side length.

        Populates ``files_for_region``/``file_indices`` for every particle
        type present, and (unless ``justfiles``) reads coordinates, velocities
        and per-file indices for each type, recording per-type counts in
        ``NumPart_ThisRegion``. Types with no particles in the region are
        marked absent in ``ParticleTypePresent``.

        arguments:
        center - the [x,y,z] coordinate of the desired center (simulation units)
        side_length - the desired side length (in the simulation units)

        keyword arguments:
        phgrid_n - the number of grid points along a side length to look for PH cells (default 70)
        justfiles - when True, only determine which files intersect the region
            and skip reading any particle data (default False)
        """
        #work out which files contain the desired region
        # Peano-Hilbert hash keys of the grid cells covering the region.
        grid = peano.coordinate_grid(center, side_length, self.BoxSize, n=phgrid_n)
        keys = peano.get_unique_grid_keys(grid, self.HashGridCellSize, self.BoxSize, bits=self.HashBits)
        # NOTE(review): computed but never used below — candidate for removal.
        particles_in_volume = self.ParticleTypes[self.ParticleTypePresent]
        self.files_for_region = []
        self.file_indices = []
        coordinates = []
        velocities = []
        indices = []
        for ii in self.ParticleTypes:
            # Skip types already known to be absent from the snapshot.
            if not self.ParticleTypePresent[ii]:
                continue
            # File indices whose hash cells overlap the region for this type.
            Nfiles = self._get_parttype_files(ii, keys)
            if len(Nfiles) < 1:
                #particle is not present in the region - remove from here
                self.ParticleTypePresent[ii] = 0
                continue
            thisfiles = np.array(self.files)[Nfiles]
            thisindices = Nfiles
            self.files_for_region.append(thisfiles)
            self.file_indices.append(Nfiles)
            if justfiles:
                # Only the file bookkeeping was requested; read no data.
                continue
            # NOTE(review): a bool accumulated with += becomes an int count;
            # only its truthiness ("any file has this type") is used below.
            present = False
            for file in thisfiles:
                present += _particle_type_present(ii, file)
            if present:
                #now load the coordinates in these files and save the indices for each particle type
                thistypecoord, thistypevels, thistypeindices = self._get_parttype_indices(ii, thisfiles, thisindices)
                if thistypecoord is None:
                    # Reader found nothing usable after all; mark type absent.
                    self.ParticleTypePresent[ii] = 0
                    continue
                coordinates.append(thistypecoord)
                velocities.append(thistypevels)
                indices.append(thistypeindices)
            else:
                self.ParticleTypePresent[ii] = 0
        if not justfiles:
            self.velocities = velocities
            self.coordinates = coordinates
            self.indices = indices
            # Per-type particle counts for the region; relies on `coordinates`
            # being in the same order as the still-present particle types.
            self.NumPart_ThisRegion = np.zeros(len(self.NumPartTotal),dtype=np.int64)
            for ii,type in enumerate(self.ParticleTypes[self.ParticleTypePresent]):
                self.NumPart_ThisRegion[type] = len(self.coordinates[ii])
def _get_parttype_indices(self, parttype, files, file_indices):
"""get the coordinates and indices for a given particle type in a given region"""
coords, velocities, indices = [], [], []
for ii,file in enumerate(files):
#check this particle type is present here
if not _particle_type_present(parttype, file):
return None, None, None
# load the file
thisfilecoords = np.array(h5py.File(file, 'r')['/PartType'+str(parttype)+'/Coordinates'])
thisfilevels = np.array(h5py.File(file, 'r')['/PartType'+str(parttype)+'/Velocity'])
if (np.array(self.center)+self.sidelength > self.BoxSize).any():
thisfilecoords = thisfilecoords - (self.center - self.BoxSize/2.)
thisfilecoords | |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'resources/gui.ui'
#
# Created by: PyQt5 UI code generator 5.15.3
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1049, 1076)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(0, 0))
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setContentsMargins(-1, 0, -1, -1)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_13 = QtWidgets.QHBoxLayout()
self.horizontalLayout_13.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_13.setObjectName("horizontalLayout_13")
self.stackedWidget_parameter = QtWidgets.QStackedWidget(self.centralwidget)
self.stackedWidget_parameter.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.stackedWidget_parameter.sizePolicy().hasHeightForWidth())
self.stackedWidget_parameter.setSizePolicy(sizePolicy)
self.stackedWidget_parameter.setStyleSheet("")
self.stackedWidget_parameter.setFrameShadow(QtWidgets.QFrame.Plain)
self.stackedWidget_parameter.setLineWidth(0)
self.stackedWidget_parameter.setObjectName("stackedWidget_parameter")
self.parameter = QtWidgets.QWidget()
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.parameter.sizePolicy().hasHeightForWidth())
self.parameter.setSizePolicy(sizePolicy)
self.parameter.setObjectName("parameter")
self.horizontalLayout_18 = QtWidgets.QHBoxLayout(self.parameter)
self.horizontalLayout_18.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_18.setObjectName("horizontalLayout_18")
self.mainLayout_parameter = QtWidgets.QVBoxLayout()
self.mainLayout_parameter.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.mainLayout_parameter.setSpacing(10)
self.mainLayout_parameter.setObjectName("mainLayout_parameter")
self.label_parameter = QtWidgets.QLabel(self.parameter)
self.label_parameter.setMaximumSize(QtCore.QSize(16777215, 25))
font = QtGui.QFont()
font.setPointSize(16)
self.label_parameter.setFont(font)
self.label_parameter.setAlignment(QtCore.Qt.AlignCenter)
self.label_parameter.setObjectName("label_parameter")
self.mainLayout_parameter.addWidget(self.label_parameter)
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.verticalLayout_3.setContentsMargins(0, 0, -1, -1)
self.verticalLayout_3.setSpacing(5)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.horizontalLayout_19 = QtWidgets.QHBoxLayout()
self.horizontalLayout_19.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_19.setObjectName("horizontalLayout_19")
self.label_16 = QtWidgets.QLabel(self.parameter)
self.label_16.setMinimumSize(QtCore.QSize(0, 21))
self.label_16.setObjectName("label_16")
self.horizontalLayout_19.addWidget(self.label_16)
self.comboBox_preset = QtWidgets.QComboBox(self.parameter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBox_preset.sizePolicy().hasHeightForWidth())
self.comboBox_preset.setSizePolicy(sizePolicy)
self.comboBox_preset.setMinimumSize(QtCore.QSize(50, 0))
self.comboBox_preset.setEditable(False)
self.comboBox_preset.setCurrentText("")
self.comboBox_preset.setObjectName("comboBox_preset")
self.horizontalLayout_19.addWidget(self.comboBox_preset)
self.verticalLayout_3.addLayout(self.horizontalLayout_19)
self.line = QtWidgets.QFrame(self.parameter)
self.line.setEnabled(False)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayout_3.addWidget(self.line)
spacerItem = QtWidgets.QSpacerItem(20, 3, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.verticalLayout_3.addItem(spacerItem)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label_Menschen = QtWidgets.QLabel(self.parameter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_Menschen.sizePolicy().hasHeightForWidth())
self.label_Menschen.setSizePolicy(sizePolicy)
self.label_Menschen.setMinimumSize(QtCore.QSize(170, 21))
self.label_Menschen.setMaximumSize(QtCore.QSize(170, 16777215))
self.label_Menschen.setObjectName("label_Menschen")
self.horizontalLayout.addWidget(self.label_Menschen)
self.eingabe_menschen = QtWidgets.QLineEdit(self.parameter)
self.eingabe_menschen.setMinimumSize(QtCore.QSize(80, 0))
self.eingabe_menschen.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.eingabe_menschen.setToolTip("")
self.eingabe_menschen.setStatusTip("")
self.eingabe_menschen.setAccessibleDescription("")
self.eingabe_menschen.setInputMethodHints(QtCore.Qt.ImhNone)
self.eingabe_menschen.setInputMask("")
self.eingabe_menschen.setFrame(True)
self.eingabe_menschen.setEchoMode(QtWidgets.QLineEdit.Normal)
self.eingabe_menschen.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.eingabe_menschen.setDragEnabled(False)
self.eingabe_menschen.setObjectName("eingabe_menschen")
self.horizontalLayout.addWidget(self.eingabe_menschen)
self.verticalLayout_3.addLayout(self.horizontalLayout)
self.horizontalLayout_14 = QtWidgets.QHBoxLayout()
self.horizontalLayout_14.setObjectName("horizontalLayout_14")
self.label_infiziert = QtWidgets.QLabel(self.parameter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_infiziert.sizePolicy().hasHeightForWidth())
self.label_infiziert.setSizePolicy(sizePolicy)
self.label_infiziert.setMinimumSize(QtCore.QSize(170, 21))
self.label_infiziert.setMaximumSize(QtCore.QSize(170, 16777215))
self.label_infiziert.setObjectName("label_infiziert")
self.horizontalLayout_14.addWidget(self.label_infiziert)
self.eingabe_infiziert = QtWidgets.QLineEdit(self.parameter)
self.eingabe_infiziert.setMinimumSize(QtCore.QSize(80, 0))
self.eingabe_infiziert.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.eingabe_infiziert.setObjectName("eingabe_infiziert")
self.horizontalLayout_14.addWidget(self.eingabe_infiziert)
self.verticalLayout_3.addLayout(self.horizontalLayout_14)
self.horizontalLayout_11 = QtWidgets.QHBoxLayout()
self.horizontalLayout_11.setObjectName("horizontalLayout_11")
self.label_bewegungsgeschw = QtWidgets.QLabel(self.parameter)
self.label_bewegungsgeschw.setMinimumSize(QtCore.QSize(170, 25))
self.label_bewegungsgeschw.setMaximumSize(QtCore.QSize(170, 16777215))
self.label_bewegungsgeschw.setObjectName("label_bewegungsgeschw")
self.horizontalLayout_11.addWidget(self.label_bewegungsgeschw)
self.slider_bewegungsgeschw = QtWidgets.QSlider(self.parameter)
self.slider_bewegungsgeschw.setMinimumSize(QtCore.QSize(80, 0))
self.slider_bewegungsgeschw.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.slider_bewegungsgeschw.setAutoFillBackground(False)
self.slider_bewegungsgeschw.setMinimum(1)
self.slider_bewegungsgeschw.setMaximum(3)
self.slider_bewegungsgeschw.setSingleStep(1)
self.slider_bewegungsgeschw.setPageStep(1)
self.slider_bewegungsgeschw.setSliderPosition(2)
self.slider_bewegungsgeschw.setOrientation(QtCore.Qt.Horizontal)
self.slider_bewegungsgeschw.setTickPosition(QtWidgets.QSlider.NoTicks)
self.slider_bewegungsgeschw.setTickInterval(1)
self.slider_bewegungsgeschw.setObjectName("slider_bewegungsgeschw")
self.horizontalLayout_11.addWidget(self.slider_bewegungsgeschw)
self.verticalLayout_3.addLayout(self.horizontalLayout_11)
self.horizontalLayout_45 = QtWidgets.QHBoxLayout()
self.horizontalLayout_45.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_45.setObjectName("horizontalLayout_45")
self.label_39 = QtWidgets.QLabel(self.parameter)
self.label_39.setMinimumSize(QtCore.QSize(170, 25))
self.label_39.setMaximumSize(QtCore.QSize(170, 16777215))
self.label_39.setObjectName("label_39")
self.horizontalLayout_45.addWidget(self.label_39)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_45.addItem(spacerItem1)
self.check_collision = AnimatedToggle(self.parameter)
self.check_collision.setMinimumSize(QtCore.QSize(55, 0))
self.check_collision.setMaximumSize(QtCore.QSize(55, 16777215))
self.check_collision.setText("")
self.check_collision.setObjectName("check_collision")
self.horizontalLayout_45.addWidget(self.check_collision)
self.verticalLayout_3.addLayout(self.horizontalLayout_45)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_infektionswahr = QtWidgets.QLabel(self.parameter)
self.label_infektionswahr.setMinimumSize(QtCore.QSize(170, 21))
self.label_infektionswahr.setMaximumSize(QtCore.QSize(170, 16777215))
self.label_infektionswahr.setObjectName("label_infektionswahr")
self.horizontalLayout_2.addWidget(self.label_infektionswahr)
self.eingabe_infektionswahr = QtWidgets.QLineEdit(self.parameter)
self.eingabe_infektionswahr.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.eingabe_infektionswahr.setInputMask("")
self.eingabe_infektionswahr.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.eingabe_infektionswahr.setPlaceholderText("")
self.eingabe_infektionswahr.setObjectName("eingabe_infektionswahr")
self.horizontalLayout_2.addWidget(self.eingabe_infektionswahr)
self.label_5 = QtWidgets.QLabel(self.parameter)
self.label_5.setObjectName("label_5")
self.horizontalLayout_2.addWidget(self.label_5)
self.verticalLayout_3.addLayout(self.horizontalLayout_2)
self.horizontalLayout_27 = QtWidgets.QHBoxLayout()
self.horizontalLayout_27.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_27.setObjectName("horizontalLayout_27")
self.label_27 = QtWidgets.QLabel(self.parameter)
self.label_27.setMinimumSize(QtCore.QSize(170, 25))
self.label_27.setMaximumSize(QtCore.QSize(170, 16777215))
self.label_27.setObjectName("label_27")
self.horizontalLayout_27.addWidget(self.label_27)
self.slider_infection_radius = QtWidgets.QSlider(self.parameter)
self.slider_infection_radius.setMinimum(0)
self.slider_infection_radius.setMaximum(9)
self.slider_infection_radius.setSingleStep(1)
self.slider_infection_radius.setPageStep(1)
self.slider_infection_radius.setProperty("value", 0)
self.slider_infection_radius.setSliderPosition(0)
self.slider_infection_radius.setTracking(True)
self.slider_infection_radius.setOrientation(QtCore.Qt.Horizontal)
self.slider_infection_radius.setTickPosition(QtWidgets.QSlider.NoTicks)
self.slider_infection_radius.setTickInterval(1)
self.slider_infection_radius.setObjectName("slider_infection_radius")
self.horizontalLayout_27.addWidget(self.slider_infection_radius)
self.verticalLayout_3.addLayout(self.horizontalLayout_27)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.label_sterberate = QtWidgets.QLabel(self.parameter)
self.label_sterberate.setMinimumSize(QtCore.QSize(170, 21))
self.label_sterberate.setMaximumSize(QtCore.QSize(170, 16777215))
self.label_sterberate.setObjectName("label_sterberate")
self.horizontalLayout_6.addWidget(self.label_sterberate)
self.eingabe_sterberate = QtWidgets.QLineEdit(self.parameter)
self.eingabe_sterberate.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.eingabe_sterberate.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.eingabe_sterberate.setPlaceholderText("")
self.eingabe_sterberate.setObjectName("eingabe_sterberate")
self.horizontalLayout_6.addWidget(self.eingabe_sterberate)
self.label_4 = QtWidgets.QLabel(self.parameter)
self.label_4.setObjectName("label_4")
self.horizontalLayout_6.addWidget(self.label_4)
self.verticalLayout_3.addLayout(self.horizontalLayout_6)
self.horizontalLayout_38 = QtWidgets.QHBoxLayout()
self.horizontalLayout_38.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_38.setObjectName("horizontalLayout_38")
self.label_34 = QtWidgets.QLabel(self.parameter)
self.label_34.setMinimumSize(QtCore.QSize(170, 25))
self.label_34.setMaximumSize(QtCore.QSize(170, 16777215))
self.label_34.setObjectName("label_34")
self.horizontalLayout_38.addWidget(self.label_34)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_38.addItem(spacerItem2)
self.check_risk_group = AnimatedToggle(self.parameter)
self.check_risk_group.setMinimumSize(QtCore.QSize(55, 0))
self.check_risk_group.setMaximumSize(QtCore.QSize(55, 16777215))
self.check_risk_group.setText("")
self.check_risk_group.setObjectName("check_risk_group")
self.horizontalLayout_38.addWidget(self.check_risk_group)
self.verticalLayout_3.addLayout(self.horizontalLayout_38)
self.frame_risk_group = QtWidgets.QFrame(self.parameter)
self.frame_risk_group.setMinimumSize(QtCore.QSize(0, 50))
self.frame_risk_group.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_risk_group.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_risk_group.setObjectName("frame_risk_group")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.frame_risk_group)
self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_5.setSpacing(5)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.horizontalLayout_39 = QtWidgets.QHBoxLayout()
self.horizontalLayout_39.setObjectName("horizontalLayout_39")
self.label_35 = QtWidgets.QLabel(self.frame_risk_group)
self.label_35.setMinimumSize(QtCore.QSize(170, 25))
self.label_35.setMaximumSize(QtCore.QSize(170, 16777215))
self.label_35.setObjectName("label_35")
self.horizontalLayout_39.addWidget(self.label_35)
self.range_risk_group_age = RangeSlider(self.frame_risk_group)
self.range_risk_group_age.setLayoutDirection(QtCore.Qt.LeftToRight)
self.range_risk_group_age.setOrientation(QtCore.Qt.Horizontal)
self.range_risk_group_age.setTickPosition(QtWidgets.QSlider.NoTicks)
self.range_risk_group_age.setObjectName("range_risk_group_age")
self.horizontalLayout_39.addWidget(self.range_risk_group_age)
self.verticalLayout_5.addLayout(self.horizontalLayout_39)
self.horizontalLayout_40 = QtWidgets.QHBoxLayout()
self.horizontalLayout_40.setObjectName("horizontalLayout_40")
self.label_36 = QtWidgets.QLabel(self.frame_risk_group)
self.label_36.setMinimumSize(QtCore.QSize(170, 21))
self.label_36.setMaximumSize(QtCore.QSize(170, 16777215))
self.label_36.setObjectName("label_36")
self.horizontalLayout_40.addWidget(self.label_36)
self.eingabe_risk_group_death_rate = QtWidgets.QLineEdit(self.frame_risk_group)
self.eingabe_risk_group_death_rate.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.eingabe_risk_group_death_rate.setObjectName("eingabe_risk_group_death_rate")
self.horizontalLayout_40.addWidget(self.eingabe_risk_group_death_rate)
self.label_37 = QtWidgets.QLabel(self.frame_risk_group)
self.label_37.setObjectName("label_37")
self.horizontalLayout_40.addWidget(self.label_37)
self.verticalLayout_5.addLayout(self.horizontalLayout_40)
self.verticalLayout_3.addWidget(self.frame_risk_group)
self.horizontalLayout_15 = QtWidgets.QHBoxLayout()
self.horizontalLayout_15.setObjectName("horizontalLayout_15")
self.label = QtWidgets.QLabel(self.parameter)
self.label.setMinimumSize(QtCore.QSize(170, 21))
self.label.setMaximumSize(QtCore.QSize(170, 16777215))
self.label.setObjectName("label")
self.horizontalLayout_15.addWidget(self.label)
self.eingabe_incubation_time = QtWidgets.QLineEdit(self.parameter)
self.eingabe_incubation_time.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.eingabe_incubation_time.setObjectName("eingabe_incubation_time")
self.horizontalLayout_15.addWidget(self.eingabe_incubation_time)
self.label_3 = QtWidgets.QLabel(self.parameter)
self.label_3.setObjectName("label_3")
self.horizontalLayout_15.addWidget(self.label_3)
self.verticalLayout_3.addLayout(self.horizontalLayout_15)
self.horizontalLayout_16 = QtWidgets.QHBoxLayout()
self.horizontalLayout_16.setObjectName("horizontalLayout_16")
self.label_2 = QtWidgets.QLabel(self.parameter)
self.label_2.setMinimumSize(QtCore.QSize(170, 21))
self.label_2.setMaximumSize(QtCore.QSize(170, 16777215))
self.label_2.setStatusTip("")
self.label_2.setLineWidth(1)
self.label_2.setObjectName("label_2")
self.horizontalLayout_16.addWidget(self.label_2)
self.eingabe_sterbezeitpunkt = QtWidgets.QLineEdit(self.parameter)
self.eingabe_sterbezeitpunkt.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.eingabe_sterbezeitpunkt.setObjectName("eingabe_sterbezeitpunkt")
self.horizontalLayout_16.addWidget(self.eingabe_sterbezeitpunkt)
self.label_6 = QtWidgets.QLabel(self.parameter)
self.label_6.setObjectName("label_6")
self.horizontalLayout_16.addWidget(self.label_6)
self.verticalLayout_3.addLayout(self.horizontalLayout_16)
self.horizontalLayout_21 = QtWidgets.QHBoxLayout()
self.horizontalLayout_21.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_21.setObjectName("horizontalLayout_21")
self.label_22 = QtWidgets.QLabel(self.parameter)
self.label_22.setMinimumSize(QtCore.QSize(170, 21))
self.label_22.setMaximumSize(QtCore.QSize(170, 16777215))
self.label_22.setObjectName("label_22")
self.horizontalLayout_21.addWidget(self.label_22)
self.eingabe_genesungszeitpunkt = QtWidgets.QLineEdit(self.parameter)
self.eingabe_genesungszeitpunkt.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.eingabe_genesungszeitpunkt.setObjectName("eingabe_genesungszeitpunkt")
self.horizontalLayout_21.addWidget(self.eingabe_genesungszeitpunkt)
self.label_21 = QtWidgets.QLabel(self.parameter)
self.label_21.setObjectName("label_21")
self.horizontalLayout_21.addWidget(self.label_21)
self.verticalLayout_3.addLayout(self.horizontalLayout_21)
self.horizontalLayout_22 = QtWidgets.QHBoxLayout()
self.horizontalLayout_22.setContentsMargins(-1, 0, -1, 0)
self.horizontalLayout_22.setObjectName("horizontalLayout_22")
self.label_23 = QtWidgets.QLabel(self.parameter)
self.label_23.setMinimumSize(QtCore.QSize(170, 25))
self.label_23.setMaximumSize(QtCore.QSize(170, 16777215))
self.label_23.setObjectName("label_23")
self.horizontalLayout_22.addWidget(self.label_23)
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_22.addItem(spacerItem3)
self.check_reinfection = AnimatedToggle(self.parameter)
self.check_reinfection.setMinimumSize(QtCore.QSize(55, 0))
self.check_reinfection.setMaximumSize(QtCore.QSize(55, 16777215))
self.check_reinfection.setLayoutDirection(QtCore.Qt.RightToLeft)
self.check_reinfection.setStyleSheet("")
self.check_reinfection.setText("")
self.check_reinfection.setObjectName("check_reinfection")
self.horizontalLayout_22.addWidget(self.check_reinfection)
self.verticalLayout_3.addLayout(self.horizontalLayout_22)
self.frame_wiederansteckungsrate = QtWidgets.QFrame(self.parameter)
self.frame_wiederansteckungsrate.setMinimumSize(QtCore.QSize(0, 0))
self.frame_wiederansteckungsrate.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_wiederansteckungsrate.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_wiederansteckungsrate.setLineWidth(0)
self.frame_wiederansteckungsrate.setObjectName("frame_wiederansteckungsrate")
self.horizontalLayout_26 = QtWidgets.QHBoxLayout(self.frame_wiederansteckungsrate)
self.horizontalLayout_26.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_26.setObjectName("horizontalLayout_26")
self.horizontalLayout_24 = QtWidgets.QHBoxLayout()
self.horizontalLayout_24.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_24.setObjectName("horizontalLayout_24")
self.label_25 = QtWidgets.QLabel(self.frame_wiederansteckungsrate)
self.label_25.setMinimumSize(QtCore.QSize(170, 21))
self.label_25.setMaximumSize(QtCore.QSize(170, 16777215))
self.label_25.setObjectName("label_25")
self.horizontalLayout_24.addWidget(self.label_25)
self.eingabe_wiederanstreckungsrate = QtWidgets.QLineEdit(self.frame_wiederansteckungsrate)
self.eingabe_wiederanstreckungsrate.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.eingabe_wiederanstreckungsrate.setObjectName("eingabe_wiederanstreckungsrate")
self.horizontalLayout_24.addWidget(self.eingabe_wiederanstreckungsrate)
self.label_26 = QtWidgets.QLabel(self.frame_wiederansteckungsrate)
self.label_26.setObjectName("label_26")
self.horizontalLayout_24.addWidget(self.label_26)
self.horizontalLayout_26.addLayout(self.horizontalLayout_24)
self.verticalLayout_3.addWidget(self.frame_wiederansteckungsrate)
self.horizontalLayout_28 = QtWidgets.QHBoxLayout()
self.horizontalLayout_28.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_28.setObjectName("horizontalLayout_28")
self.label_28 = QtWidgets.QLabel(self.parameter)
self.label_28.setMinimumSize(QtCore.QSize(170, 25))
self.label_28.setMaximumSize(QtCore.QSize(170, 16777215))
self.label_28.setObjectName("label_28")
self.horizontalLayout_28.addWidget(self.label_28)
spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_28.addItem(spacerItem4)
self.check_social_distancing = AnimatedToggle(self.parameter)
self.check_social_distancing.setMinimumSize(QtCore.QSize(55, 0))
self.check_social_distancing.setMaximumSize(QtCore.QSize(55, 16777215))
self.check_social_distancing.setLayoutDirection(QtCore.Qt.RightToLeft)
self.check_social_distancing.setText("")
self.check_social_distancing.setObjectName("check_social_distancing")
self.horizontalLayout_28.addWidget(self.check_social_distancing)
self.verticalLayout_3.addLayout(self.horizontalLayout_28)
self.frame_social_distancing = QtWidgets.QFrame(self.parameter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame_social_distancing.sizePolicy().hasHeightForWidth())
self.frame_social_distancing.setSizePolicy(sizePolicy)
self.frame_social_distancing.setMinimumSize(QtCore.QSize(0, 0))
self.frame_social_distancing.setSizeIncrement(QtCore.QSize(0, 0))
self.frame_social_distancing.setBaseSize(QtCore.QSize(0, 0))
self.frame_social_distancing.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_social_distancing.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_social_distancing.setObjectName("frame_social_distancing")
self.horizontalLayout_30 = QtWidgets.QHBoxLayout(self.frame_social_distancing)
self.horizontalLayout_30.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_30.setObjectName("horizontalLayout_30")
self.horizontalLayout_29 = QtWidgets.QHBoxLayout()
self.horizontalLayout_29.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_29.setObjectName("horizontalLayout_29")
self.label_29 = QtWidgets.QLabel(self.frame_social_distancing)
self.label_29.setMinimumSize(QtCore.QSize(170, 25))
self.label_29.setMaximumSize(QtCore.QSize(170, 16777215))
self.label_29.setObjectName("label_29")
self.horizontalLayout_29.addWidget(self.label_29)
self.slider_social_distancing = QtWidgets.QSlider(self.frame_social_distancing)
self.slider_social_distancing.setMinimum(1)
self.slider_social_distancing.setMaximum(5)
self.slider_social_distancing.setPageStep(1)
self.slider_social_distancing.setOrientation(QtCore.Qt.Horizontal)
self.slider_social_distancing.setTickPosition(QtWidgets.QSlider.NoTicks)
self.slider_social_distancing.setObjectName("slider_social_distancing")
self.horizontalLayout_29.addWidget(self.slider_social_distancing)
self.horizontalLayout_30.addLayout(self.horizontalLayout_29)
self.verticalLayout_3.addWidget(self.frame_social_distancing)
self.horizontalLayout_23 = QtWidgets.QHBoxLayout()
self.horizontalLayout_23.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_23.setObjectName("horizontalLayout_23")
self.label_24 = QtWidgets.QLabel(self.parameter)
self.label_24.setMinimumSize(QtCore.QSize(170, 21))
self.label_24.setMaximumSize(QtCore.QSize(170, 16777215))
self.label_24.setObjectName("label_24")
self.horizontalLayout_23.addWidget(self.label_24)
spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_23.addItem(spacerItem5)
self.check_lockdown = AnimatedToggle(self.parameter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.check_lockdown.sizePolicy().hasHeightForWidth())
self.check_lockdown.setSizePolicy(sizePolicy)
self.check_lockdown.setMinimumSize(QtCore.QSize(55, 0))
self.check_lockdown.setMaximumSize(QtCore.QSize(55, 16777215))
self.check_lockdown.setLayoutDirection(QtCore.Qt.RightToLeft)
self.check_lockdown.setText("")
self.check_lockdown.setObjectName("check_lockdown")
self.horizontalLayout_23.addWidget(self.check_lockdown)
self.verticalLayout_3.addLayout(self.horizontalLayout_23)
self.frame_lockdown = QtWidgets.QFrame(self.parameter)
self.frame_lockdown.setMinimumSize(QtCore.QSize(0, 0))
self.frame_lockdown.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_lockdown.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame_lockdown.setLineWidth(0)
self.frame_lockdown.setObjectName("frame_lockdown")
self.horizontalLayout_25 = QtWidgets.QHBoxLayout(self.frame_lockdown)
self.horizontalLayout_25.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_25.setObjectName("horizontalLayout_25")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.label_lockdown = QtWidgets.QLabel(self.frame_lockdown)
self.label_lockdown.setMinimumSize(QtCore.QSize(170, 25))
self.label_lockdown.setMaximumSize(QtCore.QSize(170, 16777215))
self.label_lockdown.setObjectName("label_lockdown")
self.horizontalLayout_3.addWidget(self.label_lockdown)
self.slider_lockdown = QtWidgets.QSlider(self.frame_lockdown)
self.slider_lockdown.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.slider_lockdown.setAutoFillBackground(False)
self.slider_lockdown.setMinimum(1)
self.slider_lockdown.setMaximum(4)
self.slider_lockdown.setProperty("value", 1)
self.slider_lockdown.setOrientation(QtCore.Qt.Horizontal)
self.slider_lockdown.setTickPosition(QtWidgets.QSlider.NoTicks)
self.slider_lockdown.setTickInterval(1)
self.slider_lockdown.setObjectName("slider_lockdown")
self.horizontalLayout_3.addWidget(self.slider_lockdown)
self.horizontalLayout_25.addLayout(self.horizontalLayout_3)
self.verticalLayout_3.addWidget(self.frame_lockdown)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.label_quarantaene = QtWidgets.QLabel(self.parameter)
self.label_quarantaene.setMinimumSize(QtCore.QSize(170, 21))
self.label_quarantaene.setMaximumSize(QtCore.QSize(170, 16777215))
self.label_quarantaene.setObjectName("label_quarantaene")
self.horizontalLayout_5.addWidget(self.label_quarantaene)
spacerItem6 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem6)
self.check_quarantaene = AnimatedToggle(self.parameter)
self.check_quarantaene.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.check_quarantaene.sizePolicy().hasHeightForWidth())
self.check_quarantaene.setSizePolicy(sizePolicy)
self.check_quarantaene.setMinimumSize(QtCore.QSize(55, 0))
self.check_quarantaene.setMaximumSize(QtCore.QSize(55, 16777215))
self.check_quarantaene.setAccessibleDescription("")
self.check_quarantaene.setLayoutDirection(QtCore.Qt.RightToLeft)
self.check_quarantaene.setText("")
self.check_quarantaene.setCheckable(True)
self.check_quarantaene.setObjectName("check_quarantaene")
self.horizontalLayout_5.addWidget(self.check_quarantaene)
self.verticalLayout_3.addLayout(self.horizontalLayout_5)
self.frame_quarantaeneausbrecher = QtWidgets.QFrame(self.parameter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame_quarantaeneausbrecher.sizePolicy().hasHeightForWidth())
self.frame_quarantaeneausbrecher.setSizePolicy(sizePolicy)
self.frame_quarantaeneausbrecher.setMinimumSize(QtCore.QSize(0, 0))
font = QtGui.QFont()
font.setKerning(True)
self.frame_quarantaeneausbrecher.setFont(font)
self.frame_quarantaeneausbrecher.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
self.frame_quarantaeneausbrecher.setToolTipDuration(-1)
self.frame_quarantaeneausbrecher.setInputMethodHints(QtCore.Qt.ImhNone)
self.frame_quarantaeneausbrecher.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_quarantaeneausbrecher.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_quarantaeneausbrecher.setLineWidth(0)
self.frame_quarantaeneausbrecher.setObjectName("frame_quarantaeneausbrecher")
self.horizontalLayout_20 = QtWidgets.QHBoxLayout(self.frame_quarantaeneausbrecher)
self.horizontalLayout_20.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_20.setObjectName("horizontalLayout_20")
self.layout_quarantaeneausbrecher = QtWidgets.QHBoxLayout()
self.layout_quarantaeneausbrecher.setContentsMargins(-1, 0, -1, -1)
self.layout_quarantaeneausbrecher.setObjectName("layout_quarantaeneausbrecher")
self.label_quarantaeneausbrecher = QtWidgets.QLabel(self.frame_quarantaeneausbrecher)
self.label_quarantaeneausbrecher.setMinimumSize(QtCore.QSize(170, 25))
self.label_quarantaeneausbrecher.setMaximumSize(QtCore.QSize(170, 16777215))
self.label_quarantaeneausbrecher.setObjectName("label_quarantaeneausbrecher")
self.layout_quarantaeneausbrecher.addWidget(self.label_quarantaeneausbrecher)
self.eingabe_quarantaeneausbrecher = QtWidgets.QLineEdit(self.frame_quarantaeneausbrecher)
self.eingabe_quarantaeneausbrecher.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.eingabe_quarantaeneausbrecher.setObjectName("eingabe_quarantaeneausbrecher")
self.layout_quarantaeneausbrecher.addWidget(self.eingabe_quarantaeneausbrecher)
self.label_19 = QtWidgets.QLabel(self.frame_quarantaeneausbrecher)
self.label_19.setObjectName("label_19")
self.layout_quarantaeneausbrecher.addWidget(self.label_19)
self.horizontalLayout_20.addLayout(self.layout_quarantaeneausbrecher)
self.verticalLayout_3.addWidget(self.frame_quarantaeneausbrecher)
self.horizontalLayout_34 = QtWidgets.QHBoxLayout()
self.horizontalLayout_34.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_34.setObjectName("horizontalLayout_34")
self.label_20 = QtWidgets.QLabel(self.parameter)
self.label_20.setMinimumSize(QtCore.QSize(170, 25))
self.label_20.setMaximumSize(QtCore.QSize(170, 16777215))
self.label_20.setObjectName("label_20")
self.horizontalLayout_34.addWidget(self.label_20)
spacerItem7 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_34.addItem(spacerItem7)
self.check_extended_view = AnimatedToggle(self.parameter)
self.check_extended_view.setMinimumSize(QtCore.QSize(55, 0))
self.check_extended_view.setMaximumSize(QtCore.QSize(55, 16777215))
self.check_extended_view.setLayoutDirection(QtCore.Qt.RightToLeft)
self.check_extended_view.setText("")
self.check_extended_view.setObjectName("check_extended_view")
self.horizontalLayout_34.addWidget(self.check_extended_view)
self.verticalLayout_3.addLayout(self.horizontalLayout_34)
self.frame_extended_view = QtWidgets.QFrame(self.parameter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame_extended_view.sizePolicy().hasHeightForWidth())
self.frame_extended_view.setSizePolicy(sizePolicy)
self.frame_extended_view.setMinimumSize(QtCore.QSize(30, 30))
self.frame_extended_view.setStyleSheet("height:20px")
self.frame_extended_view.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_extended_view.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_extended_view.setObjectName("frame_extended_view")
self.gridLayout_4 = QtWidgets.QGridLayout(self.frame_extended_view)
self.gridLayout_4.setContentsMargins(0, 0, 0, 0)
self.gridLayout_4.setObjectName("gridLayout_4")
self.gridLayout_3 = QtWidgets.QGridLayout()
self.gridLayout_3.setContentsMargins(-1, -1, -1, 0)
self.gridLayout_3.setObjectName("gridLayout_3")
self.horizontalLayout_35 = QtWidgets.QHBoxLayout()
self.horizontalLayout_35.setObjectName("horizontalLayout_35")
self.check_show_infection_radius = QtWidgets.QCheckBox(self.frame_extended_view)
self.check_show_infection_radius.setMinimumSize(QtCore.QSize(0, 0))
self.check_show_infection_radius.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.check_show_infection_radius.setLayoutDirection(QtCore.Qt.LeftToRight)
self.check_show_infection_radius.setText("")
self.check_show_infection_radius.setObjectName("check_show_infection_radius")
self.horizontalLayout_35.addWidget(self.check_show_infection_radius)
self.label_31 = QtWidgets.QLabel(self.frame_extended_view)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_31.sizePolicy().hasHeightForWidth())
self.label_31.setSizePolicy(sizePolicy)
self.label_31.setMinimumSize(QtCore.QSize(0, 20))
self.label_31.setObjectName("label_31")
self.horizontalLayout_35.addWidget(self.label_31)
spacerItem8 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_35.addItem(spacerItem8)
self.gridLayout_3.addLayout(self.horizontalLayout_35, 0, 0, 1, 1)
self.horizontalLayout_43 = QtWidgets.QHBoxLayout()
self.horizontalLayout_43.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_43.setObjectName("horizontalLayout_43")
self.check_show_humans = QtWidgets.QCheckBox(self.frame_extended_view)
self.check_show_humans.setText("")
self.check_show_humans.setObjectName("check_show_humans")
self.horizontalLayout_43.addWidget(self.check_show_humans)
self.label_38 = QtWidgets.QLabel(self.frame_extended_view)
self.label_38.setObjectName("label_38")
self.horizontalLayout_43.addWidget(self.label_38)
spacerItem9 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_43.addItem(spacerItem9)
self.gridLayout_3.addLayout(self.horizontalLayout_43, 1, 0, 1, 1)
self.horizontalLayout_46 = QtWidgets.QHBoxLayout()
self.horizontalLayout_46.setObjectName("horizontalLayout_46")
self.check_show_social_distance = QtWidgets.QCheckBox(self.frame_extended_view)
self.check_show_social_distance.setMaximumSize(QtCore.QSize(19, 16777215))
self.check_show_social_distance.setText("")
self.check_show_social_distance.setObjectName("check_show_social_distance")
self.horizontalLayout_46.addWidget(self.check_show_social_distance)
self.label_40 = QtWidgets.QLabel(self.frame_extended_view)
self.label_40.setObjectName("label_40")
self.horizontalLayout_46.addWidget(self.label_40)
spacerItem10 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_46.addItem(spacerItem10)
self.gridLayout_3.addLayout(self.horizontalLayout_46, 0, 1, 1, 1)
self.horizontalLayout_36 = QtWidgets.QHBoxLayout()
self.horizontalLayout_36.setObjectName("horizontalLayout_36")
self.check_show_home = QtWidgets.QCheckBox(self.frame_extended_view)
self.check_show_home.setMinimumSize(QtCore.QSize(0, 0))
self.check_show_home.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.check_show_home.setText("")
self.check_show_home.setObjectName("check_show_home")
self.horizontalLayout_36.addWidget(self.check_show_home)
self.label_32 = QtWidgets.QLabel(self.frame_extended_view)
self.label_32.setObjectName("label_32")
self.horizontalLayout_36.addWidget(self.label_32)
spacerItem11 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_36.addItem(spacerItem11)
self.gridLayout_3.addLayout(self.horizontalLayout_36, 1, 1, 1, 1)
self.gridLayout_3.setRowMinimumHeight(0, 20)
self.gridLayout_3.setRowMinimumHeight(1, 20)
self.gridLayout_3.setColumnStretch(0, 1)
self.gridLayout_3.setColumnStretch(1, 1)
self.gridLayout_4.addLayout(self.gridLayout_3, 0, 0, 1, 1)
self.verticalLayout_3.addWidget(self.frame_extended_view)
spacerItem12 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_3.addItem(spacerItem12)
self.mainLayout_parameter.addLayout(self.verticalLayout_3)
self.horizontalLayout_18.addLayout(self.mainLayout_parameter)
self.stackedWidget_parameter.addWidget(self.parameter)
self.export_2 = QtWidgets.QWidget()
self.export_2.setObjectName("export_2")
self.gridLayout_2 = QtWidgets.QGridLayout(self.export_2)
self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_2.setObjectName("gridLayout_2")
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setSpacing(5)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.label_exportieren = QtWidgets.QLabel(self.export_2)
self.label_exportieren.setMaximumSize(QtCore.QSize(16777215, 25))
font = QtGui.QFont()
font.setPointSize(18)
self.label_exportieren.setFont(font)
self.label_exportieren.setAlignment(QtCore.Qt.AlignCenter)
self.label_exportieren.setObjectName("label_exportieren")
self.verticalLayout_2.addWidget(self.label_exportieren)
spacerItem13 = QtWidgets.QSpacerItem(0, 2, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Maximum)
self.verticalLayout_2.addItem(spacerItem13)
self.horizontalLayout_50 = QtWidgets.QHBoxLayout()
self.horizontalLayout_50.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_50.setObjectName("horizontalLayout_50")
self.label_42 = QtWidgets.QLabel(self.export_2)
self.label_42.setMinimumSize(QtCore.QSize(170, 25))
self.label_42.setMaximumSize(QtCore.QSize(170, 16777215))
self.label_42.setObjectName("label_42")
self.horizontalLayout_50.addWidget(self.label_42)
spacerItem14 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_50.addItem(spacerItem14)
self.check_export_csv = AnimatedToggle(self.export_2)
| |
import configparser
import json
import os
import socket
import time
import timeit
import datetime
import random
import docker
from requests.packages.urllib3.exceptions import ReadTimeoutError
from requests.exceptions import ReadTimeout, ConnectionError
from agaveflask.logs import get_logger, get_log_file_strategy
logger = get_logger(__name__)
from channels import ExecutionResultsChannel
from config import Config
from codes import BUSY, READY, RUNNING
import encrypt_utils
import globals
from models import Actor, Execution, get_current_utc_time, display_time, ActorConfig
from stores import workers_store, alias_store, configs_store
TAG = os.environ.get('TAG') or Config.get('general', 'TAG') or ''
# normalize to docker tag syntax (':tag'); guard the empty string -- the previous
# `TAG[0]` check raised IndexError when neither the env var nor the config was set.
if TAG and not TAG.startswith(':'):
    TAG = ':{}'.format(TAG)
AE_IMAGE = '{}{}'.format(os.environ.get('AE_IMAGE', 'abaco/core'), TAG)
# timeout (in seconds) for the socket server
RESULTS_SOCKET_TIMEOUT = 0.1
# max frame size, in bytes, for a single result
MAX_RESULT_FRAME_SIZE = 131072
max_run_time = int(Config.get('workers', 'max_run_time'))
# docker daemon base URL used for all APIClient connections in this module
dd = Config.get('docker', 'dd')
host_id = os.environ.get('SPAWNER_HOST_ID', Config.get('spawner', 'host_id'))
logger.debug("host_id: {}".format(host_id))
host_ip = Config.get('spawner', 'host_ip')
class DockerError(Exception):
    """Base error for docker operations; keeps the message available as an attribute."""
    def __init__(self, message):
        super().__init__(message)
        self.message = message
class DockerStartContainerError(DockerError):
    """Error raised for failures starting a docker container."""
    pass
class DockerStopContainerError(DockerError):
    """Error raised for failures stopping a docker container."""
    pass
def get_docker_credentials():
    """
    Get the docker credentials from the config.

    Reads numbered option pairs ``dockerhub_username_N`` / ``dockerhub_password_N``
    (N = 1, 2, ...) from the [docker] config section until a pair is missing or empty.

    :return: list of dicts, each with 'username' and 'password' keys.
    """
    # we try to get as many credentials as have been
    creds = []
    cnt = 1
    while True:
        try:
            username = Config.get('docker', f'dockerhub_username_{cnt}')
            password = Config.get('docker', f'dockerhub_password_{cnt}')
        except Exception:
            # a missing option marks the end of the numbered list; the previous bare
            # `except:` also swallowed KeyboardInterrupt/SystemExit.
            break
        if not username or not password:
            break
        creds.append({'username': username, 'password': password})
        cnt = cnt + 1
    return creds
# credentials loaded once at import time and reused by get_random_dockerhub_cred()
dockerhub_creds = get_docker_credentials()
def get_random_dockerhub_cred():
    """
    Choose a dockerhub credential at random.

    :return: (username, password) tuple, or (None, None) when no credentials are
             configured or the chosen entry is malformed.
    """
    if not dockerhub_creds:
        return None, None
    creds = random.choice(dockerhub_creds)
    try:
        username = creds['username']
        password = creds['password']
    except Exception as e:
        # include the actual error; the previous log line silently dropped it
        logger.debug("Got exception trying to get dockerhub credentials: %s", e)
        return None, None
    return username, password
def cli_login(cli, username, password):
    """
    Try to login a dockerhub cli with a username and password.

    Failures are logged and swallowed so the caller proceeds unauthenticated.
    """
    try:
        cli.login(username=username, password=password)
    except Exception as e:
        logger.error(f"Could not login using dockerhub creds; username: {username}."
                     f"Exception: {e}")
def rm_container(cid):
    """
    Force-remove the container with id `cid`.

    Logs in to dockerhub first when credentials are configured.

    :param cid: the container id to remove
    :raises DockerError: if the docker daemon fails to remove the container
    """
    cli = docker.APIClient(base_url=dd, version="auto")
    username, password = get_random_dockerhub_cred()
    if username and password:
        cli_login(cli, username, password)
    try:
        cli.remove_container(cid, force=True)
    except Exception as e:
        logger.info("Got exception trying to remove container: {}. Exception: {}".format(cid, e))
        raise DockerError("Error removing container {}, exception: {}".format(cid, str(e)))
    logger.info("container {} removed.".format(cid))
def pull_image(image):
    """
    Update the local registry with an actor's image.
    :param image: the docker image (repository[:tag]) to pull.
    :return: the raw response from the docker pull.
    :raises DockerError: if the pull fails or the registry reports an error.
    """
    logger.debug("top of pull_image()")
    cli = docker.APIClient(base_url=dd, version="auto")
    # use dockerhub credentials, when configured, to avoid anonymous rate limits
    username, password = get_random_dockerhub_cred()
    if username and password:
        cli_login(cli, username, password)
    try:
        rsp = cli.pull(repository=image)
    except Exception as e:
        msg = "Error pulling image {} - exception: {} ".format(image, e)
        logger.info(msg)
        raise DockerError(msg)
    # scan the pull output for registry error payloads
    # NOTE(review): assumes rsp is a str here (docker-py returns a str when stream=False) — confirm
    if '"message":"Error' in rsp:
        if '{} not found'.format(image) in rsp:
            msg = "Image {} was not found on the public registry.".format(image)
            logger.info(msg)
            raise DockerError(msg)
        else:
            msg = "There was an error pulling the image: {}".format(rsp)
            logger.error(msg)
            raise DockerError(msg)
    return rsp
def list_all_containers():
    """Return the list of running containers reported by the docker daemon."""
    client = docker.APIClient(base_url=dd, version="auto")
    return client.containers()
def get_current_worker_containers():
    """Return metadata for worker containers currently running on this host.

    Each entry is a dict with keys 'container' (the raw docker container dict),
    'tenant_id', 'actor_id' and 'worker_id'. Containers whose name does not follow
    the worker naming scheme are skipped.
    """
    worker_containers = []
    containers = list_all_containers()
    for c in containers:
        if 'worker' in c['Names'][0]:
            container_name = c['Names'][0]
            # worker container names have format "/worker_<tenant>_<actor-id>_<worker-id>
            # so split on _ to get parts
            try:
                parts = container_name.split('_')
                worker_containers.append({'container': c,
                                          'tenant_id': parts[1],
                                          'actor_id': parts[2],
                                          'worker_id': parts[3]})
            except IndexError:
                # name did not have enough parts; the previous bare `except:` hid all
                # errors, including ones unrelated to the name format.
                pass
    return worker_containers
def check_worker_containers_against_store():
    """
    Checks the existing worker containers on a host against the status of the worker
    in the workers_store, printing one line per worker container.
    """
    worker_containers = get_current_worker_containers()
    for idx, w in enumerate(worker_containers):
        try:
            # try to get the worker from the store:
            store_key = '{}_{}_{}'.format(w['tenant_id'], w['actor_id'], w['worker_id'])
            worker = workers_store[store_key]
        except KeyError:
            worker = {}
        status = worker.get('status')
        try:
            last_execution_time = display_time(worker.get('last_execution_time'))
        except Exception:
            # display_time can fail on a missing/garbled timestamp; the previous bare
            # `except:` also swallowed KeyboardInterrupt/SystemExit.
            last_execution_time = None
        print(idx, '). ', w['actor_id'], w['worker_id'], status, last_execution_time)
def container_running(name=None):
    """Check if there is a running container whose name contains the string, `name`.

    Note: substring match -- returns True if ANY running container's name contains
    the input `name`.
    """
    logger.debug("top of container_running().")
    filters = {'name': name} if name else {}
    cli = docker.APIClient(base_url=dd, version="auto")
    try:
        containers = cli.containers(filters=filters)
    except Exception as e:
        msg = "There was an error checking container_running for name: {}. Exception: {}".format(name, e)
        logger.error(msg)
        raise DockerError(msg)
    logger.debug("found containers: {}".format(containers))
    return bool(containers)
def run_container_with_docker(image,
                              command,
                              name=None,
                              environment=None,
                              mounts=None,
                              log_file=None,
                              auto_remove=False,
                              client_id=None,
                              client_access_token=None,
                              client_refresh_token=None,
                              actor_id=None,
                              tenant=None,
                              api_server=None,
                              client_secret=None
                              ):
    """
    Run a container with docker mounted in it.
    Note: this function always mounts the abaco conf file so it should not be used by execute_actor().

    :param image: docker image to start the container from
    :param command: command to run inside the container
    :param name: optional container name
    :param environment: optional dict of environment variables; the client/actor
        parameters below are injected into it when not already present
    :param mounts: optional list of dicts with 'host_path', 'container_path' and
        'format' keys ('ro' for read-only)
    :param log_file: log file name to bind-mount; derived from the log file strategy when None
    :param auto_remove: whether docker should auto-remove the container on exit
    :param client_id: OAuth client id, passed to the container via its environment
    :param client_access_token: OAuth access token, passed via the environment
    :param client_refresh_token: OAuth refresh token, passed via the environment
    :param actor_id: actor id, passed via the environment
    :param tenant: tenant id, passed via the environment
    :param api_server: API server URL, passed via the environment
    :param client_secret: OAuth client secret, passed via the environment
    :raises DockerError: when the abaco conf path cannot be found or the container
        cannot be created/started
    :return: the container dict returned by docker-py's create_container()
    """
    logger.debug("top of run_container_with_docker().")
    # environment/mounts previously used mutable defaults ({} and []); environment is
    # mutated below, so the shared default dict leaked values across calls. Use fresh
    # objects per call instead.
    environment = {} if environment is None else environment
    mounts = [] if mounts is None else mounts
    cli = docker.APIClient(base_url=dd, version="auto")
    # bind the docker socket as r/w since this container gets docker.
    volumes = ['/var/run/docker.sock']
    binds = {'/var/run/docker.sock': {'bind': '/var/run/docker.sock', 'ro': False}}
    # add a bind key and dictionary as well as a volume for each mount
    for m in mounts:
        binds[m.get('host_path')] = {'bind': m.get('container_path'),
                                     'ro': m.get('format') == 'ro'}
        volumes.append(m.get('host_path'))
    # mount the abaco conf file. first we look for the environment variable, falling back to the value in Config.
    try:
        abaco_conf_host_path = os.environ.get('abaco_conf_host_path')
        if not abaco_conf_host_path:
            abaco_conf_host_path = Config.get('spawner', 'abaco_conf_host_path')
        logger.debug("docker_utils using abaco_conf_host_path={}".format(abaco_conf_host_path))
        # mount config file at the root of the container as r/o
        volumes.append('/service.conf')
        binds[abaco_conf_host_path] = {'bind': '/service.conf', 'ro': True}
    except configparser.NoOptionError as e:
        # if we're here, it's bad. we don't have a config file. better to cut and run,
        msg = "Did not find the abaco_conf_host_path in Config. Exception: {}".format(e)
        logger.error(msg)
        raise DockerError(msg)
    # inject the conf path and client/actor context into the environment, without
    # overriding anything the caller already set explicitly
    for key, value in (('abaco_conf_host_path', abaco_conf_host_path),
                       ('client_id', client_id),
                       ('client_access_token', client_access_token),
                       ('actor_id', actor_id),
                       ('tenant', tenant),
                       ('api_server', api_server),
                       ('client_secret', client_secret),
                       ('client_refresh_token', client_refresh_token)):
        if key not in environment:
            environment[key] = value
    # if not passed, determine what log file to use
    if not log_file:
        if get_log_file_strategy() == 'split':
            log_file = 'worker.log'
        else:
            log_file = 'abaco.log'
    # mount the logs file.
    volumes.append('/var/log/service.log')
    # first check to see if the logs directory config was set:
    try:
        logs_host_dir = Config.get('logs', 'host_dir')
    except (configparser.NoSectionError, configparser.NoOptionError):
        # if the directory is not configured, default it to abaco_conf_host_path
        logs_host_dir = os.path.dirname(abaco_conf_host_path)
    binds['{}/{}'.format(logs_host_dir, log_file)] = {'bind': '/var/log/service.log', 'rw': True}
    host_config = cli.create_host_config(binds=binds, auto_remove=auto_remove)
    logger.debug("binds: {}".format(binds))
    # add the container to a specific docker network, if configured
    netconf = None
    try:
        docker_network = Config.get('spawner', 'docker_network')
    except Exception:
        docker_network = None
    if docker_network:
        netconf = cli.create_networking_config({docker_network: cli.create_endpoint_config()})
    # create and start the container
    try:
        container = cli.create_container(image=image,
                                         environment=environment,
                                         volumes=volumes,
                                         host_config=host_config,
                                         command=command,
                                         name=name,
                                         networking_config=netconf)
        cli.start(container=container.get('Id'))
        logger.debug('container successfully started')
    except Exception as e:
        msg = "Got exception trying to run container from image: {}. Exception: {}".format(image, e)
        logger.info(msg)
        raise DockerError(msg)
    logger.info("container started successfully: {}".format(container))
    return container
def run_worker(image,
revision,
actor_id,
worker_id,
client_id,
client_access_token,
client_refresh_token,
tenant,
api_server,
client_secret):
"""
Run an actor executor worker with a given channel and image.
:return:
"""
logger.debug("top of run_worker()")
command = 'python3 -u /actors/worker.py'
logger.debug("docker_utils running worker. actor_id: {}; worker_id: {}; "
"image:{}, revision: {}; command:{}".format(actor_id, worker_id, image, revision, command))
# mount the directory on the host for creating fifos
try:
fifo_host_path_dir = Config.get('workers', 'fifo_host_path_dir')
logger.info("Using fifo_host_path_dir: {}".format(fifo_host_path_dir))
except (configparser.NoSectionError, configparser.NoOptionError) as e:
logger.error("Got exception trying to look up fifo_host_path_dir. Setting to None. Exception: {}".format(e))
fifo_host_path_dir = None
if fifo_host_path_dir:
mounts = [{'host_path': os.path.join(fifo_host_path_dir, worker_id),
'container_path': os.path.join(fifo_host_path_dir, worker_id),
'format': 'rw'}]
else:
mounts = []
# mount the directory on the host for creating result sockets
try:
socket_host_path_dir = Config.get('workers', 'socket_host_path_dir')
logger.info("Using socket_host_path_dir: {}".format(socket_host_path_dir))
except (configparser.NoSectionError, configparser.NoOptionError) as e:
logger.error("Got exception trying to look up fifo_host_path_dir. Setting to None. Exception: {}".format(e))
socket_host_path_dir = None
if socket_host_path_dir:
mounts.append({'host_path': os.path.join(socket_host_path_dir, worker_id),
'container_path': os.path.join(socket_host_path_dir, worker_id),
'format': 'rw'})
logger.info("Final fifo_host_path_dir: {}; socket_host_path_dir: {}".format(fifo_host_path_dir,
socket_host_path_dir))
try:
auto_remove = Config.get('workers', 'auto_remove')
except (configparser.NoSectionError, configparser.NoOptionError) as e:
logger.debug("no auto_remove in the workers stanza.")
auto_remove = True
if hasattr(auto_remove, 'lower'):
if auto_remove.lower() == 'false':
auto_remove = False
else:
auto_remove = True
elif not auto_remove == True:
auto_remove = False
container = run_container_with_docker(
image=AE_IMAGE,
command=command,
environment={
'image': image,
'revision': revision,
'worker_id': worker_id,
'_abaco_secret': os.environ.get('_abaco_secret')},
mounts=mounts,
log_file=None,
auto_remove=auto_remove,
name='worker_{}_{}'.format(actor_id, worker_id),
client_id=client_id,
client_access_token=client_access_token,
client_refresh_token=client_refresh_token,
actor_id=actor_id,
tenant=tenant,
api_server=api_server,
client_secret=client_secret
)
# don't catch errors -- if we get an error trying to run a worker, let it bubble up.
# | |
from abc import abstractmethod, ABC
from .config import CoreConfig
from ..common_imports import *
from ..util.common_ut import concat_lists, ContentHashing
# supported strategies for hashing values of a type
HASH_METHODS = ('causal', 'content')
# compound/collection types (list, dict, union) always hash with this method
COLLECTION_HASH_METHOD = 'causal'
################################################################################
### bases
################################################################################
class Type(ABC):
    """Abstract base class for the structural type system.

    A Type describes the admissible shape of values. Concrete subclasses
    implement a JSON-serializable structural description (`dump`), enumeration
    of nested types (`subtypes`), and a per-type value-hashing strategy
    (`hash_method` / `set_hash_method`).
    """
    def __init__(self, name:str=None):
        # optional name; may be assigned at most once via set_name()
        self._name = name
    ######################################
    ### small but important
    ######################################
    @property
    def name(self) -> TOption[str]:
        """The type's name, or None when unnamed."""
        return self._name
    def set_name(self, name:str):
        """Set the name; idempotent for the same value, ValueError for any change."""
        # can be set to at most a single value
        if self._name is not None:
            if name != self._name:
                raise ValueError()
        if name is None:
            raise ValueError()
        self._name = name
    @property
    def is_named(self) -> bool:
        return self._name is not None
    def _reset_name(self):
        # internal helper: clear the name so it can be re-assigned
        self._name = None
    ######################################
    ###
    ######################################
    @property
    def annotation(self) -> TAny:
        """The equivalent `typing` annotation for this type; implemented by subclasses."""
        raise NotImplementedError()
    @staticmethod
    def from_tp_or_wrapper(obj:TUnion['Type', 'TypeWrapper']) -> 'Type':
        """Coerce a Type or a TypeWrapper to the underlying Type."""
        if isinstance(obj, Type):
            return obj
        elif isinstance(obj, TypeWrapper):
            return obj.tp
        else:
            raise TypeError(f'Got type {type(obj)}')
    @staticmethod
    def from_annotation(annotation:TAny) -> 'Type':
        """Build a Type from a python/typing annotation.

        Handles missing annotations (-> AnyType), typing.Any, existing
        Type/TypeWrapper instances, plain classes (list/dict get dedicated
        types), TypeVars, and parameterized generics (List/Dict/Union/Tuple).
        """
        if (annotation is None) or (annotation is inspect._empty):
            # unannotated parameters/returns are treated as "anything"
            return AnyType()
        if annotation == typing.Any:
            return AnyType()
        elif isinstance(annotation, Type):
            return annotation
        elif isinstance(annotation, TypeWrapper):
            return annotation.tp
        elif isinstance(annotation, type):
            if annotation is list:
                return ListType()
            elif annotation is dict:
                return DictType()
            else:
                return AtomType(annotation=annotation)
        elif isinstance(annotation, typing.TypeVar):
            return TypeVar(_id=annotation.__name__,
                           constraints=annotation.__constraints__)
        elif hasattr(annotation, '__origin__'):
            # parameterized generics, e.g. List[int], Dict[str, int], Union[...]
            if annotation.__origin__ is list:
                elt_annotation = annotation.__args__[0]
                return ListType(
                    elt_type=Type.from_annotation(annotation=elt_annotation)
                )
            elif annotation.__origin__ is dict:
                key_annotation = annotation.__args__[0]
                # only string-keyed dicts are supported
                assert key_annotation is str
                value_annotation = annotation.__args__[1]
                return DictType(
                    value_type=Type.from_annotation(value_annotation)
                )
            elif annotation.__origin__ is typing.Union:
                return get_union(
                    tps=[Type.from_annotation(x) for x in annotation.__args__]
                )
            elif annotation.__origin__ is tuple:
                # tuples are treated as opaque atoms rather than compound types
                return AtomType(annotation=annotation)
            else:
                raise NotImplementedError(f'Got annotation {annotation}')
        else:
            raise TypeError(f'Got value {annotation}')
    @property
    @abstractmethod
    def is_compound(self) -> bool:
        """Whether this type is built from other types (e.g. list/dict)."""
        raise NotImplementedError()
    @property
    @abstractmethod
    def is_concrete(self) -> bool:
        """
        Whether this type can be the type of a value
        """
        raise NotImplementedError()
    ### storage interface
    @abstractmethod
    def dump(self) -> TAny:
        """
        Return JSON-serializable unique structural description of this type
        """
        raise NotImplementedError()
    @staticmethod
    def load(residue) -> 'Type':
        # deserialization hook; not implemented on the base class
        raise NotImplementedError()
    def __eq__(self, other:'Type') -> bool:
        # structural equality: two types are equal iff their dumps agree
        if not isinstance(other, Type):
            return False
        return self.dump() == other.dump()
    # NOTE(review): defining __eq__ without __hash__ makes instances unhashable via
    # hash(); the explicit .hash() method below is used instead — confirm intended.
    def hash(self) -> str:
        """Content hash of the structural dump; used for deduplication."""
        return ContentHashing.DEFAULT(self.dump())
    @abstractmethod
    def subtypes(self) -> TIter['Type']:
        """Return this type together with all nested types."""
        raise NotImplementedError()
    ###
    def __repr__(self) -> str:
        data = self.dump()
        data_str = ', '.join([f'{k}={v}' for k, v in data.items()])
        return f'Type({data_str})'
        # return prettify_obj(self.dump())
    ############################################################################
    ### hashing for values of this type
    ############################################################################
    @property
    @abstractmethod
    def hash_method(self) -> str:
        """One of HASH_METHODS ('causal' or 'content'): how values of this type hash."""
        raise NotImplementedError()
    @abstractmethod
    def set_hash_method(self, method:str):
        raise NotImplementedError()
class TypeWrapper(ABC):
    """Interface for objects that expose an underlying Type via the `tp` property."""
    # used for interfaces
    @property
    @abstractmethod
    def tp(self) -> Type:
        raise NotImplementedError()
################################################################################
### implementations
################################################################################
class AnyType(Type):
    """The top type: matches any value. Always concrete, never compound."""

    def __init__(self):
        self._name = None
        self._hash_method = CoreConfig.default_hash_method

    @property
    def annotation(self) -> TAny:
        return typing.Any

    @property
    def is_compound(self) -> bool:
        return False

    @property
    def is_concrete(self) -> bool:
        return True

    def dump(self) -> TAny:
        return {'type': type(self).__name__}

    def subtypes(self) -> TIter['Type']:
        return [self]

    @property
    def hash_method(self) -> str:
        # fall back to the configured default for instances lacking _hash_method
        return getattr(self, '_hash_method', CoreConfig.default_hash_method)

    def set_hash_method(self, method:str):
        self._hash_method = method

    def __repr__(self) -> str:
        return 'AnyType()'
class AtomType(Type):
    """A non-compound type backed directly by a python annotation (e.g. int, str)."""

    def __init__(self, annotation:TAny):
        self._name = None
        self._annotation = annotation
        self._hash_method = CoreConfig.default_hash_method

    @property
    def hash_method(self) -> str:
        # fall back to the configured default for instances lacking _hash_method
        return getattr(self, '_hash_method', CoreConfig.default_hash_method)

    def set_hash_method(self, method:str):
        self._hash_method = method

    @property
    def annotation(self) -> TAny:
        return self._annotation

    @property
    def is_compound(self) -> bool:
        return False

    @property
    def is_concrete(self) -> bool:
        return True

    def dump(self) -> TAny:
        return {'type': type(self).__name__,
                'annotation': str(self.annotation),
                'name': self.name
                }

    def subtypes(self) -> TIter['Type']:
        return [self]

    def __repr__(self) -> str:
        anno_repr = self.annotation.__name__ if isinstance(self.annotation, type) else self.annotation
        if self.name is None:
            return f'AtomType({anno_repr})'
        return f'AtomType({anno_repr}, name={self.name})'
class ListType(Type):
    """Homogeneous list type; the element type defaults to AnyType."""

    def __init__(self, elt_type:Type=None):
        self._name = None
        self._ui_name = 'list'
        self._elt_type = elt_type if elt_type is not None else AnyType()
        self._hash_method = None

    @property
    def hash_method(self) -> str:
        # collection types always hash causally
        return COLLECTION_HASH_METHOD

    def set_hash_method(self, method:str):
        assert method == COLLECTION_HASH_METHOD

    @property
    def annotation(self) -> TAny:
        return typing.List[self.elt_type.annotation]

    @property
    def elt_type(self) -> Type:
        return self._elt_type

    @property
    def is_compound(self) -> bool:
        return True

    @property
    def is_concrete(self) -> bool:
        return self.elt_type.is_concrete

    def dump(self) -> TAny:
        return {'type': type(self).__name__,
                'name': self.name,
                'elt_type': self.elt_type.dump()}

    def subtypes(self) -> TIter['Type']:
        result = [self]
        result.extend(self.elt_type.subtypes())
        return result

    def __repr__(self) -> str:
        return f'ListType(elt_type={self.elt_type}, name={self.name})'
class DictType(Type):
    """String-keyed dict type; the value type defaults to AnyType."""

    def __init__(self, value_type:Type=None) -> None:
        self._name = None
        self._ui_name = 'dict'
        self._value_type = value_type if value_type is not None else AnyType()
        self._hash_method = None

    @property
    def hash_method(self) -> str:
        # collection types always hash causally
        return COLLECTION_HASH_METHOD

    def set_hash_method(self, method:str):
        assert method == COLLECTION_HASH_METHOD

    @property
    def value_type(self) -> Type:
        return self._value_type

    @property
    def annotation(self) -> TAny:
        return typing.Dict[str, self.value_type.annotation]

    @property
    def is_compound(self) -> bool:
        return True

    @property
    def is_concrete(self) -> bool:
        return self.value_type.is_concrete

    def dump(self) -> TAny:
        return {'type': type(self).__name__,
                'name': self.name,
                'value_type': self.value_type.dump()}

    def subtypes(self) -> TIter['Type']:
        result = [self]
        result.extend(self.value_type.subtypes())
        return result

    def __repr__(self) -> str:
        return f'DictType(value_type={self.value_type}, name={self.name})'
################################################################################
### type unions
################################################################################
class UnionType(Type):
    """
    Object representing a non-trivial union of types.
    """
    def __init__(self, operands:TList[Type]=None) -> None:
        """
        Inductive invariant:
        Makes sure that the invariants of self.operands are satisfied, assuming
        any union types in the input already satisfy the invariants.
        """
        self._name = None
        operands = [] if operands is None else operands
        # flatten nested unions by one level (their operands are already flat
        # by the inductive invariant)
        expanded_operands = []
        for op in operands:
            if isinstance(op, UnionType):
                expanded_operands += op.operands
            else:
                expanded_operands.append(op)
        unique_operands = remove_duplicates(tps=expanded_operands)
        # degenerate unions are rejected; use get_union() to collapse 0/1-type cases
        if not len(unique_operands) > 1:
            raise ValueError('Cannot form union of fewer than 2 types')
        self._operands = unique_operands
    @property
    def hash_method(self) -> str:
        # unions, like other collection types, always hash causally
        return COLLECTION_HASH_METHOD
    def set_hash_method(self, method:str):
        assert method == COLLECTION_HASH_METHOD
    @property
    def annotation(self) -> TAny:
        """The equivalent typing.Union annotation over the operands' annotations."""
        things = tuple(elt.annotation for elt in self.operands)
        return typing.Union[things]
    @property
    def operands(self) -> TList[Type]:
        """
        Invariants:
        - no two types are the same
        - no types are union types themselves
        """
        return self._operands
    def associated(self) -> 'UnionType':
        # recursively flatten nested unions; a no-op when the operand invariant holds
        assoc_ops = []
        for op in self.operands:
            if isinstance(op, UnionType):
                assoc_ops += op.associated().operands
            else:
                assoc_ops.append(op)
        return UnionType(operands=assoc_ops)
    @property
    def is_compound(self) -> bool:
        raise NotImplementedError()
    @property
    def is_concrete(self) -> bool:
        # a union is never the type of a concrete value
        return False
    def dump(self) -> TAny:
        return {'type': type(self).__name__,
                'name': self.name,
                'operands': [op.dump() for op in self.operands]}
    def subtypes(self) -> TIter['Type']:
        return concat_lists([list(op.subtypes()) for op in self.operands])
    def __repr__(self) -> str:
        return f'UnionType(operands=[{", ".join([repr(elt) for elt in self.operands])}])'
def remove_duplicates(tps:'TList[Type]') -> 'TList[Type]':
    """
    Return the distinct types in `tps`, deduplicated by their content hash.

    Keeps the first occurrence of each hash and returns the survivors sorted
    by hash, matching the ordering of the previous implementation (a pandas
    ``groupby('hash').first()``, which sorts group keys by default) while
    avoiding the cost of building a DataFrame per call.

    Annotations are quoted so this helper does not depend on the definition
    order of the type aliases.
    """
    # First occurrence per hash; assumes hashes are mutually comparable
    # (presumably hex digest strings — TODO confirm against Type.hash()).
    first_seen = {}
    for tp in tps:
        key = tp.hash()
        if key not in first_seen:
            first_seen[key] = tp
    return [first_seen[key] for key in sorted(first_seen)]
def get_union(tps:TList[Type]) -> Type:
    """
    Union constructor that degrades gracefully when the operands collapse
    to zero or one unique type (a bare UnionType would reject those).
    """
    if not tps:
        logging.warning('Type union of empty collection')
        return AnyType()
    dedup = remove_duplicates(tps)
    return dedup[0] if len(dedup) == 1 else UnionType(operands=dedup)
################################################################################
### builtins
################################################################################
class BuiltinTypes(object):  # todo - this only exists for backward compat
    """
    Factory mapping a small set of Python builtin types to named AtomTypes.
    """
    INT_NAME = '__int__'
    STR_NAME = '__str__'
    @staticmethod
    def get(py_type:type) -> Type:
        """
        Return a fresh AtomType for `py_type`.

        Only ``int`` and ``str`` are supported; anything else raises
        NotImplementedError.
        """
        # Single construction path instead of the duplicated branches; the
        # original also mixed keyword and positional set_name() calls.
        if py_type is int:
            name = BuiltinTypes.INT_NAME
        elif py_type is str:
            name = BuiltinTypes.STR_NAME
        else:
            raise NotImplementedError()
        res = AtomType(annotation=py_type)
        res.set_name(name=name)
        return res
################################################################################
### type utils
################################################################################
def is_subtype(s:Type, t:Type) -> bool:
    """
    Decide structural subtyping: True iff every value that is an instance
    of `s` is also an instance of `t`.
    """
    if isinstance(t, AnyType):
        # Everything is a subtype of Any.
        return True
    if isinstance(t, AtomType):
        # Atoms match only atoms; importantly, *both* name and annotation
        # must agree.
        return (isinstance(s, AtomType)
                and s.name == t.name
                and s.annotation is t.annotation)
    if isinstance(t, ListType):
        # Immutable lists are covariant in their element type.
        return isinstance(s, ListType) and is_subtype(s.elt_type, t.elt_type)
    if isinstance(t, DictType):
        # Immutable mappings are covariant in their value type.
        return isinstance(s, DictType) and is_subtype(s=s.value_type, t=t.value_type)
    if isinstance(t, UnionType):
        if isinstance(s, UnionType):
            # A union fits under t iff each of its operands does.
            return all(is_subtype(s_op, t) for s_op in s.operands)
        # A non-union fits under a union iff it fits under some operand.
        return any(is_subtype(s, t_op) for t_op in t.operands)
    raise NotImplementedError()
def is_member(s:Type, t:Type) -> bool:
"""
Checks whether a concrete type precisely matches a member type in a (not
necessarily concrete) target type.
"""
assert s.is_concrete
if isinstance(t, AnyType):
return isinstance(s, AnyType)
elif isinstance(t, AtomType):
if not isinstance(s, AtomType):
return False
return (s.name == t.name)
elif isinstance(t, ListType):
if not isinstance(s, ListType):
return False
return is_member(s.elt_type, t.elt_type)
elif isinstance(t, DictType):
if not isinstance(s, DictType):
return | |
# day3/test_crossed_wires_part1.py — from capsulecorplab/adventofcode2019
from crossed_wires import FuelManagementSystem
import pytest
class Test1:
    """
    Exercises FuelManagementSystem stage by stage on the small worked
    example from Advent of Code 2019 day 3 (wires "R8,U5,L5,D3" and
    "U7,R6,D4,L4"; expected minimal Manhattan distance is 6).

    Each private pipeline stage (_trace_dir -> _mat_boundaries ->
    _origin_offset -> _trace_coord -> _mat_shape -> _mat_trace ->
    _intersections -> _manhattan_dist_intersection_min) is wrapped in a
    fixture so every test checks exactly one stage's output.
    """
    @pytest.fixture
    def fms(self):
        # System under test, built from the two example wire paths.
        return FuelManagementSystem("R8,U5,L5,D3", "U7,R6,D4,L4")
    @pytest.fixture
    def trace_dir1(self, fms):
        return fms._trace_dir(fms.wire1)
    @pytest.fixture
    def trace_dir2(self, fms):
        return fms._trace_dir(fms.wire2)
    def test_trace_dir1(self, trace_dir1):
        # Parsed (direction, distance) pairs for wire 1.
        assert trace_dir1 == [("R", 8), ("U", 5), ("L", 5), ("D", 3)]
    def test_trace_dir2(self, trace_dir2):
        assert trace_dir2 == [("U", 7), ("R", 6), ("D", 4), ("L", 4)]
    @pytest.fixture
    def mat_boundaries1(self, fms, trace_dir1):
        return fms._mat_boundaries(trace_dir1)
    @pytest.fixture
    def mat_boundaries2(self, fms, trace_dir2):
        return fms._mat_boundaries(trace_dir2)
    def test_mat_boundaries1(self, mat_boundaries1):
        # Bounding box of wire 1's path relative to the origin.
        assert mat_boundaries1 == (0, 5, 0, 8)
    def test_mat_boundaries2(self, mat_boundaries2):
        assert mat_boundaries2 == (0, 7, 0, 6)
    @pytest.fixture()
    def origin_offset(self, fms, mat_boundaries1, mat_boundaries2):
        x_offset, y_offset = fms._origin_offset(mat_boundaries1, mat_boundaries2)
        return x_offset, y_offset
    def test_x_offset(self, fms, origin_offset):
        # Both wires stay in the positive quadrant here, so no shift needed.
        x_offset = origin_offset[0]
        assert x_offset == 0
    def test_y_offset(self, fms, origin_offset):
        y_offset = origin_offset[1]
        assert y_offset == 0
    @pytest.fixture
    def trace_coord1(self, fms, trace_dir1, origin_offset):
        return fms._trace_coord(trace_dir1, origin_offset)
    @pytest.fixture
    def trace_coord2(self, fms, trace_dir2, origin_offset):
        return fms._trace_coord(trace_dir2, origin_offset)
    def test_trace_coord1(self, trace_coord1):
        # Every grid cell visited by wire 1, in walk order, origin first.
        assert trace_coord1 == [
            (0, 0),
            (1, 0),
            (2, 0),
            (3, 0),
            (4, 0),
            (5, 0),
            (6, 0),
            (7, 0),
            (8, 0),
            (8, 1),
            (8, 2),
            (8, 3),
            (8, 4),
            (8, 5),
            (7, 5),
            (6, 5),
            (5, 5),
            (4, 5),
            (3, 5),
            (3, 4),
            (3, 3),
            (3, 2),
        ]
    def test_trace_coord2(self, trace_coord2):
        assert trace_coord2 == [
            (0, 0),
            (0, 1),
            (0, 2),
            (0, 3),
            (0, 4),
            (0, 5),
            (0, 6),
            (0, 7),
            (1, 7),
            (2, 7),
            (3, 7),
            (4, 7),
            (5, 7),
            (6, 7),
            (6, 6),
            (6, 5),
            (6, 4),
            (6, 3),
            (5, 3),
            (4, 3),
            (3, 3),
            (2, 3),
        ]
    @pytest.fixture
    def mat_shape(self, fms, trace_coord1, trace_coord2):
        return fms._mat_shape(trace_coord1, trace_coord2)
    def test_mat_shape(self, mat_shape):
        # Grid large enough to contain both traces (rows, cols).
        assert mat_shape == (8, 9)
    @pytest.fixture
    def mat_trace1(self, fms, trace_coord1, mat_shape, origin_offset):
        return fms._mat_trace(trace_coord1, mat_shape, origin_offset)
    @pytest.fixture
    def mat_trace2(self, fms, trace_coord2, mat_shape, origin_offset):
        return fms._mat_trace(trace_coord2, mat_shape, origin_offset)
    def test_mat_trace1(self, trace_coord1, mat_trace1):
        # Skip the origin cell; every other visited cell must be marked.
        for x_i, y_i in trace_coord1[1:]:
            assert mat_trace1[y_i][x_i] >= 1
    def test_mat_trace2(self, trace_coord2, mat_trace2):
        for x_i, y_i in trace_coord2[1:]:
            assert mat_trace2[y_i][x_i] >= 1
    @pytest.fixture
    def intersections(self, fms, mat_trace1, mat_trace2):
        return fms._intersections(mat_trace1, mat_trace2)
    @pytest.fixture
    def manhattan_dist_intersection_min(self, fms, intersections, origin_offset):
        return fms._manhattan_dist_intersection_min(intersections, origin_offset)
    def test_manhattan_dist_intersection_min(self, manhattan_dist_intersection_min):
        # Puzzle answer for the example: closest crossing is 6 away.
        assert manhattan_dist_intersection_min == 6
class Test2:
@pytest.fixture
def fms(self):
wire1 = "R75,D30,R83,U83,L12,D49,R71,U7,L72"
wire2 = "U62,R66,U55,R34,D71,R55,D58,R83"
return FuelManagementSystem(wire1, wire2)
@pytest.fixture
def trace_dir1(self, fms):
return fms._trace_dir(fms.wire1)
@pytest.fixture
def trace_dir2(self, fms):
return fms._trace_dir(fms.wire2)
def test_trace_dir1(self, trace_dir1):
assert trace_dir1 == [
("R", 75),
("D", 30),
("R", 83),
("U", 83),
("L", 12),
("D", 49),
("R", 71),
("U", 7),
("L", 72),
]
def test_trace_dir2(self, trace_dir2):
assert trace_dir2 == [
("U", 62),
("R", 66),
("U", 55),
("R", 34),
("D", 71),
("R", 55),
("D", 58),
("R", 83),
]
@pytest.fixture
def mat_boundaries1(self, fms, trace_dir1):
return fms._mat_boundaries(trace_dir1)
@pytest.fixture
def mat_boundaries2(self, fms, trace_dir2):
return fms._mat_boundaries(trace_dir2)
def test_mat_boundaries1(self, mat_boundaries1):
assert mat_boundaries1 == (0, 53, -30, 217)
def test_mat_boundaries2(self, mat_boundaries2):
assert mat_boundaries2 == (0, 117, -12, 238)
@pytest.fixture()
def origin_offset(self, fms, mat_boundaries1, mat_boundaries2):
x_offset, y_offset = fms._origin_offset(mat_boundaries1, mat_boundaries2)
return x_offset, y_offset
def test_x_offset(self, origin_offset):
x_offset = origin_offset[0]
assert x_offset == 0
def test_y_offset(self, origin_offset):
y_offset = origin_offset[1]
assert y_offset == 30
@pytest.fixture
def trace_coord1(self, fms, trace_dir1, origin_offset):
return fms._trace_coord(trace_dir1, origin_offset)
@pytest.fixture
def trace_coord2(self, fms, trace_dir2, origin_offset):
return fms._trace_coord(trace_dir2, origin_offset)
def test_trace_coord1(self, trace_coord1):
assert trace_coord1 == [
(0, 30),
(1, 30),
(2, 30),
(3, 30),
(4, 30),
(5, 30),
(6, 30),
(7, 30),
(8, 30),
(9, 30),
(10, 30),
(11, 30),
(12, 30),
(13, 30),
(14, 30),
(15, 30),
(16, 30),
(17, 30),
(18, 30),
(19, 30),
(20, 30),
(21, 30),
(22, 30),
(23, 30),
(24, 30),
(25, 30),
(26, 30),
(27, 30),
(28, 30),
(29, 30),
(30, 30),
(31, 30),
(32, 30),
(33, 30),
(34, 30),
(35, 30),
(36, 30),
(37, 30),
(38, 30),
(39, 30),
(40, 30),
(41, 30),
(42, 30),
(43, 30),
(44, 30),
(45, 30),
(46, 30),
(47, 30),
(48, 30),
(49, 30),
(50, 30),
(51, 30),
(52, 30),
(53, 30),
(54, 30),
(55, 30),
(56, 30),
(57, 30),
(58, 30),
(59, 30),
(60, 30),
(61, 30),
(62, 30),
(63, 30),
(64, 30),
(65, 30),
(66, 30),
(67, 30),
(68, 30),
(69, 30),
(70, 30),
(71, 30),
(72, 30),
(73, 30),
(74, 30),
(75, 30),
(75, 29),
(75, 28),
(75, 27),
(75, 26),
(75, 25),
(75, 24),
(75, 23),
(75, 22),
(75, 21),
(75, 20),
(75, 19),
(75, 18),
(75, 17),
(75, 16),
(75, 15),
(75, 14),
(75, 13),
(75, 12),
(75, 11),
(75, 10),
(75, 9),
(75, 8),
(75, 7),
(75, 6),
(75, 5),
(75, 4),
(75, 3),
(75, 2),
(75, 1),
(75, 0),
(76, 0),
(77, 0),
(78, 0),
(79, 0),
(80, 0),
(81, 0),
(82, 0),
(83, 0),
(84, 0),
(85, 0),
(86, 0),
(87, 0),
(88, 0),
(89, 0),
(90, 0),
(91, 0),
(92, 0),
(93, 0),
(94, 0),
(95, 0),
(96, 0),
(97, 0),
(98, 0),
(99, 0),
(100, 0),
(101, 0),
(102, 0),
(103, 0),
(104, 0),
(105, 0),
(106, 0),
(107, 0),
(108, 0),
(109, 0),
(110, 0),
(111, 0),
(112, 0),
(113, 0),
(114, 0),
(115, 0),
(116, 0),
(117, 0),
(118, 0),
(119, 0),
(120, 0),
(121, 0),
(122, 0),
(123, 0),
(124, 0),
(125, 0),
(126, 0),
(127, 0),
(128, 0),
(129, 0),
(130, 0),
(131, 0),
(132, 0),
(133, 0),
(134, 0),
(135, 0),
(136, 0),
(137, 0),
(138, 0),
(139, 0),
(140, 0),
(141, 0),
(142, 0),
(143, 0),
(144, 0),
(145, 0),
(146, 0),
(147, 0),
(148, 0),
(149, 0),
(150, 0),
(151, 0),
(152, 0),
(153, 0),
(154, 0),
(155, 0),
(156, 0),
(157, 0),
(158, 0),
(158, 1),
(158, 2),
(158, 3),
(158, 4),
(158, 5),
(158, 6),
(158, 7),
(158, 8),
(158, 9),
(158, 10),
(158, 11),
(158, 12),
(158, 13),
(158, 14),
(158, 15),
(158, 16),
(158, 17),
(158, 18),
(158, 19),
(158, 20),
(158, 21),
(158, 22),
(158, 23),
(158, 24),
(158, 25),
(158, 26),
(158, 27),
(158, 28),
(158, 29),
(158, 30),
(158, 31),
(158, 32),
(158, 33),
(158, 34),
(158, 35),
(158, 36),
(158, 37),
(158, 38),
(158, 39),
(158, 40),
(158, 41),
(158, 42),
(158, 43),
(158, 44),
(158, 45),
(158, 46),
(158, 47),
(158, 48),
(158, 49),
(158, 50),
(158, 51),
(158, 52),
(158, 53),
(158, 54),
(158, 55),
(158, 56),
(158, 57),
(158, 58),
(158, 59),
(158, 60),
(158, 61),
(158, 62),
(158, 63),
(158, 64),
(158, 65),
(158, 66),
(158, 67),
(158, 68),
(158, 69),
(158, 70),
(158, 71),
(158, 72),
(158, 73),
(158, 74),
(158, 75),
(158, 76),
(158, 77),
(158, 78),
(158, 79),
(158, 80),
(158, 81),
(158, 82),
(158, 83),
(157, 83),
(156, 83),
(155, 83),
(154, 83),
(153, 83),
(152, 83),
(151, 83),
(150, 83),
(149, 83),
(148, 83),
(147, 83),
(146, 83),
(146, 82),
(146, 81),
(146, 80),
(146, 79),
(146, 78),
(146, 77),
(146, 76),
(146, 75),
(146, 74),
(146, 73),
(146, 72),
(146, 71),
(146, 70),
(146, 69),
(146, 68),
(146, 67),
(146, 66),
(146, 65),
(146, 64),
(146, 63),
(146, 62),
(146, 61),
(146, 60),
(146, 59),
(146, 58),
(146, 57),
(146, 56),
(146, 55),
(146, 54),
(146, 53),
(146, 52),
(146, 51),
(146, 50),
(146, 49),
(146, 48),
(146, 47),
(146, 46),
(146, 45),
(146, 44),
(146, 43),
(146, 42),
(146, 41),
(146, 40),
(146, 39),
(146, 38),
(146, 37),
(146, 36),
(146, 35),
(146, 34),
(147, 34),
(148, 34),
(149, 34),
(150, 34),
(151, 34),
(152, 34),
(153, 34),
(154, 34),
(155, 34),
(156, 34),
(157, 34),
(158, 34),
(159, 34),
(160, 34),
(161, 34),
(162, 34),
(163, 34),
(164, 34),
(165, 34),
(166, 34),
(167, 34),
(168, 34),
(169, 34),
(170, 34),
(171, 34),
(172, 34),
(173, 34),
(174, 34),
(175, 34),
(176, 34),
(177, 34),
(178, 34),
(179, 34),
(180, 34),
(181, 34),
(182, 34),
(183, 34),
(184, 34),
(185, 34),
(186, 34),
(187, 34),
(188, 34),
(189, 34),
(190, 34),
(191, 34),
(192, 34),
(193, 34),
(194, 34),
(195, 34),
(196, 34),
(197, 34),
(198, 34),
(199, 34),
(200, 34),
(201, 34),
(202, 34),
| |
# From annacrombie/meson (meson unit tests).
# Copyright 2016-2021 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import tempfile
import os
import shutil
import unittest
from contextlib import contextmanager
from mesonbuild.mesonlib import (
MachineChoice, is_windows, is_osx, windows_proof_rmtree, windows_proof_rm
)
from mesonbuild.compilers import (
detect_objc_compiler, detect_objcpp_compiler
)
from mesonbuild.mesonlib import EnvironmentException, MesonException
from mesonbuild.programs import ExternalProgram
from run_tests import (
get_fake_env
)
from .baseplatformtests import BasePlatformTests
from .helpers import *
@contextmanager
def no_pkgconfig():
    '''
    A context manager that overrides shutil.which and ExternalProgram to
    force them to return None for pkg-config, simulating it not existing.

    Restores both hooks on exit, even if the body raises.
    '''
    old_which = shutil.which
    old_search = ExternalProgram._search

    def new_search(self, name, search_dir):
        # Pretend the program search found nothing, but only for pkg-config.
        if name == 'pkg-config':
            return [None]
        return old_search(self, name, search_dir)

    def new_which(cmd, *args, **kwargs):
        # Fix: the original declared `*kwargs` (a misnamed positional tuple)
        # and dropped keyword arguments entirely, so shutil.which calls using
        # mode= or path= inside the context would raise TypeError. Accept and
        # forward both positional and keyword arguments.
        if cmd == 'pkg-config':
            return None
        return old_which(cmd, *args, **kwargs)

    shutil.which = new_which
    ExternalProgram._search = new_search
    try:
        yield
    finally:
        shutil.which = old_which
        ExternalProgram._search = old_search
class FailureTests(BasePlatformTests):
'''
Tests that test failure conditions. Build files here should be dynamically
generated and static tests should go into `test cases/failing*`.
This is useful because there can be many ways in which a particular
function can fail, and creating failing tests for all of them is tedious
and slows down testing.
'''
dnf = "[Dd]ependency.*not found(:.*)?"
nopkg = '[Pp]kg-config.*not found'
    def setUp(self):
        """Create a scratch source dir plus paths for the generated
        meson.build and meson_options.txt used by each test."""
        super().setUp()
        self.srcdir = os.path.realpath(tempfile.mkdtemp())
        self.mbuild = os.path.join(self.srcdir, 'meson.build')
        self.moptions = os.path.join(self.srcdir, 'meson_options.txt')
    def tearDown(self):
        """Remove the scratch dir (retry-based helper copes with Windows
        file locking)."""
        super().tearDown()
        windows_proof_rmtree(self.srcdir)
    def assertMesonRaises(self, contents, match, *,
                          extra_args=None,
                          langs=None,
                          meson_version=None,
                          options=None,
                          override_envvars=None):
        '''
        Assert that running meson configure on the specified @contents raises
        an error message matching regex @match.

        contents: meson.build body appended after the generated project()
            and add_languages() lines.
        extra_args: extra command-line arguments for the configure step.
        langs: languages added with ``required : false``.
        meson_version: optional meson_version kwarg for project().
        options: if given, written out as meson_options.txt.
        override_envvars: env overrides; MESON_FORCE_BACKTRACE is always
            forced on so the real exception type reaches assertRaisesRegex.
        '''
        if langs is None:
            langs = []
        with open(self.mbuild, 'w', encoding='utf-8') as f:
            f.write("project('failure test', 'c', 'cpp'")
            if meson_version:
                f.write(f", meson_version: '{meson_version}'")
            f.write(")\n")
            for lang in langs:
                f.write(f"add_languages('{lang}', required : false)\n")
            f.write(contents)
        if options is not None:
            with open(self.moptions, 'w', encoding='utf-8') as f:
                f.write(options)
        o = {'MESON_FORCE_BACKTRACE': '1'}
        if override_envvars is None:
            override_envvars = o
        else:
            override_envvars.update(o)
        # Force tracebacks so we can detect them properly
        with self.assertRaisesRegex(MesonException, match, msg=contents):
            # Must run in-process or we'll get a generic CalledProcessError
            self.init(self.srcdir, extra_args=extra_args,
                      inprocess=True,
                      override_envvars = override_envvars)
    def obtainMesonOutput(self, contents, match, extra_args, langs, meson_version=None):
        '''
        Configure a generated project containing @contents in-process and
        return the captured meson output.

        NOTE(review): @match is accepted but unused in this method; it is
        kept so assertMesonOutputs/assertMesonDoesNotOutput can pass their
        regex through positionally without breaking the signature.
        '''
        if langs is None:
            langs = []
        with open(self.mbuild, 'w', encoding='utf-8') as f:
            f.write("project('output test', 'c', 'cpp'")
            if meson_version:
                f.write(f", meson_version: '{meson_version}'")
            f.write(")\n")
            for lang in langs:
                f.write(f"add_languages('{lang}', required : false)\n")
            f.write(contents)
        # Run in-process for speed and consistency with assertMesonRaises
        return self.init(self.srcdir, extra_args=extra_args, inprocess=True)
def assertMesonOutputs(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents outputs
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertRegex(out, match)
def assertMesonDoesNotOutput(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents does not output
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertNotRegex(out, match)
@skipIfNoPkgconfig
def test_dependency(self):
if subprocess.call(['pkg-config', '--exists', 'zlib']) != 0:
raise unittest.SkipTest('zlib not found with pkg-config')
a = (("dependency('zlib', method : 'fail')", "'fail' is invalid"),
("dependency('zlib', static : '1')", "[Ss]tatic.*boolean"),
("dependency('zlib', version : 1)", "Item must be a list or one of <class 'str'>"),
("dependency('zlib', required : 1)", "[Rr]equired.*boolean"),
("dependency('zlib', method : 1)", "[Mm]ethod.*string"),
("dependency('zlibfail')", self.dnf),)
for contents, match in a:
self.assertMesonRaises(contents, match)
def test_apple_frameworks_dependency(self):
if not is_osx():
raise unittest.SkipTest('only run on macOS')
self.assertMesonRaises("dependency('appleframeworks')",
"requires at least one module")
def test_extraframework_dependency_method(self):
code = "dependency('metal', method : 'extraframework')"
if not is_osx():
self.assertMesonRaises(code, self.dnf)
else:
# metal framework is always available on macOS
self.assertMesonOutputs(code, '[Dd]ependency.*metal.*found.*YES')
def test_sdl2_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('sdl2-config'):
raise unittest.SkipTest('sdl2-config found')
self.assertMesonRaises("dependency('sdl2', method : 'sdlconfig')", self.dnf)
if shutil.which('pkg-config'):
self.assertMesonRaises("dependency('sdl2', method : 'pkg-config')", self.dnf)
with no_pkgconfig():
# Look for pkg-config, cache it, then
# Use cached pkg-config without erroring out, then
# Use cached pkg-config to error out
code = "dependency('foobarrr', method : 'pkg-config', required : false)\n" \
"dependency('foobarrr2', method : 'pkg-config', required : false)\n" \
"dependency('sdl2', method : 'pkg-config')"
self.assertMesonRaises(code, self.nopkg)
def test_gnustep_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('gnustep-config'):
raise unittest.SkipTest('gnustep-config found')
self.assertMesonRaises("dependency('gnustep')",
f"(requires a Objc compiler|{self.dnf})",
langs = ['objc'])
def test_wx_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('wx-config-3.0') or shutil.which('wx-config') or shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('wx-config, wx-config-3.0 or wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets')", self.dnf)
self.assertMesonOutputs("dependency('wxwidgets', required : false)",
"Run-time dependency .*WxWidgets.* found: .*NO.*")
def test_wx_dependency(self):
if not shutil.which('wx-config-3.0') and not shutil.which('wx-config') and not shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('Neither wx-config, wx-config-3.0 nor wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets', modules : 1)",
"module argument is not a string")
def test_llvm_dependency(self):
self.assertMesonRaises("dependency('llvm', modules : 'fail')",
f"(required.*fail|{self.dnf})")
def test_boost_notfound_dependency(self):
# Can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost', modules : 1)",
"module.*not a string")
self.assertMesonRaises("dependency('boost', modules : 'fail')",
f"(fail.*not found|{self.dnf})")
def test_boost_BOOST_ROOT_dependency(self):
# Test BOOST_ROOT; can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost')",
f"(boost_root.*absolute|{self.dnf})",
override_envvars = {'BOOST_ROOT': 'relative/path'})
def test_dependency_invalid_method(self):
code = '''zlib_dep = dependency('zlib', required : false)
zlib_dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, ".* is not a config-tool dependency")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_pkgconfig_variable('foo')
'''
self.assertMesonRaises(code, "Method.*pkgconfig.*is invalid.*internal")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, "Method.*configtool.*is invalid.*internal")
def test_objc_cpp_detection(self):
'''
Test that when we can't detect objc or objcpp, we fail gracefully.
'''
env = get_fake_env()
try:
detect_objc_compiler(env, MachineChoice.HOST)
detect_objcpp_compiler(env, MachineChoice.HOST)
except EnvironmentException:
code = "add_languages('objc')\nadd_languages('objcpp')"
self.assertMesonRaises(code, "Unknown compiler")
return
raise unittest.SkipTest("objc and objcpp found, can't test detection failure")
def test_subproject_variables(self):
'''
Test that:
1. The correct message is outputted when a not-required dep is not
found and the fallback subproject is also not found.
2. A not-required fallback dependency is not found because the
subproject failed to parse.
3. A not-found not-required dep with a fallback subproject outputs the
correct message when the fallback subproject is found but the
variable inside it is not.
4. A fallback dependency is found from the subproject parsed in (3)
5. A wrap file from a subproject is used but fails because it does not
contain required keys.
'''
tdir = os.path.join(self.unit_test_dir, '20 subproj dep variables')
stray_file = os.path.join(tdir, 'subprojects/subsubproject.wrap')
if os.path.exists(stray_file):
windows_proof_rm(stray_file)
out = self.init(tdir, inprocess=True)
self.assertRegex(out, r"Neither a subproject directory nor a .*nosubproj.wrap.* file was found")
self.assertRegex(out, r'Function does not take positional arguments.')
self.assertRegex(out, r'Dependency .*somenotfounddep.* from subproject .*subprojects/somesubproj.* found: .*NO.*')
self.assertRegex(out, r'Dependency .*zlibproxy.* from subproject .*subprojects.*somesubproj.* found: .*YES.*')
self.assertRegex(out, r'Missing key .*source_filename.* in subsubproject.wrap')
windows_proof_rm(stray_file)
def test_exception_exit_status(self):
'''
Test exit status on python exception
'''
tdir = os.path.join(self.unit_test_dir, '21 exit status')
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(tdir, inprocess=False, override_envvars = {'MESON_UNIT_TEST': '1', 'MESON_FORCE_BACKTRACE': ''})
self.assertEqual(cm.exception.returncode, 2)
self.wipe()
def test_dict_requires_key_value_pairs(self):
self.assertMesonRaises("dict = {3, 'foo': 'bar'}",
'Only key:value pairs are valid in dict construction.')
self.assertMesonRaises("{'foo': 'bar', 3}",
'Only key:value pairs are valid in dict construction.')
def test_dict_forbids_duplicate_keys(self):
self.assertMesonRaises("dict = {'a': 41, 'a': 42}",
'Duplicate dictionary key: a.*')
def test_dict_forbids_integer_key(self):
self.assertMesonRaises("dict = {3: 'foo'}",
'Key must be a string.*')
def test_using_too_recent_feature(self):
# Here we use a dict, which was introduced in 0.47.0
self.assertMesonOutputs("dict = {}",
".*WARNING.*Project targets.*but.*",
meson_version='>= 0.46.0')
def test_using_recent_feature(self):
# Same as above, except the meson version is now appropriate
self.assertMesonDoesNotOutput("dict = {}",
".*WARNING.*Project targets.*but.*",
meson_version='>= 0.47')
def test_using_too_recent_feature_dependency(self):
self.assertMesonOutputs("dependency('pcap', required: false)",
".*WARNING.*Project targets.*but.*",
meson_version='>= 0.41.0')
def test_vcs_tag_featurenew_build_always_stale(self):
'https://github.com/mesonbuild/meson/issues/3904'
vcs_tag = '''version_data = configuration_data()
version_data.set('PROJVER', '@VCS_TAG@')
vf = configure_file(output : 'version.h.in', configuration: version_data)
f = vcs_tag(input : vf, output : 'version.h')
'''
msg = '.*WARNING:.*feature.*build_always_stale.*custom_target.*'
self.assertMesonDoesNotOutput(vcs_tag, msg, meson_version='>=0.43')
def test_missing_subproject_not_required_and_required(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub2 = subproject('not-found-subproject', required: true)",
""".*Subproject "subprojects/not-found-subproject" required but not found.*""")
def test_get_variable_on_not_found_project(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub1.get_variable('naaa')",
"""Subproject "subprojects/not-found-subproject" disabled can't get_variable on it.""")
def test_version_checked_before_parsing_options(self):
'''
https://github.com/mesonbuild/meson/issues/5281
'''
options | |
"""
mp7 module. Contains the Modpath7List and Modpath7 classes.
"""
import numpy as np
from ..mbase import BaseModel
from ..modflow import Modflow
from ..mf6 import MFModel
from ..pakbase import Package
from .mp7bas import Modpath7Bas
from .mp7sim import Modpath7Sim
from .mp7particledata import CellDataType, NodeParticleData
from .mp7particlegroup import ParticleGroupNodeTemplate
import os
class Modpath7List(Package):
    """
    List package class
    """
    def __init__(self, model, extension="list", unitnumber=None):
        """
        Package constructor.
        """
        if unitnumber is None:
            unitnumber = model.next_unit()
        # Let the ancestor set self.parent, extension, name and unit number.
        Package.__init__(self, model, extension, "LIST", unitnumber)
        # Intentionally not registered via self.parent.add_package(self), so
        # the listing file stays out of get_name_file_entries().
    def write_file(self):
        # The listing file is produced by MODPATH itself; nothing to write.
        return
class Modpath7(BaseModel):
"""
Modpath 7 class.
Parameters
----------
modelname : str, default "modpath7test"
Basename for MODPATH 7 input and output files.
simfile_ext : str, default "mpsim"
Filename extension of the MODPATH 7 simulation file.
namefile_ext : str, default mpnam"
Filename extension of the MODPATH 7 namefile.
version : str, default "modpath7"
String that defines the MODPATH version. Valid versions are
"modpath7" (default).
exe_name : str, default "mp7.exe"
The name of the executable to use.
flowmodel : flopy.modflow.Modflow or flopy.mf6.MFModel object
MODFLOW model object.
headfilename : str, optional
Filename of the MODFLOW output head file. If headfilename is
not provided then it will be set from the flowmodel.
budgetfilename : str, optional
Filename of the MODFLOW output cell-by-cell budget file.
If budgetfilename is not provided then it will be set
from the flowmodel.
model_ws : str, default "."
Model workspace. Directory name to create model data sets.
Default is the current working directory.
verbose : bool, default False
Print additional information to the screen.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('mf2005.nam')
>>> mp = flopy.modpath.Modpath7('mf2005_mp', flowmodel=m)
"""
def __init__(
self,
modelname="modpath7test",
simfile_ext="mpsim",
namefile_ext="mpnam",
version="modpath7",
exe_name="mp7.exe",
flowmodel=None,
headfilename=None,
budgetfilename=None,
model_ws=None,
verbose=False,
):
super().__init__(
modelname,
simfile_ext,
exe_name,
model_ws=model_ws,
verbose=verbose,
)
self.version_types = {"modpath7": "MODPATH 7"}
self.set_version(version)
self.lst = Modpath7List(self)
self.mpnamefile = "{}.{}".format(self.name, namefile_ext)
self.mpbas_file = "{}.mpbas".format(modelname)
if not isinstance(flowmodel, (Modflow, MFModel)):
raise TypeError(
"Modpath7: flow model is not an instance of "
"flopy.modflow.Modflow or flopy.mf6.MFModel. "
"Passed object of type {}".format(type(flowmodel))
)
# if a MFModel instance ensure flowmodel is a MODFLOW 6 GWF model
if isinstance(flowmodel, MFModel):
if (
flowmodel.model_type != "gwf"
and flowmodel.model_type != "gwf6"
):
raise TypeError(
"Modpath7: flow model type must be gwf. "
"Passed model_type is {}.".format(flowmodel.model_type)
)
# set flowmodel and flow_version attributes
self.flowmodel = flowmodel
self.flow_version = self.flowmodel.version
if self.flow_version == "mf6":
# get discretization package
ibound = None
dis = self.flowmodel.get_package("DIS")
if dis is None:
raise Exception(
"DIS, DISV, or DISU packages must be "
"included in the passed MODFLOW 6 model"
)
else:
if dis.package_name.lower() == "dis":
nlay, nrow, ncol = (
dis.nlay.array,
dis.nrow.array,
dis.ncol.array,
)
shape = (nlay, nrow, ncol)
elif dis.package_name.lower() == "disv":
nlay, ncpl = dis.nlay.array, dis.ncpl.array
shape = (nlay, ncpl)
elif dis.package_name.lower() == "disu":
nodes = dis.nodes.array
shape = tuple(
nodes,
)
else:
raise TypeError(
"DIS, DISV, or DISU packages must be "
"included in the passed MODFLOW 6 model"
)
# terminate (for now) if mf6 model does not use dis or disv
if len(shape) < 2:
raise TypeError(
"DIS and DISV are currently the only supported "
"MODFLOW 6 discretization packages that can be "
"used with MODPATH 7"
)
# set ib
ib = dis.idomain.array
# set all ib to active if ib is not defined
if ib is None:
ib = np.ones(shape, np.int32)
# set dis and grbdis file name
dis_file = None
grbdis_file = dis.filename + ".grb"
grbtag = "GRB{}".format(dis.package_name.upper())
tdis = self.flowmodel.simulation.get_package("TDIS")
if tdis is None:
raise Exception(
"TDIS package must be "
"included in the passed MODFLOW 6 model"
)
tdis_file = tdis.filename
# get stress period data
nper = tdis.nper.array
perlen = []
nstp = []
v = tdis.perioddata.array
for pl, ns, tsmult in v:
perlen.append(pl)
nstp.append(ns)
perlen = np.array(perlen, dtype=np.float32)
nstp = np.array(nstp, dtype=np.int32)
# get oc file
oc = self.flowmodel.get_package("OC")
if oc is not None:
# set head file name
if headfilename is None:
headfilename = oc.head_filerecord.array["headfile"][0]
# set budget file name
if budgetfilename is None:
budgetfilename = oc.budget_filerecord.array["budgetfile"][
0
]
else:
shape = None
# extract data from DIS or DISU files and set shape
dis = self.flowmodel.get_package("DIS")
if dis is None:
dis = self.flowmodel.get_package("DISU")
elif dis is not None and shape is None:
nlay, nrow, ncol = dis.nlay, dis.nrow, dis.ncol
shape = (nlay, nrow, ncol)
if dis is None:
raise Exception(
"DIS, or DISU packages must be "
"included in the passed MODFLOW model"
)
elif dis is not None and shape is None:
nlay, nodes = dis.nlay, dis.nodes
shape = (nodes,)
# terminate (for now) if mf6 model does not use dis
if len(shape) != 3:
raise Exception(
"DIS currently the only supported MODFLOW "
"discretization package that can be used with MODPATH 7"
)
# get stress period data
nper = dis.nper
perlen = dis.perlen.array
nstp = dis.nstp.array
# set dis_file
dis_file = dis.file_name[0]
# set grbdis_file
grbdis_file = None
grbtag = None
# set tdis_file
tdis_file = None
# set head file name
if headfilename is None:
iu = self.flowmodel.oc.iuhead
headfilename = self.flowmodel.get_output(unit=iu)
# get discretization package
p = self.flowmodel.get_package("LPF")
if p is None:
p = self.flowmodel.get_package("BCF6")
if p is None:
p = self.flowmodel.get_package("UPW")
if p is None:
raise Exception(
"LPF, BCF6, or UPW packages must be "
"included in the passed MODFLOW model"
)
# set budget file name
if budgetfilename is None:
iu = p.ipakcb
budgetfilename = self.flowmodel.get_output(unit=iu)
# set hnoflo and ibound from BAS6 package
bas = self.flowmodel.get_package("BAS6")
ib = bas.ibound.array
# reset to constant values if possible
ibound = []
for k in range(shape[0]):
i = ib[k].flatten()
if np.all(i == i[0]):
kval = i[0]
else:
kval = ib[k]
ibound.append(kval)
# set dis_file and tdis_file
self.shape = shape
self.dis_file = dis_file
self.grbdis_file = grbdis_file
self.grbtag = grbtag
self.tdis_file = tdis_file
# set temporal data
self.nper = nper
self.time_end = perlen.sum()
self.perlen = perlen
self.nstp = nstp
# set output file names
self.headfilename = headfilename
self.budgetfilename = budgetfilename
# make sure the valid files are available
if self.headfilename is None:
raise ValueError(
"the head file in the MODFLOW model or passed "
"to __init__ cannot be None"
)
if self.budgetfilename is None:
raise ValueError(
"the budget file in the MODFLOW model or passed "
"to __init__ cannot be None"
)
if self.dis_file is None and self.grbdis_file is None:
raise ValueError(
"the dis file in the MODFLOW model or passed "
"to __init__ cannot be None"
)
# set ib and ibound
self.ib = ib
self.ibound = ibound
# set file attributes
self.array_free_format = True
self.array_format = "modflow"
self.external = False
return
def __repr__(self):
return "MODPATH 7 model"
@property
def laytyp(self):
if self.flowmodel.version == "mf6":
icelltype = self.flowmodel.npf.icelltype.array
laytyp = [
icelltype[k].max()
for k in range(self.flowmodel.modelgrid.nlay)
]
else:
p = self.flowmodel.get_package("BCF6")
if p is None:
laytyp = self.flowmodel.laytyp
else:
laytyp = p.laycon.array
return np.array(laytyp, dtype=np.int32)
@property
def hdry(self):
if self.flowmodel.version == "mf6":
return None
else:
return self.flowmodel.hdry
@property
def hnoflo(self):
if self.flowmodel.version == "mf6":
return None
else:
return self.flowmodel.hnoflo
def write_name_file(self):
"""
Write the name file
Returns
-------
None
"""
fpth = os.path.join(self.model_ws, self.mpnamefile)
f = open(fpth, "w")
f.write("{}\n".format(self.heading))
if self.mpbas_file is not None:
f.write("{:10s} {}\n".format("MPBAS", self.mpbas_file))
if self.dis_file is not None:
f.write("{:10s} {}\n".format("DIS", self.dis_file))
if self.grbdis_file is not None:
f.write("{:10s} {}\n".format(self.grbtag, self.grbdis_file))
if self.tdis_file is not None:
f.write("{:10s} {}\n".format("TDIS", self.tdis_file))
if self.headfilename is not None:
f.write("{:10s} {}\n".format("HEAD", self.headfilename))
if self.budgetfilename is not None:
f.write("{:10s} {}\n".format("BUDGET", self.budgetfilename))
f.close()
@classmethod
def create_mp7(
cls,
modelname="modpath7test",
trackdir="forward",
flowmodel=None,
exe_name="mp7",
model_ws=".",
verbose=False,
columncelldivisions=2,
rowcelldivisions=2,
layercelldivisions=2,
nodes=None,
):
"""
Create a default MODPATH 7 model using a passed flowmodel with
8 particles in user-specified node locations or every active model
cell.
Parameters
----------
modelname : str
Basename for MODPATH 7 input | |
# gh_stars: 0
import os
import json
import tempfile
import shutil
from django.core.files.uploadedfile import UploadedFile
from django.test import TestCase, RequestFactory
from django.contrib.auth.models import Group
from django.core.urlresolvers import reverse
from rest_framework import status
from hs_core import hydroshare
from hs_core.hydroshare.utils import resource_post_create_actions
from hs_core.testing import MockIRODSTestCaseMixin
from hs_file_types.views import set_file_type, add_metadata_element, update_metadata_element, \
update_key_value_metadata, delete_key_value_metadata, add_keyword_metadata, \
delete_keyword_metadata, update_netcdf_file
from hs_file_types.models import GeoRasterLogicalFile, NetCDFLogicalFile
class TestFileTypeViewFunctions(MockIRODSTestCaseMixin, TestCase):
def setUp(self):
super(TestFileTypeViewFunctions, self).setUp()
self.group, _ = Group.objects.get_or_create(name='Resource Author')
self.username = 'john'
self.password = '<PASSWORD>'
self.user = hydroshare.create_account(
'<EMAIL>',
username=self.username,
first_name='John',
last_name='Clarson',
superuser=False,
password=self.password,
groups=[]
)
self.composite_resource = hydroshare.create_resource(
resource_type='CompositeResource',
owner=self.user,
title='Test Raster File Metadata'
)
self.factory = RequestFactory()
self.temp_dir = tempfile.mkdtemp()
self.raster_file_name = 'small_logan.tif'
self.raster_file = 'hs_file_types/tests/{}'.format(self.raster_file_name)
target_temp_raster_file = os.path.join(self.temp_dir, self.raster_file_name)
shutil.copy(self.raster_file, target_temp_raster_file)
self.raster_file_obj = open(target_temp_raster_file, 'r')
self.netcdf_file_name = 'netcdf_valid.nc'
self.netcdf_file = 'hs_file_types/tests/{}'.format(self.netcdf_file_name)
target_temp_netcdf_file = os.path.join(self.temp_dir, self.netcdf_file_name)
shutil.copy(self.netcdf_file, target_temp_netcdf_file)
self.netcdf_file_obj = open(target_temp_netcdf_file, 'r')
def tearDown(self):
super(TestFileTypeViewFunctions, self).tearDown()
if os.path.exists(self.temp_dir):
shutil.rmtree(self.temp_dir)
def test_set_raster_file_type(self):
# here we are using a valid raster tif file for setting it
# to Geo Raster file type which includes metadata extraction
self.raster_file_obj = open(self.raster_file, 'r')
self._create_composite_resource(self.raster_file_obj)
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
# check that the resource file is associated with GenericLogicalFile
self.assertEqual(res_file.has_logical_file, True)
self.assertEqual(res_file.logical_file_type_name, "GenericLogicalFile")
url_params = {'resource_id': self.composite_resource.short_id,
'file_id': res_file.id,
'hs_file_type': 'GeoRaster'
}
url = reverse('set_file_type', kwargs=url_params)
request = self.factory.post(url)
request.user = self.user
# this is the view function we are testing
response = set_file_type(request, resource_id=self.composite_resource.short_id,
file_id=res_file.id, hs_file_type='GeoRaster')
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_dict = json.loads(response.content)
self.assertIn("File was successfully set to selected file type.",
response_dict['message'])
# there should be 2 file now (vrt file was generated by the system
self.assertEqual(self.composite_resource.files.all().count(), 2)
res_file = self.composite_resource.files.first()
self.assertEqual(res_file.logical_file_type_name, "GeoRasterLogicalFile")
self.composite_resource.delete()
def test_set_netcdf_file_type(self):
# here we are using a valid netcdf file for setting it
# to NetCDF file type which includes metadata extraction
self.netcdf_file_obj = open(self.netcdf_file, 'r')
self._create_composite_resource(self.netcdf_file_obj)
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
# check that the resource file is associated with GenericLogicalFile
self.assertEqual(res_file.has_logical_file, True)
self.assertEqual(res_file.logical_file_type_name, "GenericLogicalFile")
url_params = {'resource_id': self.composite_resource.short_id,
'file_id': res_file.id,
'hs_file_type': 'NetCDF'
}
url = reverse('set_file_type', kwargs=url_params)
request = self.factory.post(url)
request.user = self.user
# this is the view function we are testing
response = set_file_type(request, resource_id=self.composite_resource.short_id,
file_id=res_file.id, hs_file_type='NetCDF')
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_dict = json.loads(response.content)
self.assertIn("File was successfully set to selected file type.",
response_dict['message'])
# there should be 2 file now (vrt file was generated by the system
self.assertEqual(self.composite_resource.files.all().count(), 2)
res_file = self.composite_resource.files.first()
self.assertEqual(res_file.logical_file_type_name, "NetCDFLogicalFile")
self.composite_resource.delete()
    def test_add_update_metadata_to_raster_file_type(self):
        """Temporal coverage can be added to, then updated on, a GeoRaster
        file type via the add/update metadata view functions."""
        self.raster_file_obj = open(self.raster_file, 'r')
        self._create_composite_resource(self.raster_file_obj)
        res_file = self.composite_resource.files.first()
        # set the tif file to GeoRasterFile type
        GeoRasterLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
        res_file = self.composite_resource.files.first()
        logical_file = res_file.logical_file
        self.assertEqual(res_file.logical_file_type_name, "GeoRasterLogicalFile")
        # no temporal coverage for the raster file type yet
        self.assertEqual(logical_file.metadata.temporal_coverage, None)
        # add temporal coverage through the add_metadata_element view
        url_params = {'hs_file_type': 'GeoRasterLogicalFile',
                      'file_type_id': logical_file.id,
                      'element_name': 'coverage'
                      }
        url = reverse('add_file_metadata', kwargs=url_params)
        request = self.factory.post(url, data={'start': '1/1/2010', 'end': '12/12/2015'})
        request.user = self.user
        # this is the view function we are testing
        response = add_metadata_element(request, hs_file_type="GeoRasterLogicalFile",
                                        file_type_id=logical_file.id, element_name='coverage')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        response_dict = json.loads(response.content)
        self.assertEqual('success', response_dict['status'])
        # now the raster file should have a temporal coverage element
        self.assertNotEqual(logical_file.metadata.temporal_coverage, None)
        # test updating the temporal coverage through the update view
        url_params['element_id'] = logical_file.metadata.temporal_coverage.id
        url = reverse('update_file_metadata', kwargs=url_params)
        request = self.factory.post(url, data={'start': '1/1/2011', 'end': '12/12/2016'})
        request.user = self.user
        # this is the view function we are testing
        response = update_metadata_element(request, hs_file_type="GeoRasterLogicalFile",
                                           file_type_id=logical_file.id, element_name='coverage',
                                           element_id=logical_file.metadata.temporal_coverage.id)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        response_dict = json.loads(response.content)
        self.assertEqual('success', response_dict['status'])
        # dates are stored in ISO format regardless of the posted format
        temporal_coverage = logical_file.metadata.temporal_coverage
        self.assertEqual(temporal_coverage.value['start'], '2011-01-01')
        self.assertEqual(temporal_coverage.value['end'], '2016-12-12')
        self.composite_resource.delete()
    def test_add_update_metadata_to_netcdf_file_type(self):
        """Temporal coverage, original coverage, spatial coverage and
        variable elements of a NetCDF file type can all be updated via the
        update_metadata_element view function."""
        self.netcdf_file_obj = open(self.netcdf_file, 'r')
        self._create_composite_resource(self.netcdf_file_obj)
        res_file = self.composite_resource.files.first()
        # set the nc file to NetCDF File type
        NetCDFLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
        res_file = self.composite_resource.files.first()
        logical_file = res_file.logical_file
        self.assertEqual(res_file.logical_file_type_name, "NetCDFLogicalFile")
        # there should be temporal coverage for the netcdf file type
        # (extracted from the file itself during set_file_type)
        self.assertNotEqual(logical_file.metadata.temporal_coverage, None)
        temporal_coverage = logical_file.metadata.temporal_coverage
        self.assertEqual(temporal_coverage.value['start'], '2009-10-01 00:00:00')
        self.assertEqual(temporal_coverage.value['end'], '2010-05-30 23:00:00')
        url_params = {'hs_file_type': 'NetCDFLogicalFile',
                      'file_type_id': logical_file.id,
                      'element_name': 'coverage',
                      'element_id': logical_file.metadata.temporal_coverage.id
                      }
        # test updating temporal coverage
        url = reverse('update_file_metadata', kwargs=url_params)
        request = self.factory.post(url, data={'start': '1/1/2011', 'end': '12/12/2016'})
        request.user = self.user
        # this is the view function we are testing
        response = update_metadata_element(request, hs_file_type="NetCDFLogicalFile",
                                           file_type_id=logical_file.id, element_name='coverage',
                                           element_id=logical_file.metadata.temporal_coverage.id)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        response_dict = json.loads(response.content)
        self.assertEqual('success', response_dict['status'])
        # dates are stored in ISO format regardless of the posted format
        temporal_coverage = logical_file.metadata.temporal_coverage
        self.assertEqual(temporal_coverage.value['start'], '2011-01-01')
        self.assertEqual(temporal_coverage.value['end'], '2016-12-12')
        # test updating OriginalCoverage element
        # there should be original coverage for the netcdf file type
        self.assertNotEqual(logical_file.metadata.original_coverage, None)
        orig_coverage = logical_file.metadata.original_coverage
        self.assertEqual(orig_coverage.value['northlimit'], '4.63515e+06')
        coverage_data = {'northlimit': '111.333', 'southlimit': '42.678', 'eastlimit': '123.789',
                         'westlimit': '40.789', 'units': 'meters'}
        url_params['element_name'] = 'originalcoverage'
        url_params['element_id'] = logical_file.metadata.original_coverage.id
        url = reverse('update_file_metadata', kwargs=url_params)
        request = self.factory.post(url, data=coverage_data)
        request.user = self.user
        # this is the view function we are testing
        response = update_metadata_element(request, hs_file_type="NetCDFLogicalFile",
                                           file_type_id=logical_file.id,
                                           element_name='originalcoverage',
                                           element_id=logical_file.metadata.original_coverage.id)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        response_dict = json.loads(response.content)
        self.assertEqual('success', response_dict['status'])
        orig_coverage = logical_file.metadata.original_coverage
        self.assertEqual(orig_coverage.value['northlimit'], '111.333')
        # test updating spatial coverage
        # there should be spatial coverage for the netcdf file type
        self.assertNotEqual(logical_file.metadata.spatial_coverage, None)
        spatial_coverage = logical_file.metadata.spatial_coverage
        self.assertEqual(spatial_coverage.value['northlimit'], 41.867126409)
        coverage_data = {'type': 'box', 'projection': 'WGS 84 EPSG:4326', 'northlimit': '41.87',
                         'southlimit': '41.863',
                         'eastlimit': '-111.505',
                         'westlimit': '-111.511', 'units': 'meters'}
        url_params['element_name'] = 'coverage'
        url_params['element_id'] = spatial_coverage.id
        url = reverse('update_file_metadata', kwargs=url_params)
        request = self.factory.post(url, data=coverage_data)
        request.user = self.user
        # this is the view function we are testing
        response = update_metadata_element(request, hs_file_type="NetCDFLogicalFile",
                                           file_type_id=logical_file.id,
                                           element_name='coverage',
                                           element_id=spatial_coverage.id)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        response_dict = json.loads(response.content)
        self.assertEqual('success', response_dict['status'])
        spatial_coverage = logical_file.metadata.spatial_coverage
        self.assertEqual(spatial_coverage.value['northlimit'], 41.87)
        # test updating a Variable element
        variable = logical_file.metadata.variables.first()
        variable_data = {'name': 'variable_name_updated', 'type': 'Int', 'unit': 'deg F',
                         'shape': 'variable_shape'}
        url_params['element_name'] = 'variable'
        url_params['element_id'] = variable.id
        url = reverse('update_file_metadata', kwargs=url_params)
        request = self.factory.post(url, data=variable_data)
        request.user = self.user
        # this is the view function we are testing
        response = update_metadata_element(request, hs_file_type="NetCDFLogicalFile",
                                           file_type_id=logical_file.id,
                                           element_name='variable',
                                           element_id=variable.id)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        response_dict = json.loads(response.content)
        self.assertEqual('success', response_dict['status'])
        # re-fetch the variable to confirm the update was persisted
        variable = logical_file.metadata.variables.all().filter(id=variable.id).first()
        self.assertEqual(variable.name, 'variable_name_updated')
        self.composite_resource.delete()
    def test_CRUD_key_value_metadata_raster_file_type(self):
        """Key/value (extra) metadata of a GeoRaster file type can be
        created, updated (key and/or value) and deleted via the
        update/delete key-value view functions."""
        self.raster_file_obj = open(self.raster_file, 'r')
        self._create_composite_resource(self.raster_file_obj)
        res_file = self.composite_resource.files.first()
        # set the tif file to GeoRasterFile type
        GeoRasterLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
        res_file = self.composite_resource.files.first()
        logical_file = res_file.logical_file
        self.assertEqual(res_file.logical_file_type_name, "GeoRasterLogicalFile")
        # no key/value metadata for the raster file type yet
        self.assertEqual(logical_file.metadata.extra_metadata, {})
        url_params = {'hs_file_type': 'GeoRasterLogicalFile',
                      'file_type_id': logical_file.id
                      }
        url = reverse('update_file_keyvalue_metadata', kwargs=url_params)
        request = self.factory.post(url, data={'key': 'key-1', 'value': 'value-1'})
        request.user = self.user
        # this is the view function we are testing
        response = update_key_value_metadata(request, hs_file_type="GeoRasterLogicalFile",
                                             file_type_id=logical_file.id)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        response_dict = json.loads(response.content)
        self.assertEqual('success', response_dict['status'])
        # there should be key/value metadata for the raster file type now
        res_file = self.composite_resource.files.first()
        logical_file = res_file.logical_file
        self.assertNotEqual(logical_file.metadata.extra_metadata, {})
        self.assertEqual(logical_file.metadata.extra_metadata['key-1'], 'value-1')
        # update existing key value metadata - updating both key and value
        # (key_original identifies the entry being renamed)
        request = self.factory.post(url, data={'key': 'key-2', 'value': 'value-2',
                                               'key_original': 'key-1'})
        request.user = self.user
        response = update_key_value_metadata(request, hs_file_type="GeoRasterLogicalFile",
                                             file_type_id=logical_file.id)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        response_dict = json.loads(response.content)
        self.assertEqual('success', response_dict['status'])
        res_file = self.composite_resource.files.first()
        logical_file = res_file.logical_file
        self.assertEqual(logical_file.metadata.extra_metadata['key-2'], 'value-2')
        self.assertNotIn('key-1', logical_file.metadata.extra_metadata.keys())
        # update existing key value metadata - updating value only
        request = self.factory.post(url, data={'key': 'key-2', 'value': 'value-1',
                                               'key_original': 'key-2'})
        request.user = self.user
        response = update_key_value_metadata(request, hs_file_type="GeoRasterLogicalFile",
                                             file_type_id=logical_file.id)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        response_dict = json.loads(response.content)
        self.assertEqual('success', response_dict['status'])
        res_file = self.composite_resource.files.first()
        logical_file = res_file.logical_file
        self.assertEqual(logical_file.metadata.extra_metadata['key-2'], 'value-1')
        # delete key/value data using the view function
        request = self.factory.post(url, data={'key': 'key-2'})
        request.user = self.user
        # this is the view function we are testing
        response = delete_key_value_metadata(request, hs_file_type="GeoRasterLogicalFile",
                                             file_type_id=logical_file.id)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        response_dict = json.loads(response.content)
        self.assertEqual('success', response_dict['status'])
        res_file = self.composite_resource.files.first()
        logical_file = res_file.logical_file
        # at this point there should not be any key/value metadata
        self.assertEqual(logical_file.metadata.extra_metadata, {})
        self.composite_resource.delete()
def test_CRUD_key_value_metadata_netcdf_file_type(self):
self.netcdf_file_obj = open(self.netcdf_file, 'r')
self._create_composite_resource(self.netcdf_file_obj)
res_file = self.composite_resource.files.first()
# set the nc file to NetCDF file type
NetCDFLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user)
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
self.assertEqual(res_file.logical_file_type_name, "NetCDFLogicalFile")
# no key/value metadata for the netcdf file type yet
self.assertEqual(logical_file.metadata.extra_metadata, {})
url_params = {'hs_file_type': 'NetCDFLogicalFile',
'file_type_id': logical_file.id
}
url = reverse('update_file_keyvalue_metadata', kwargs=url_params)
request = self.factory.post(url, data={'key': 'key-1', 'value': 'value-1'})
request.user = self.user
# this is the view function we are testing
response = update_key_value_metadata(request, hs_file_type="NetCDFLogicalFile",
file_type_id=logical_file.id)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_dict = json.loads(response.content)
self.assertEqual('success', response_dict['status'])
# there should be key/value metadata for the raster file type yet
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
self.assertNotEqual(logical_file.metadata.extra_metadata, {})
self.assertEqual(logical_file.metadata.extra_metadata['key-1'], 'value-1')
# update existing key value metadata - updating both key and value
request = self.factory.post(url, data={'key': 'key-2', 'value': 'value-2',
'key_original': 'key-1'})
request.user = self.user
response = update_key_value_metadata(request, hs_file_type="NetCDFLogicalFile",
file_type_id=logical_file.id)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_dict = json.loads(response.content)
self.assertEqual('success', response_dict['status'])
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
self.assertEqual(logical_file.metadata.extra_metadata['key-2'], 'value-2')
self.assertNotIn('key-1', logical_file.metadata.extra_metadata.keys())
# update existing key value metadata - updating value only
request = self.factory.post(url, data={'key': 'key-2', 'value': | |
# file: autoreduce/local_sensitivity.py
from .system import System
from autoreduce import utils
import numpy as np
# from .ode import ODE
from scipy.integrate import solve_ivp, odeint
from sympy import lambdify
class SSM(System):
'''
Class that computes local sensitivity
analysis coefficients for the given Model using a numerical
approximation method discussed in
doi: https://doi.org/10.1016/0021-9991(76)90007-3
Uses numerical method to find sensitivity analysis matrix (SSM).
Both the Jacobian matrix and the Z matrix are estimated using 4th
order central difference as given in the paper.
'''
def __init__(self, x, f, params = None,
C = None, g = None, h = None, u = None,
params_values = None, x_init = None, timepoints = None):
super().__init__(x, f, params, C, g, h, u, params_values, x_init)
if timepoints is None:
timepoints = []
else:
self.timepoints = timepoints
return
def compute_Zj(self, x, j, **kwargs):
'''
Compute Z_j, i.e. df/dp_j at a particular
timepoint k for the parameter p_j.
Returns a vector of size n x 1.
Use mode = 'accurate' for this object attribute
to use accurate computations using numdifftools.
'''
# if 'mode' in kwargs:
# if kwargs.get('mode') == 'accurate':
# del kwargs['mode']
# return self.sensitivity_to_parameter(x, j, **kwargs)
# initialize Z
Z = np.zeros(self.n)
P_holder = self.params_values
# For each state
for i in range(self.n):
P = P_holder
F = np.zeros( (4,1) ) # For 4th order difference
h = P[j]*0.01 # Small parameter for this parameter
# Gets O(4) central difference on dfi/dpj
if h != 0:
P[j] = P_holder[j] + 2*h
f = self.evaluate(self.f, x, P)
F[0] = f[i]
P[j] = P_holder[j] + h
f = self.evaluate(self.f, x, P)
F[1] = f[i]
P[j] = P_holder[j] - h
f = self.evaluate(self.f, x, P)
F[2] = f[i]
P[j] = P_holder[j] - 2*h
f = self.evaluate(self.f, x, P)
F[3] = f[i]
#Store approx. dfi/dpj into Z
Z[i] = (-F[0] + 8*F[1] - 8*F[2] + F[3])/(12*h)
if Z[i] == np.Inf:
Z[i] = 1
elif Z[i] == np.NaN:
Z[i] = 0
return Z
def compute_J(self, x, **kwargs):
'''
Compute the Jacobian J = df/dx at a timepoint k.
Returns a matrix of size n x n.
Use mode = 'accurate' for this object attribute
to use accurate computations using numdifftools.
'''
if 'fun' in kwargs:
fun = kwargs.get('fun')
else:
fun = self.f
if 'var' in kwargs:
var = kwargs.get('var')
else:
var = x
# initialize J
J = np.zeros( (self.n, len(var)) )
P = self.params_values
u = self.u
if 'mode' in kwargs:
if kwargs.get('mode') == 'accurate':
del kwargs['mode']
try:
import numdifftools as nd
except:
raise ValueError('The package numdifftools is not' +
'installed for this method to work.')
fun_l = lambdify((self.x, self.params), fun)
def fun_ode(t, x, params):
y = fun_l(x, params)
return np.array(y)
jfun = nd.Jacobian(lambda x: fun_ode(0, x, P), **kwargs)
return jfun(x)
# store the variable with respect to which we approximate the differentiation (df/dvar)
X = var
for i in range(self.n):
for j in range(len(var)):
F = np.zeros( (4,1) )
h = X[j]*0.01
# Gets O(4) central difference on dfi/dvarj
if h != 0:
var = X
var[j] = X[j] + 2*h
f = self.evaluate(fun, var, P, u)
F[0] = f[i]
var[j] = X[j] + h
f = self.evaluate(fun, var, P, u)
F[1] = f[i]
var[j] = X[j] - h
f = self.evaluate(fun, var, P, u)
F[2] = f[i]
var[j] = X[j] - 2*h
f = self.evaluate(fun, var, P, u)
F[3] = f[i]
#Store approvar. dfi/dvarj into J
J[i,j]= (-F[0] + 8*F[1] - 8*F[2] + F[3])/(12*h)
# print(J[i,j])
# if J[i,j] == np.Inf:
# J[i,j] = 1
# elif J[i,j] == np.NaN:
# J[i,j] = 0
return J
    def compute_SSM(self, normalize = False, **kwargs):
        """
        Returns the sensitivity coefficients S_j for each parameter p_j,
        written into a sensitivity matrix SSM of size
        len(timepoints) x len(params) x n.
        If the normalize argument is True, the coefficients are normalized
        by the nominal value of each parameter (see normalize_SSM).
        Keyword arguments are forwarded to compute_J / compute_Zj; pass
        mode = 'accurate_SSM' to delegate to solve_extended_ode instead.
        """
        if 'mode' in kwargs:
            if kwargs.get('mode') == 'accurate_SSM':
                return self.solve_extended_ode(**kwargs)
        def sens_func(t, x, J, Z):
            # forms the ODE dS/dt = J*S + Z solved for each sensitivity S
            dsdt = J@x + Z
            return dsdt
        P = self.params_values
        S0 = np.zeros(self.n) # Initial value for S_i (zero sensitivity at t0)
        SSM = np.zeros( (len(self.timepoints), len(P), self.n) )
        # solve for all x's in the timeframe set by timepoints
        system_obj = self.get_system()
        sol = utils.get_ODE(system_obj, self.timepoints).solve_system().T
        xs = sol
        xs = np.reshape(xs,(len(self.timepoints), self.n))
        self.xs = xs
        # Solve for SSM at each time point.
        # NOTE(review): each k re-integrates the sensitivity ODE from t0
        # over the growing window timepoints[0:k+1] (O(T^2) solver calls),
        # keeping only the final row; confirm this is intended.
        for k in range(len(self.timepoints)):
            # print('for timepoint',self.timepoints[k])
            timepoints = self.timepoints[0:k+1]
            if len(timepoints) == 1:
                # a single point cannot be integrated; SSM[0] stays zero
                continue
            # get the Jacobian J = df/dx at the state for this timepoint
            J = self.compute_J(xs[k,:], **kwargs)
            #Solve for S = dx/dp for all x and all P (or theta, the parameters) at time point k
            for j in range(len(P)):
                utils.printProgressBar(int(j + k*len(P)), len(self.timepoints)*len(P) - 1,
                                       prefix = 'SSM Progress:', suffix = 'Complete',
                                       length = 50)
                # print('for parameter',P[j])
                # get Z_j = df/dp_j at the state for this timepoint
                Zj = self.compute_Zj(xs[k,:], j, **kwargs)
                # solve the linear sensitivity ODE for S over the window
                sens_func_ode = lambda t, x : sens_func(t, x, J, Zj)
                sol = odeint(sens_func_ode, S0, timepoints, tfirst = True)
                S = sol
                S = np.reshape(S, (len(timepoints), self.n))
                # keep only the sensitivity at the window's final time
                SSM[k,j,:] = S[k,:]
        self.SSM = SSM
        if normalize:
            SSM = self.normalize_SSM() #Identifiability was estimated using a normalized SSM
        return SSM
def normalize_SSM(self):
'''
Returns normalized sensitivity coefficients.
Multiplies each sensitivity coefficient with
the corresponding parameter p_j
Divides the result by the corresponding state to
obtain the normalized coefficient that is returned.
'''
SSM_normalized = np.zeros(np.shape(self.SSM))
for j in range(len(self.params_values)):
for i in range(self.n):
SSM_normalized[:,j,i] = np.divide(self.SSM[:,j,i]*self.params_values[j], self.xs[:,i])
self.SSM_normalized = SSM_normalized
return SSM_normalized
def get_system(self):
return System(self.x, self.f, self.params, self.C, self.g,
self.h, self.u, self.params_values, self.x_init)
# ---------- contributed helper (author name redacted in source) ----------
    def sensitivity_to_parameter(self, x, j, **kwargs):
        '''
        Calculates the response of each derivative (defined by ode) to changes
        in a single parameter (the jth one) at point x.

        NOTE(review): as written this method cannot run -- it references the
        names `ode_sol`, `ode`, `ode_jac`, `params` and `n_vars`, none of
        which are defined in this scope (the option list below suggests they
        were meant to arrive via keyword arguments). Confirm intent before
        relying on it.

        keyword argument options?:
        ode_sol - An OdeSolution object holding a continuously-interpolated
                    solution for ode.
        ode_jac - Jacobian of the ode, as calculated by
                    numdifftools.Jacobian.
        ode - The ODE for the system, of the form ode(t, x, params)
        params - A list of parameters to feed to ode.
        p - The index of the parameter to calculate sensitivities to.
        t_min - Starting time.
        t_max - Ending time.
        returns: An OdeSolution object representing the (continously-interpolated)
                    sensitivity of each variable to the specified parameter over
                    time.
        '''
        # Build a scipy-integratable derivative-of-sensitivity function.
        import numdifftools as nd
        import copy
        def dS_dt(t, s):
            xs = ode_sol(t)  # NOTE(review): `ode_sol` is undefined in this scope
            # Wrapper to let numdifftools calculate df/dp.
            def ode_as_parameter_call(param):
                call_params = copy.deepcopy(self.params_values)
                # NOTE(review): this assigns the entire parameter list into
                # slot j -- presumably it should be `param` (the perturbed
                # value); verify against the original intent
                call_params[j] = self.params_values
                return ode(t, xs, call_params)  # NOTE(review): `ode` undefined
            df_dp = lambda xs: nd.Jacobian(ode_as_parameter_call)(xs).transpose()[:,0]
            # NOTE(review): `params` and `ode_jac` are also undefined here
            return df_dp(params[j]) + np.matmul(ode_jac(xs), s)
        # NOTE(review): `n_vars` undefined; likely intended to be self.n
        sol = odeint(dS_dt, np.zeros(n_vars), self.timepoints, **kwargs)
        return sol
# ---------- contributed helper (author name redacted in source) ----------
def solve_extended_ode(self, ode = None, params = None, t_min = None, t_max = None, init = None, method = "RK45"):
'''
Augments an ODE system (as a scipy-integratable function) into an ODE
representing the original ODE plus sensitivities, then solves them all.
The key equation here is, for a system dx/dt = f(x, p, t),
dS_j/dt = f_j + J*S_j
where S_j is the vector of sensitivities of xs to parameter j, f_j is the
vector of derivatives df/dp_j, and J is the Jacobian of f w.r.t. xs.
params:
ode - An ode to solve, with a signature ode(t, xs, parameters).
params - a vector of parameters around which to calculate sensitivity.
t_min - Starting time.
t_max - Ending time.
init - Initial conditions for the ode.
method - ODE solving method, passed directly to
scipy.integrate.odeint.
Returns: (x_sols, sensitivities)
x_sols - An OdeSolution object with the solution to the original ODE.
Shape of a solution is (n_variables, n_times)
| |
#!/usr/bin/env python
u"""
infer_minor_corrections.py (08/2020)
Return correction for minor constituents based on Richard Ray's PERTH3 code
PERTH: PREdict Tidal Heights
CALLING SEQUENCE:
dh = infer_minor_corrections(t,zmajor,constituents)
INPUTS:
t: days relative to Jan 1, 1992 (48622 MJD)
zmajor: Complex HC for given constituents/points
constituents: tidal constituent IDs
OUTPUT:
dh: height from minor constituents
OPTIONS:
DELTAT: time correction for converting to Ephemeris Time (days)
CORRECTIONS: use nodal corrections from OTIS/ATLAS or GOT models
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
PROGRAM DEPENDENCIES:
calc_astrol_longitudes.py: computes the basic astronomical mean longitudes
REFERENCES:
<NAME> and <NAME>, "Admiralty Manual of Tides", HMSO, (1941).
<NAME>, "Manual of Harmonic Analysis and Prediction of Tides"
US Coast and Geodetic Survey, Special Publication, 98, (1958).
<NAME> and <NAME>, "The harmonic analysis of tidal model
time series", Advances in Water Resources, 12, (1989).
UPDATE HISTORY:
Updated 08/2020: change time variable names to not overwrite functions
update nodal corrections for FES models
Updated 07/2020: added function docstrings
reduce list of minor constituents if in list of major values
Updated 11/2019: output as numpy masked arrays instead of nan-filled arrays
Updated 08/2018: added correction option ATLAS for localized OTIS solutions
Updated 07/2018: added option to use GSFC GOT nodal corrections
use the number of dates if calculating a tidal time series at a point
Updated 09/2017: Rewritten in Python
"""
import numpy as np
from pyTMD.calc_astrol_longitudes import calc_astrol_longitudes
def infer_minor_corrections(t,zmajor,constituents,DELTAT=0.0,CORRECTIONS=''):
"""
Calculate the tidal corrections for minor constituents inferred using
major constituents
Arguments
---------
t: days relative to 1992-01-01T00:00:00
zmajor: Complex HC for given constituents/points
constituents: tidal constituent IDs
Keyword arguments
-----------------
DELTAT: time correction for converting to Ephemeris Time (days)
CORRECTIONS: use nodal corrections from OTIS/ATLAS or GOT models
Returns
-------
dh: height from minor constituents
"""
#-- degrees to radians
dtr = np.pi/180.0
#-- number of constituents
npts,nc = np.shape(zmajor)
nt = len(np.atleast_1d(t))
#-- number of data points to calculate if running time series/drift/map
n = nt if ((npts == 1) & (nt > 1)) else npts
#-- allocate for output elevation correction
dh = np.ma.zeros((n))
#-- convert time from days relative to Jan 1, 1992 to Modified Julian Days
MJD = 48622.0 + t
#-- major constituents used for inferring minor tides
cindex = ['q1','o1','p1','k1','n2','m2','s2','k2','2n2']
#-- re-order major tides to correspond to order of cindex
z = np.ma.zeros((n,9),dtype=np.complex64)
nz = 0
for i,c in enumerate(cindex):
j = [j for j,val in enumerate(constituents) if (val == c)]
if j:
j1, = j
z[:,i] = zmajor[:,j1]
nz += 1
if (nz < 6):
raise Exception('Not enough constituents for inference')
#-- list of minor constituents
minor = ['2q1','sigma1','rho1','m12','m11','chi1','pi1','phi1','theta1',
'j1','oo1','2n2','mu2','nu2','lambda2','l2','l2','t2','eps2','eta2']
#-- only add minor constituents that are not on the list of major values
minor_indices = [i for i,m in enumerate(minor) if m not in constituents]
#-- relationship between major and minor constituent amplitude and phase
zmin = np.zeros((n,20),dtype=np.complex64)
zmin[:,0] = 0.263*z[:,0] - 0.0252*z[:,1]#-- 2Q1
zmin[:,1] = 0.297*z[:,0] - 0.0264*z[:,1]#-- sigma1
zmin[:,2] = 0.164*z[:,0] + 0.0048*z[:,1]#-- rho1
zmin[:,3] = 0.0140*z[:,1] + 0.0101*z[:,3]#-- M12
zmin[:,4] = 0.0389*z[:,1] + 0.0282*z[:,3]#-- M11
zmin[:,5] = 0.0064*z[:,1] + 0.0060*z[:,3]#-- chi1
zmin[:,6] = 0.0030*z[:,1] + 0.0171*z[:,3]#-- pi1
zmin[:,7] = -0.0015*z[:,1] + 0.0152*z[:,3]#-- phi1
zmin[:,8] = -0.0065*z[:,1] + 0.0155*z[:,3]#-- theta1
zmin[:,9] = -0.0389*z[:,1] + 0.0836*z[:,3]#-- J1
zmin[:,10] = -0.0431*z[:,1] + 0.0613*z[:,3]#-- OO1
zmin[:,11] = 0.264*z[:,4] - 0.0253*z[:,5]#-- 2N2
zmin[:,12] = 0.298*z[:,4] - 0.0264*z[:,5]#-- mu2
zmin[:,13] = 0.165*z[:,4] + 0.00487*z[:,5]#-- nu2
zmin[:,14] = 0.0040*z[:,5] + 0.0074*z[:,6]#-- lambda2
zmin[:,15] = 0.0131*z[:,5] + 0.0326*z[:,6]#-- L2
zmin[:,16] = 0.0033*z[:,5] + 0.0082*z[:,6]#-- L2
zmin[:,17] = 0.0585*z[:,6]#-- t2
#-- additional coefficients for FES models
if CORRECTIONS in ('FES',):
#-- spline coefficients for admittances
mu2 = [0.069439968323, 0.351535557706, -0.046278307672]
nu2 = [-0.006104695053, 0.156878802427, 0.006755704028]
l2 = [0.077137765667, -0.051653455134, 0.027869916824]
t2 = [0.180480173707, -0.020101177502, 0.008331518844]
lda2 = [0.016503557465, -0.013307812292, 0.007753383202]
zmin[:,12] = mu2[0]*z[:,7] + mu2[1]*z[:,4] + mu2[2]*z[:,5]#-- mu2
zmin[:,13] = nu2[0]*z[:,7] + nu2[1]*z[:,4] + nu2[2]*z[:,5]#-- nu2
zmin[:,14] = lda2[0]*z[:,7] + lda2[1]*z[:,4] + lda2[2]*z[:,5]#-- lambda2
zmin[:,16] = l2[0]*z[:,7] + l2[1]*z[:,4] + l2[2]*z[:,5]#-- L2
zmin[:,17] = t2[0]*z[:,7] + t2[1]*z[:,4] + t2[2]*z[:,5]#-- t2
zmin[:,18] = 0.53285*z[:,8] - 0.03304*z[:,4]#-- eps2
zmin[:,19] = -0.0034925*z[:,5] + 0.0831707*z[:,7]#-- eta2
hour = (t % 1)*24.0
t1 = 15.0*hour
t2 = 30.0*hour
#-- set function for astronomical longitudes
ASTRO5 = True if CORRECTIONS in ('GOT','FES') else False
#-- convert from Modified Julian Dates into Ephemeris Time
S,H,P,omega,pp = calc_astrol_longitudes(MJD+DELTAT, ASTRO5=ASTRO5)
#-- determine equilibrium tidal arguments
arg = np.zeros((n,20))
arg[:,0] = t1 - 4.0*S + H + 2.0*P - 90.0#-- 2Q1
arg[:,1] = t1 - 4.0*S + 3.0*H - 90.0#-- sigma1
arg[:,2] = t1 - 3.0*S + 3.0*H - P - 90.0#-- rho1
arg[:,3] = t1 - S + H - P + 90.0#-- M12
arg[:,4] = t1 - S + H + P + 90.0#-- M11
arg[:,5] = t1 - S + 3.0*H - P + 90.0#-- chi1
arg[:,6] = t1 - 2.0*H + pp - 90.0#-- pi1
arg[:,7] = t1 + 3.0*H + 90.0#-- phi1
arg[:,8] = t1 + S - H + P + 90.0#-- theta1
arg[:,9] = t1 + S + H - P + 90.0#-- J1
arg[:,10] = t1 + 2.0*S + H + 90.0#-- OO1
arg[:,11] = t2 - 4.0*S + 2.0*H + 2.0*P#-- 2N2
arg[:,12] = t2 - 4.0*S + 4.0*H#-- mu2
arg[:,13] = t2 - 3.0*S + 4.0*H - P#-- nu2
arg[:,14] = t2 - S + P + 180.0#-- lambda2
arg[:,15] = t2 - S + 2.0*H - P + 180.0#-- L2
arg[:,16] = t2 - S + 2.0*H + P#-- L2
arg[:,17] = t2 - H + pp#-- t2
arg[:,18] = t2 - 5.0*S + 4.0*H + P #-- eps2
arg[:,19] = t2 + S + 2.0*H - pp #-- eta2
#-- determine nodal corrections f and u
sinn = np.sin(omega*dtr)
cosn = np.cos(omega*dtr)
sin2n = np.sin(2.0*omega*dtr)
cos2n = np.cos(2.0*omega*dtr)
f = np.ones((n,20))
f[:,0] = np.sqrt((1.0 + 0.189*cosn - 0.0058*cos2n)**2 +
(0.189*sinn - 0.0058*sin2n)**2)#-- 2Q1
f[:,1] = f[:,0]#-- sigma1
f[:,2] = f[:,0]#-- rho1
f[:,3] = np.sqrt((1.0 + 0.185*cosn)**2 + (0.185*sinn)**2)#-- M12
f[:,4] = np.sqrt((1.0 + 0.201*cosn)**2 + (0.201*sinn)**2)#-- M11
f[:,5] = np.sqrt((1.0 + 0.221*cosn)**2 + (0.221*sinn)**2)#-- chi1
f[:,9] = np.sqrt((1.0 + 0.198*cosn)**2 + (0.198*sinn)**2)#-- J1
f[:,10] = np.sqrt((1.0 + 0.640*cosn + 0.134*cos2n)**2 +
(0.640*sinn + 0.134*sin2n)**2)#-- OO1
f[:,11] = np.sqrt((1.0 - 0.0373*cosn)**2 + (0.0373*sinn)**2)#-- 2N2
f[:,12] = f[:,11]#-- mu2
f[:,13] = f[:,11]#-- nu2
f[:,15] = f[:,11]#-- L2
f[:,16] = np.sqrt((1.0 + 0.441*cosn)**2 + (0.441*sinn)**2)#-- L2
u = np.zeros((n,20))
u[:,0] = np.arctan2(0.189*sinn - 0.0058*sin2n,
1.0 + 0.189*cosn - 0.0058*sin2n)/dtr#-- 2Q1
u[:,1] = u[:,0]#-- sigma1
u[:,2] = u[:,0]#-- rho1
u[:,3] = np.arctan2( 0.185*sinn, 1.0 + 0.185*cosn)/dtr#-- M12
u[:,4] = np.arctan2(-0.201*sinn, 1.0 + 0.201*cosn)/dtr#-- M11
u[:,5] = np.arctan2(-0.221*sinn, 1.0 + 0.221*cosn)/dtr#-- chi1
u[:,9] = np.arctan2(-0.198*sinn, 1.0 + 0.198*cosn)/dtr#-- J1
u[:,10] = np.arctan2(-0.640*sinn - 0.134*sin2n,
1.0 + 0.640*cosn + 0.134*cos2n)/dtr#-- OO1
u[:,11] = np.arctan2(-0.0373*sinn, 1.0 - 0.0373*cosn)/dtr#-- 2N2
u[:,12] = u[:,11]#-- mu2
u[:,13] = u[:,11]#-- nu2
u[:,15] = u[:,11]#-- L2
u[:,16] = np.arctan2(-0.441*sinn, 1.0 + 0.441*cosn)/dtr#-- L2
if CORRECTIONS in ('FES',):
#-- additional astronomical terms for FES models
II = np.arccos(0.913694997 - 0.035692561*np.cos(omega*dtr))
at1 = np.arctan(1.01883*np.tan(omega*dtr/2.0))
at2 = np.arctan(0.64412*np.tan(omega*dtr/2.0))
xi = -at1 - at2 + omega*dtr
xi[xi > np.pi] -= 2.0*np.pi
nu = at1 - at2
I2 = np.tan(II/2.0)
Ra1 = np.sqrt(1.0 - 12.0*(I2**2)*np.cos(2.0*(P - xi)) + 36.0*(I2**4))
P2 = np.sin(2.0*(P - xi))
Q2 = 1.0/(6.0*(I2**2)) - np.cos(2.0*(P - xi))
R = np.arctan(P2/Q2)
f[:,0] = np.sin(II)*(np.cos(II/2.0)**2)/0.38 #-- 2Q1
f[:,1] = f[:,0] #-- sigma1
f[:,2] = f[:,0] #-- rho1
f[:,3] = f[:,0] #-- M12
f[:,4] = np.sin(2.0*II)/0.7214 #-- M11
f[:,5] = f[:,4] #-- chi1
f[:,9] = f[:,5] #-- J1
f[:,10] = np.sin(II)*np.power(np.sin(II/2.0),2.0)/0.01640 #-- OO1
f[:,11] = np.power(np.cos(II/2.0),4.0)/0.9154 #-- 2N2
f[:,12] = f[:,11] #-- mu2
f[:,13] = f[:,11] #-- nu2
f[:,14] = f[:,11] #-- lambda2
f[:,15] = f[:,11]*Ra1 #-- L2
f[:,18] = f[:,11] #-- eps2
f[:,19] = np.power(np.sin(II),2.0)/0.1565 #-- eta2
u[:,0] = (2.0*xi - nu)/dtr #-- 2Q1
u[:,1] = u[:,0] #-- sigma1
u[:,2] = u[:,0] #-- rho1
u[:,3] = u[:,0] #-- M12
u[:,4] = -nu/dtr #-- M11
u[:,5] | |
= None
    def __init__(self, Id=None, Status=None, StatusInfo=None, Message=None, gds_collector_=None, **kwargs_):
        """Initialize an ImageUploadStatusDetail element.

        Id, Status, StatusInfo and Message are the child-element values;
        each typed value is validated immediately so enumeration problems
        are reported to ``gds_collector_`` at construction time.
        """
        self.gds_collector_ = gds_collector_
        # ElementTree node and original tag are captured during parsing
        # (see build()); they stay None for programmatically built objects.
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        self.Id = Id
        self.validate_ImageId(self.Id)
        self.Id_nsprefix_ = None
        self.Status = Status
        self.validate_UploadImageStatusType(self.Status)
        self.Status_nsprefix_ = None
        self.StatusInfo = StatusInfo
        self.validate_UploadImageStatusInfoType(self.StatusInfo)
        self.StatusInfo_nsprefix_ = None
        self.Message = Message
        self.Message_nsprefix_ = None
    def factory(*args_, **kwargs_):
        """Construct an ImageUploadStatusDetail (or a registered subclass).

        When a subclass module is active, construction is delegated to the
        subclass looked up there; otherwise the class-level ``subclass``
        hook (if set) wins, falling back to this class itself.
        """
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, ImageUploadStatusDetail)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if ImageUploadStatusDetail.subclass:
            return ImageUploadStatusDetail.subclass(*args_, **kwargs_)
        else:
            return ImageUploadStatusDetail(*args_, **kwargs_)
    factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_Id(self):
return self.Id
def set_Id(self, Id):
self.Id = Id
def get_Status(self):
return self.Status
def set_Status(self, Status):
self.Status = Status
def get_StatusInfo(self):
return self.StatusInfo
def set_StatusInfo(self, StatusInfo):
self.StatusInfo = StatusInfo
def get_Message(self):
return self.Message
def set_Message(self, Message):
self.Message = Message
def validate_ImageId(self, value):
result = True
# Validate type ImageId, a restriction on xs:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
value = value
enumerations = ['IMAGE_1', 'IMAGE_2', 'IMAGE_3', 'IMAGE_4', 'IMAGE_5']
if value not in enumerations:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on ImageId' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def validate_UploadImageStatusType(self, value):
result = True
# Validate type UploadImageStatusType, a restriction on xs:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
value = value
enumerations = ['ERROR', 'FAILURE', 'SUCCESS']
if value not in enumerations:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on UploadImageStatusType' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def validate_UploadImageStatusInfoType(self, value):
result = True
# Validate type UploadImageStatusInfoType, a restriction on xs:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
value = value
enumerations = ['IMAGE_EXCEEDS_MAX_RESOLUTION', 'IMAGE_EXCEEDS_MAX_SIZE', 'IMAGE_FAILED_VIRUS_CHECK', 'IMAGE_ID_INVALID', 'IMAGE_ID_MISSING', 'IMAGE_MISSING', 'IMAGE_TYPE_INVALID', 'IMAGE_TYPE_MISSING']
if value not in enumerations:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on UploadImageStatusInfoType' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def hasContent_(self):
if (
self.Id is not None or
self.Status is not None or
self.StatusInfo is not None or
self.Message is not None
):
return True
else:
return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ImageUploadStatusDetail', pretty_print=True):
        """Write this element (and its children) as XML to ``outfile``.

        ``level`` controls indentation when ``pretty_print`` is on.  The
        tag name falls back to the tag captured at parse time when one is
        available; an element with no content is emitted self-closing.
        """
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('ImageUploadStatusDetail')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None and name_ == 'ImageUploadStatusDetail':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ImageUploadStatusDetail')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='ImageUploadStatusDetail', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ImageUploadStatusDetail'):
        # This element declares no XML attributes, so there is nothing to
        # write; the hook exists for subclasses and the export() protocol.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ImageUploadStatusDetail', fromsubclass_=False, pretty_print=True):
        """Emit each non-None child element (Id, Status, StatusInfo,
        Message) in schema order, XML-escaped and indented."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Id is not None:
            namespaceprefix_ = self.Id_nsprefix_ + ':' if (UseCapturedNS_ and self.Id_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sId>%s</%sId>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.Id), input_name='Id')), namespaceprefix_ , eol_))
        if self.Status is not None:
            namespaceprefix_ = self.Status_nsprefix_ + ':' if (UseCapturedNS_ and self.Status_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sStatus>%s</%sStatus>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.Status), input_name='Status')), namespaceprefix_ , eol_))
        if self.StatusInfo is not None:
            namespaceprefix_ = self.StatusInfo_nsprefix_ + ':' if (UseCapturedNS_ and self.StatusInfo_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sStatusInfo>%s</%sStatusInfo>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.StatusInfo), input_name='StatusInfo')), namespaceprefix_ , eol_))
        if self.Message is not None:
            namespaceprefix_ = self.Message_nsprefix_ + ':' if (UseCapturedNS_ and self.Message_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sMessage>%s</%sMessage>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.Message), input_name='Message')), namespaceprefix_ , eol_))
    def build(self, node, gds_collector_=None):
        """Populate this object from an ElementTree ``node``; returns self.

        Attributes are handled first, then each child element is
        dispatched to buildChildren() by its namespace-stripped tag name.
        """
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # This element declares no XML attributes; hook kept for the
        # build() protocol and subclasses.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        """Store one parsed child element onto the matching member.

        Typed members (Id, Status, StatusInfo) are re-validated after
        assignment so enumeration violations reach the collector; each
        child's namespace prefix is remembered for round-tripping.
        """
        if nodeName_ == 'Id':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Id')
            value_ = self.gds_validate_string(value_, node, 'Id')
            self.Id = value_
            self.Id_nsprefix_ = child_.prefix
            # validate type ImageId
            self.validate_ImageId(self.Id)
        elif nodeName_ == 'Status':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Status')
            value_ = self.gds_validate_string(value_, node, 'Status')
            self.Status = value_
            self.Status_nsprefix_ = child_.prefix
            # validate type UploadImageStatusType
            self.validate_UploadImageStatusType(self.Status)
        elif nodeName_ == 'StatusInfo':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'StatusInfo')
            value_ = self.gds_validate_string(value_, node, 'StatusInfo')
            self.StatusInfo = value_
            self.StatusInfo_nsprefix_ = child_.prefix
            # validate type UploadImageStatusInfoType
            self.validate_UploadImageStatusInfoType(self.StatusInfo)
        elif nodeName_ == 'Message':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Message')
            value_ = self.gds_validate_string(value_, node, 'Message')
            self.Message = value_
            self.Message_nsprefix_ = child_.prefix
# end class ImageUploadStatusDetail
class Localization(GeneratedsSuper):
    """Identifies the representation of human-readable text.

    Generated binding with two optional child elements, ``LanguageCode``
    and ``LocaleCode``.  Follows the same factory/export/build protocol
    as the other generated element classes in this module.
    """
    __hash__ = GeneratedsSuper.__hash__
    subclass = None
    superclass = None
    def __init__(self, LanguageCode=None, LocaleCode=None, gds_collector_=None, **kwargs_):
        self.gds_collector_ = gds_collector_
        # ElementTree node and original tag are captured during parsing
        # (see build()); they stay None for programmatically built objects.
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        self.LanguageCode = LanguageCode
        self.LanguageCode_nsprefix_ = None
        self.LocaleCode = LocaleCode
        self.LocaleCode_nsprefix_ = None
    def factory(*args_, **kwargs_):
        # Delegate construction to a registered subclass when one exists.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, Localization)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if Localization.subclass:
            return Localization.subclass(*args_, **kwargs_)
        else:
            return Localization(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ns_prefix_(self):
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        self.ns_prefix_ = ns_prefix
    def get_LanguageCode(self):
        return self.LanguageCode
    def set_LanguageCode(self, LanguageCode):
        self.LanguageCode = LanguageCode
    def get_LocaleCode(self):
        return self.LocaleCode
    def set_LocaleCode(self, LocaleCode):
        self.LocaleCode = LocaleCode
    def hasContent_(self):
        # True when at least one child element would be serialized.
        if (
            self.LanguageCode is not None or
            self.LocaleCode is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='Localization', pretty_print=True):
        """Write this element as XML to ``outfile`` (self-closing if empty)."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('Localization')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None and name_ == 'Localization':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Localization')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='Localization', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='Localization'):
        # No XML attributes are defined for this element.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='Localization', fromsubclass_=False, pretty_print=True):
        # Emit each non-None child element in schema order.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.LanguageCode is not None:
            namespaceprefix_ = self.LanguageCode_nsprefix_ + ':' if (UseCapturedNS_ and self.LanguageCode_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sLanguageCode>%s</%sLanguageCode>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.LanguageCode), input_name='LanguageCode')), namespaceprefix_ , eol_))
        if self.LocaleCode is not None:
            namespaceprefix_ = self.LocaleCode_nsprefix_ + ':' if (UseCapturedNS_ and self.LocaleCode_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sLocaleCode>%s</%sLocaleCode>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.LocaleCode), input_name='LocaleCode')), namespaceprefix_ , eol_))
    def build(self, node, gds_collector_=None):
        """Populate this object from an ElementTree ``node``; returns self."""
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # No XML attributes are defined for this element.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        # Dispatch each child element to the matching member, remembering
        # its namespace prefix for round-tripping.
        if nodeName_ == 'LanguageCode':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'LanguageCode')
            value_ = self.gds_validate_string(value_, node, 'LanguageCode')
            self.LanguageCode = value_
            self.LanguageCode_nsprefix_ = child_.prefix
        elif nodeName_ == 'LocaleCode':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'LocaleCode')
            value_ = self.gds_validate_string(value_, node, 'LocaleCode')
            self.LocaleCode = value_
            self.LocaleCode_nsprefix_ = child_.prefix
# end class Localization
class Notification(GeneratedsSuper):
"""The descriptive data regarding the result of the submitted
transaction."""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, Severity=None, Source=None, Code=None, Message=None, LocalizedMessage=None, MessageParameters=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = | |
+ 0.5*m.b684*m.b807 + 0.5*m.b684*m.b808
+ 0.5*m.b684*m.b818 + 0.5*m.b684*m.b825 + 0.5*m.b685*m.b691 + 0.5*m.b685*m.b692 + 0.5*m.b685*
m.b696 + 0.5*m.b685*m.b698 + 0.5*m.b685*m.b701 + 0.5*m.b685*m.b707 + 0.5*m.b685*m.b710 + 0.5*
m.b685*m.b711 + 0.5*m.b685*m.b712 + 0.5*m.b685*m.b713 + 0.5*m.b685*m.b715 + m.b685*m.b720 + 0.5*
m.b685*m.b730 + 0.5*m.b685*m.b740 + 0.5*m.b685*m.b741 + 0.5*m.b685*m.b743 + 0.5*m.b685*m.b744 +
0.5*m.b685*m.b745 + 0.5*m.b685*m.b748 + 0.5*m.b685*m.b751 + 0.5*m.b685*m.b752 + m.b685*m.b756 +
0.5*m.b685*m.b763 + m.b685*m.b766 + 0.5*m.b685*m.b768 + 0.5*m.b685*m.b769 + 0.5*m.b685*m.b770 +
0.5*m.b685*m.b771 + 0.5*m.b685*m.b785 + m.b685*m.b786 + 0.5*m.b685*m.b791 + 0.5*m.b685*m.b796 +
0.5*m.b685*m.b800 + 0.5*m.b685*m.b802 + 0.5*m.b685*m.b807 + 0.5*m.b685*m.b810 + 0.5*m.b685*m.b812
+ 0.5*m.b685*m.b813 + 0.5*m.b685*m.b817 + 0.5*m.b685*m.b819 + 0.5*m.b685*m.b825 + 0.5*m.b685*
m.b827 + 0.5*m.b685*m.b828 + 0.5*m.b685*m.b830 + m.b686*m.b687 + 0.5*m.b686*m.b688 + 0.5*m.b686*
m.b689 + 0.5*m.b686*m.b690 + 0.5*m.b686*m.b695 + 0.5*m.b686*m.b704 + 0.5*m.b686*m.b714 + 0.5*
m.b686*m.b717 + 0.5*m.b686*m.b731 + m.b686*m.b737 + 0.5*m.b686*m.b738 + m.b686*m.b739 + 0.5*
m.b686*m.b743 + 0.5*m.b686*m.b745 + 0.5*m.b686*m.b749 + 0.5*m.b686*m.b759 + 0.5*m.b686*m.b765 +
0.5*m.b686*m.b768 + 0.5*m.b686*m.b770 + 0.5*m.b686*m.b788 + 0.5*m.b686*m.b789 + 0.5*m.b686*m.b798
+ 0.5*m.b686*m.b799 + 0.5*m.b686*m.b811 + m.b686*m.b814 + 0.5*m.b686*m.b816 + 0.5*m.b686*m.b819
+ 0.5*m.b686*m.b834 + 0.5*m.b687*m.b688 + 0.5*m.b687*m.b689 + 0.5*m.b687*m.b690 + 0.5*m.b687*
m.b695 + 0.5*m.b687*m.b704 + 0.5*m.b687*m.b714 + 0.5*m.b687*m.b717 + 0.5*m.b687*m.b731 + m.b687*
m.b737 + 0.5*m.b687*m.b738 + m.b687*m.b739 + 0.5*m.b687*m.b743 + 0.5*m.b687*m.b745 + 0.5*m.b687*
m.b749 + 0.5*m.b687*m.b759 + 0.5*m.b687*m.b765 + 0.5*m.b687*m.b768 + 0.5*m.b687*m.b770 + 0.5*
m.b687*m.b788 + 0.5*m.b687*m.b789 + 0.5*m.b687*m.b798 + 0.5*m.b687*m.b799 + 0.5*m.b687*m.b811 +
m.b687*m.b814 + 0.5*m.b687*m.b816 + 0.5*m.b687*m.b819 + 0.5*m.b687*m.b834 + m.b688*m.b689 + 0.5*
m.b688*m.b699 + 0.5*m.b688*m.b704 + 0.5*m.b688*m.b705 + 0.5*m.b688*m.b708 + 0.5*m.b688*m.b714 +
m.b688*m.b717 + 0.5*m.b688*m.b737 + m.b688*m.b738 + 0.5*m.b688*m.b739 + 0.5*m.b688*m.b744 + 0.5*
m.b688*m.b749 + 0.5*m.b688*m.b751 + 0.5*m.b688*m.b759 + 0.5*m.b688*m.b765 + 0.5*m.b688*m.b777 +
0.5*m.b688*m.b785 + 0.5*m.b688*m.b788 + 0.5*m.b688*m.b789 + 0.5*m.b688*m.b798 + m.b688*m.b799 +
0.5*m.b688*m.b811 + 0.5*m.b688*m.b814 + 0.5*m.b688*m.b816 + 0.5*m.b688*m.b817 + 0.5*m.b688*m.b827
+ 0.5*m.b688*m.b832 + 0.5*m.b689*m.b699 + 0.5*m.b689*m.b704 + 0.5*m.b689*m.b705 + 0.5*m.b689*
m.b708 + 0.5*m.b689*m.b714 + m.b689*m.b717 + 0.5*m.b689*m.b737 + m.b689*m.b738 + 0.5*m.b689*
m.b739 + 0.5*m.b689*m.b744 + 0.5*m.b689*m.b749 + 0.5*m.b689*m.b751 + 0.5*m.b689*m.b759 + 0.5*
m.b689*m.b765 + 0.5*m.b689*m.b777 + 0.5*m.b689*m.b785 + 0.5*m.b689*m.b788 + 0.5*m.b689*m.b789 +
0.5*m.b689*m.b798 + m.b689*m.b799 + 0.5*m.b689*m.b811 + 0.5*m.b689*m.b814 + 0.5*m.b689*m.b816 +
0.5*m.b689*m.b817 + 0.5*m.b689*m.b827 + 0.5*m.b689*m.b832 + m.b690*m.b695 + 0.5*m.b690*m.b700 +
0.5*m.b690*m.b724 + m.b690*m.b731 + 0.5*m.b690*m.b736 + 0.5*m.b690*m.b737 + 0.5*m.b690*m.b739 +
0.5*m.b690*m.b743 + 0.5*m.b690*m.b745 + 0.5*m.b690*m.b768 + 0.5*m.b690*m.b770 + 0.5*m.b690*m.b775
+ 0.5*m.b690*m.b804 + 0.5*m.b690*m.b814 + 0.5*m.b690*m.b819 + m.b690*m.b834 + 0.5*m.b690*m.b836
+ 0.5*m.b691*m.b692 + 0.5*m.b691*m.b696 + m.b691*m.b701 + 0.5*m.b691*m.b707 + m.b691*m.b710 +
0.5*m.b691*m.b712 + 0.5*m.b691*m.b716 + 0.5*m.b691*m.b718 + 0.5*m.b691*m.b720 + 0.5*m.b691*m.b725
+ 0.5*m.b691*m.b727 + 0.5*m.b691*m.b733 + 0.5*m.b691*m.b734 + 0.5*m.b691*m.b746 + 0.5*m.b691*
m.b750 + 0.5*m.b691*m.b752 + 0.5*m.b691*m.b753 + 0.5*m.b691*m.b756 + 0.5*m.b691*m.b764 + 0.5*
m.b691*m.b766 + 0.5*m.b691*m.b771 + 0.5*m.b691*m.b783 + 0.5*m.b691*m.b786 + 0.5*m.b691*m.b792 +
0.5*m.b691*m.b793 + 0.5*m.b691*m.b796 + 0.5*m.b691*m.b797 + 0.5*m.b691*m.b800 + 0.5*m.b691*m.b802
+ 0.5*m.b691*m.b806 + 0.5*m.b691*m.b807 + 0.5*m.b691*m.b810 + m.b691*m.b828 + m.b691*m.b830 +
0.5*m.b691*m.b833 + 0.5*m.b692*m.b693 + 0.5*m.b692*m.b694 + 0.5*m.b692*m.b696 + 0.5*m.b692*m.b697
+ 0.5*m.b692*m.b699 + 0.5*m.b692*m.b701 + 0.5*m.b692*m.b703 + 0.5*m.b692*m.b705 + 0.5*m.b692*
m.b706 + 0.5*m.b692*m.b707 + 0.5*m.b692*m.b708 + 0.5*m.b692*m.b709 + 0.5*m.b692*m.b710 + 0.5*
m.b692*m.b712 + 0.5*m.b692*m.b720 + 0.5*m.b692*m.b722 + 0.5*m.b692*m.b723 + 0.5*m.b692*m.b732 +
0.5*m.b692*m.b742 + 0.5*m.b692*m.b752 + 0.5*m.b692*m.b754 + 0.5*m.b692*m.b756 + 0.5*m.b692*m.b766
+ 0.5*m.b692*m.b771 + 0.5*m.b692*m.b772 + 0.5*m.b692*m.b774 + 0.5*m.b692*m.b777 + 0.5*m.b692*
m.b778 + 0.5*m.b692*m.b779 + 0.5*m.b692*m.b784 + 0.5*m.b692*m.b786 + 0.5*m.b692*m.b794 + 0.5*
m.b692*m.b795 + m.b692*m.b796 + m.b692*m.b800 + 0.5*m.b692*m.b801 + m.b692*m.b802 + 0.5*m.b692*
m.b805 + 0.5*m.b692*m.b807 + m.b692*m.b810 + 0.5*m.b692*m.b815 + 0.5*m.b692*m.b824 + 0.5*m.b692*
m.b828 + 0.5*m.b692*m.b829 + 0.5*m.b692*m.b830 + 0.5*m.b692*m.b831 + 0.5*m.b692*m.b832 + 0.5*
m.b692*m.b835 + 0.5*m.b693*m.b694 + m.b693*m.b697 + 0.5*m.b693*m.b699 + 0.5*m.b693*m.b703 + 0.5*
m.b693*m.b705 + 0.5*m.b693*m.b706 + 0.5*m.b693*m.b708 + m.b693*m.b709 + 0.5*m.b693*m.b711 + 0.5*
m.b693*m.b719 + 0.5*m.b693*m.b722 + 0.5*m.b693*m.b723 + 0.5*m.b693*m.b730 + 0.5*m.b693*m.b732 +
0.5*m.b693*m.b741 + 0.5*m.b693*m.b742 + 0.5*m.b693*m.b754 + 0.5*m.b693*m.b763 + 0.5*m.b693*m.b772
+ 0.5*m.b693*m.b774 + 0.5*m.b693*m.b777 + 0.5*m.b693*m.b778 + 0.5*m.b693*m.b779 + 0.5*m.b693*
m.b784 + m.b693*m.b794 + 0.5*m.b693*m.b795 + 0.5*m.b693*m.b796 + 0.5*m.b693*m.b800 + 0.5*m.b693*
m.b801 + 0.5*m.b693*m.b802 + m.b693*m.b805 + 0.5*m.b693*m.b810 + 0.5*m.b693*m.b815 + 0.5*m.b693*
m.b824 + 0.5*m.b693*m.b825 + 0.5*m.b693*m.b829 + 0.5*m.b693*m.b831 + 0.5*m.b693*m.b832 + 0.5*
m.b693*m.b835 + 0.5*m.b694*m.b697 + 0.5*m.b694*m.b699 + 0.5*m.b694*m.b703 + 0.5*m.b694*m.b705 +
0.5*m.b694*m.b706 + 0.5*m.b694*m.b708 + 0.5*m.b694*m.b709 + 0.5*m.b694*m.b722 + 0.5*m.b694*m.b723
+ 0.5*m.b694*m.b727 + 0.5*m.b694*m.b732 + 0.5*m.b694*m.b733 + 0.5*m.b694*m.b734 + 0.5*m.b694*
m.b742 + 0.5*m.b694*m.b750 + 0.5*m.b694*m.b754 + 0.5*m.b694*m.b755 + 0.5*m.b694*m.b760 + 0.5*
m.b694*m.b772 + 0.5*m.b694*m.b774 + 0.5*m.b694*m.b777 + 0.5*m.b694*m.b778 + m.b694*m.b779 + 0.5*
m.b694*m.b784 + 0.5*m.b694*m.b792 + 0.5*m.b694*m.b794 + m.b694*m.b795 + 0.5*m.b694*m.b796 + 0.5*
m.b694*m.b800 + m.b694*m.b801 + 0.5*m.b694*m.b802 + 0.5*m.b694*m.b805 + 0.5*m.b694*m.b808 + 0.5*
m.b694*m.b810 + 0.5*m.b694*m.b815 + 0.5*m.b694*m.b824 + m.b694*m.b829 + 0.5*m.b694*m.b831 + 0.5*
m.b694*m.b832 + 0.5*m.b694*m.b835 + 0.5*m.b695*m.b700 + 0.5*m.b695*m.b724 + m.b695*m.b731 + 0.5*
m.b695*m.b736 + 0.5*m.b695*m.b737 + 0.5*m.b695*m.b739 + 0.5*m.b695*m.b743 + 0.5*m.b695*m.b745 +
0.5*m.b695*m.b768 + 0.5*m.b695*m.b770 + 0.5*m.b695*m.b775 + 0.5*m.b695*m.b804 + 0.5*m.b695*m.b814
+ 0.5*m.b695*m.b819 + m.b695*m.b834 + 0.5*m.b695*m.b836 + 0.5*m.b696*m.b701 + m.b696*m.b707 +
0.5*m.b696*m.b710 + m.b696*m.b712 + 0.5*m.b696*m.b719 + 0.5*m.b696*m.b720 + 0.5*m.b696*m.b721 +
0.5*m.b696*m.b735 + 0.5*m.b696*m.b747 + m.b696*m.b752 + 0.5*m.b696*m.b755 + 0.5*m.b696*m.b756 +
0.5*m.b696*m.b760 + 0.5*m.b696*m.b766 + 0.5*m.b696*m.b771 + 0.5*m.b696*m.b782 + 0.5*m.b696*m.b786
+ 0.5*m.b696*m.b796 + 0.5*m.b696*m.b800 + 0.5*m.b696*m.b802 + m.b696*m.b807 + 0.5*m.b696*m.b808
+ 0.5*m.b696*m.b810 + 0.5*m.b696*m.b818 + 0.5*m.b696*m.b828 + 0.5*m.b696*m.b830 + 0.5*m.b697*
m.b699 + 0.5*m.b697*m.b703 + 0.5*m.b697*m.b705 + 0.5*m.b697*m.b706 + 0.5*m.b697*m.b708 + m.b697*
m.b709 + 0.5*m.b697*m.b711 + 0.5*m.b697*m.b719 + 0.5*m.b697*m.b722 + 0.5*m.b697*m.b723 + 0.5*
m.b697*m.b730 + 0.5*m.b697*m.b732 + 0.5*m.b697*m.b741 + 0.5*m.b697*m.b742 + 0.5*m.b697*m.b754 +
0.5*m.b697*m.b763 + 0.5*m.b697*m.b772 + 0.5*m.b697*m.b774 + 0.5*m.b697*m.b777 + 0.5*m.b697*m.b778
+ 0.5*m.b697*m.b779 + 0.5*m.b697*m.b784 + m.b697*m.b794 + 0.5*m.b697*m.b795 + 0.5*m.b697*m.b796
+ 0.5*m.b697*m.b800 + 0.5*m.b697*m.b801 + 0.5*m.b697*m.b802 + m.b697*m.b805 + 0.5*m.b697*m.b810
+ 0.5*m.b697*m.b815 + 0.5*m.b697*m.b824 + 0.5*m.b697*m.b825 + 0.5*m.b697*m.b829 + 0.5*m.b697*
m.b831 + 0.5*m.b697*m.b832 + 0.5*m.b697*m.b835 + 0.5*m.b698*m.b711 + m.b698*m.b713 + m.b698*
m.b715 + 0.5*m.b698*m.b718 + 0.5*m.b698*m.b720 + 0.5*m.b698*m.b721 + 0.5*m.b698*m.b722 + 0.5*
m.b698*m.b730 + 0.5*m.b698*m.b732 + 0.5*m.b698*m.b735 + m.b698*m.b740 + 0.5*m.b698*m.b741 + 0.5*
m.b698*m.b742 + 0.5*m.b698*m.b743 + 0.5*m.b698*m.b744 + 0.5*m.b698*m.b745 + 0.5*m.b698*m.b746 +
0.5*m.b698*m.b747 + 0.5*m.b698*m.b748 + 0.5*m.b698*m.b751 + 0.5*m.b698*m.b753 + 0.5*m.b698*m.b756
+ 0.5*m.b698*m.b763 + 0.5*m.b698*m.b766 + 0.5*m.b698*m.b768 + m.b698*m.b769 + 0.5*m.b698*m.b770
+ 0.5*m.b698*m.b776 + 0.5*m.b698*m.b782 + 0.5*m.b698*m.b785 + 0.5*m.b698*m.b786 + 0.5*m.b698*
m.b791 + 0.5*m.b698*m.b793 + 0.5*m.b698*m.b806 + 0.5*m.b698*m.b812 + 0.5*m.b698*m.b813 + 0.5*
m.b698*m.b817 + 0.5*m.b698*m.b818 + 0.5*m.b698*m.b819 + 0.5*m.b698*m.b824 + 0.5*m.b698*m.b825 +
0.5*m.b698*m.b827 + 0.5*m.b698*m.b831 + 0.5*m.b699*m.b703 + m.b699*m.b705 + 0.5*m.b699*m.b706 +
m.b699*m.b708 + 0.5*m.b699*m.b709 + 0.5*m.b699*m.b717 + 0.5*m.b699*m.b722 + 0.5*m.b699*m.b723 +
0.5*m.b699*m.b732 + 0.5*m.b699*m.b738 + 0.5*m.b699*m.b742 + 0.5*m.b699*m.b744 + 0.5*m.b699*m.b751
+ 0.5*m.b699*m.b754 + 0.5*m.b699*m.b772 + 0.5*m.b699*m.b774 + m.b699*m.b777 + 0.5*m.b699*m.b778
+ 0.5*m.b699*m.b779 + 0.5*m.b699*m.b784 + 0.5*m.b699*m.b785 + 0.5*m.b699*m.b794 + 0.5*m.b699*
m.b795 + 0.5*m.b699*m.b796 + 0.5*m.b699*m.b799 + 0.5*m.b699*m.b800 + 0.5*m.b699*m.b801 + 0.5*
m.b699*m.b802 + 0.5*m.b699*m.b805 + 0.5*m.b699*m.b810 + 0.5*m.b699*m.b815 + 0.5*m.b699*m.b817 +
0.5*m.b699*m.b824 + 0.5*m.b699*m.b827 + 0.5*m.b699*m.b829 + 0.5*m.b699*m.b831 + m.b699*m.b832 +
0.5*m.b699*m.b835 + 0.5*m.b700*m.b702 + m.b700*m.b724 + 0.5*m.b700*m.b726 + 0.5*m.b700*m.b728 +
0.5*m.b700*m.b729 + 0.5*m.b700*m.b731 + m.b700*m.b736 + 0.5*m.b700*m.b757 + 0.5*m.b700*m.b758 +
0.5*m.b700*m.b762 + 0.5*m.b700*m.b767 + m.b700*m.b775 + 0.5*m.b700*m.b780 + 0.5*m.b700*m.b781 +
0.5*m.b700*m.b787 + 0.5*m.b700*m.b803 + 0.5*m.b700*m.b804 + 0.5*m.b700*m.b820 + 0.5*m.b700*m.b821
+ 0.5*m.b700*m.b822 + 0.5*m.b700*m.b834 + m.b700*m.b836 + 0.5*m.b701*m.b707 + m.b701*m.b710 +
0.5*m.b701*m.b712 + 0.5*m.b701*m.b716 + 0.5*m.b701*m.b718 + 0.5*m.b701*m.b720 + 0.5*m.b701*m.b725
+ 0.5*m.b701*m.b727 + 0.5*m.b701*m.b733 + 0.5*m.b701*m.b734 + 0.5*m.b701*m.b746 + 0.5*m.b701*
m.b750 + 0.5*m.b701*m.b752 + 0.5*m.b701*m.b753 + 0.5*m.b701*m.b756 + 0.5*m.b701*m.b764 + 0.5*
m.b701*m.b766 + 0.5*m.b701*m.b771 + 0.5*m.b701*m.b783 + 0.5*m.b701*m.b786 + 0.5*m.b701*m.b792 +
0.5*m.b701*m.b793 + 0.5*m.b701*m.b796 + 0.5*m.b701*m.b797 + 0.5*m.b701*m.b800 + 0.5*m.b701*m.b802
+ 0.5*m.b701*m.b806 + 0.5*m.b701*m.b807 + 0.5*m.b701*m.b810 + m.b701*m.b828 + m.b701*m.b830 +
0.5*m.b701*m.b833 + 0.5*m.b702*m.b703 + 0.5*m.b702*m.b716 + 0.5*m.b702*m.b724 + 0.5*m.b702*m.b725
+ 0.5*m.b702*m.b726 + 0.5*m.b702*m.b728 + 0.5*m.b702*m.b729 + 0.5*m.b702*m.b736 + 0.5*m.b702*
m.b757 + 0.5*m.b702*m.b758 + 0.5*m.b702*m.b762 + 0.5*m.b702*m.b764 + 0.5*m.b702*m.b767 + 0.5*
m.b702*m.b772 + 0.5*m.b702*m.b773 + 0.5*m.b702*m.b774 + 0.5*m.b702*m.b775 + 0.5*m.b702*m.b780 +
m.b702*m.b781 + m.b702*m.b787 + 0.5*m.b702*m.b797 + 0.5*m.b702*m.b803 + 0.5*m.b702*m.b815 +
m.b702*m.b820 + m.b702*m.b821 + 0.5*m.b702*m.b822 + 0.5*m.b702*m.b833 + 0.5*m.b702*m.b835 + 0.5*
m.b702*m.b836 + 0.5*m.b703*m.b705 + 0.5*m.b703*m.b706 + 0.5*m.b703*m.b708 + 0.5*m.b703*m.b709 +
0.5*m.b703*m.b716 + 0.5*m.b703*m.b722 + 0.5*m.b703*m.b723 + 0.5*m.b703*m.b725 + 0.5*m.b703*m.b732
+ 0.5*m.b703*m.b742 + 0.5*m.b703*m.b754 + 0.5*m.b703*m.b764 + m.b703*m.b772 + 0.5*m.b703*m.b773
+ m.b703*m.b774 + | |
<filename>habitat/tasks/nav/nav_task.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, List, Optional, Type, Tuple
import attr
import cv2
import numpy as np
from gym import spaces
import lazy_property
import math
import quaternion
from typing import Union
from habitat.utils.geometry_utils import (
angle_between_quaternions,
quaternion_from_two_vectors,
)
import habitat_sim.utils
from habitat.config import Config
from habitat.core.simulator import SimulatorActions
from habitat.core.dataset import Dataset, Episode
from habitat.core.embodied_task import EmbodiedTask, Measure, Measurements
from habitat.core.registry import registry
from habitat.core.simulator import (
Sensor,
SensorSuite,
SensorTypes,
ShortestPathPoint,
Simulator,
)
from habitat.core.utils import not_none_validator
from habitat.tasks.utils import cartesian_to_polar, quaternion_rotate_vector
from habitat.utils.visualizations import maps
COLLISION_PROXIMITY_TOLERANCE: float = 1e-3
MAP_THICKNESS_SCALAR: int = 1250
def merge_sim_episode_config(
    sim_config: Config, episode: Type[Episode]
) -> Any:
    """Overlay episode-specific settings onto a simulator config.

    Sets the scene from the episode and, when the episode provides both a
    start position and a start rotation, writes them into the default
    agent's config and marks the start state as explicitly set.

    Args:
        sim_config: simulator configuration to update (mutated in place).
        episode: episode providing the scene id and optional start state.

    Returns:
        The same ``sim_config`` object, updated.
    """
    sim_config.defrost()
    sim_config.SCENE = episode.scene_id
    sim_config.freeze()
    has_start_state = (
        episode.start_position is not None
        and episode.start_rotation is not None
    )
    if has_start_state:
        default_agent_name = sim_config.AGENTS[sim_config.DEFAULT_AGENT_ID]
        agent_cfg = getattr(sim_config, default_agent_name)
        agent_cfg.defrost()
        agent_cfg.START_POSITION = episode.start_position
        agent_cfg.START_ROTATION = episode.start_rotation
        agent_cfg.IS_SET_START_STATE = True
        agent_cfg.freeze()
    return sim_config
@attr.s(auto_attribs=True, kw_only=True)
class NavigationGoal:
    r"""Base class for a goal specification hierarchy.

    All fields are keyword-only; ``position`` is required — the
    ``not_none_validator`` rejects the ``None`` default at construction.
    """
    # Goal location as (x, y, z) in world coordinates; must be supplied.
    position: List[float] = attr.ib(default=None, validator=not_none_validator)
    # Success radius around ``position``; ``None`` means use the task default.
    radius: Optional[float] = None
@attr.s(auto_attribs=True, kw_only=True)
class ObjectGoal(NavigationGoal):
    r"""Object goal that can be specified by object_id or position or object
    category.
    """
    # Unique object identifier; required (validator rejects None).
    object_id: str = attr.ib(default=None, validator=not_none_validator)
    # Optional human-readable descriptors of the target object.
    object_name: Optional[str] = None
    object_category: Optional[str] = None
    # Optional identifiers of the room containing the object.
    room_id: Optional[str] = None
    room_name: Optional[str] = None
@attr.s(auto_attribs=True, kw_only=True)
class RoomGoal(NavigationGoal):
    r"""Room goal that can be specified by room_id or position with radius.
    """
    # Axis-aligned bounding box of the target room; required.
    room_aabb: Tuple[float] = attr.ib(default=None, validator=not_none_validator)
    # room_id: str = attr.ib(default=None, validator=not_none_validator)
    # Room category name (e.g. "kitchen"); required.
    room_name: str = attr.ib(default=None, validator=not_none_validator)
class SE3:
    """Minimal rigid-body transform: a rotation plus a translation.

    ``rot`` is stored exactly as given by the caller (``inv`` calls
    ``.inverse()`` on it, so a ``quaternion.quaternion`` is expected at
    construction time) and ``trans`` is a 3-vector.

    NOTE(review): ``inv`` returns a transform whose ``rot`` is a 3x3
    rotation matrix (preserving the original return representation), so
    the result of ``inv`` cannot itself be inverted again — confirm
    callers only invert once.
    """

    def __init__(self, rot, trans):
        # rot: rotation (quaternion expected by inv); trans: 3-vector.
        self.rot = rot
        self.trans = trans

    def inv(self):
        """Return the inverse transform.

        For T = (R, t) the inverse is (R^-1, -R^-1 t).  The previous
        implementation returned ``-t`` without rotating it into the
        inverse frame, which is only correct when R is the identity.
        """
        rot_inv = self.rot.inverse()
        rot_inv_mat = quaternion.as_rotation_matrix(rot_inv)
        # Rotate the translation into the inverse frame before negating.
        return SE3(rot_inv_mat, -rot_inv_mat.dot(self.trans))
@attr.s(auto_attribs=True, kw_only=True)
class NavigationEpisode(Episode):
    r"""Class for episode specification that includes initial position and
    rotation of agent, scene name, goal and optional shortest paths. An
    episode is a description of one task instance for the agent.

    Args:
        episode_id: id of episode in the dataset, usually episode number
        scene_id: id of scene in scene dataset
        start_position: numpy ndarray containing 3 entries for (x, y, z)
        start_rotation: numpy ndarray with 4 entries for (x, y, z, w)
            elements of unit quaternion (versor) representing agent 3D
            orientation. ref: https://en.wikipedia.org/wiki/Versor
        goals: list of goals specifications
        start_room: room id
        shortest_paths: list containing shortest paths to goals
    """
    # Required: at least one goal must be provided (validator rejects None).
    goals: List[NavigationGoal] = attr.ib(
        default=None, validator=not_none_validator
    )
    # Optional id of the room the agent starts in.
    start_room: Optional[str] = None
    # Optional precomputed shortest paths to the goals.
    shortest_paths: Optional[List[ShortestPathPoint]] = None
@attr.s(auto_attribs=True, kw_only=True)
class RoomNavigationEpisode(NavigationEpisode):
    r"""Navigation episode whose goals are rooms rather than points.

    Identical to :class:`NavigationEpisode` except that ``goals`` is
    narrowed to :class:`RoomGoal` entries.

    Args:
        episode_id: id of episode in the dataset, usually episode number
        scene_id: id of scene in scene dataset
        start_position: numpy ndarray containing 3 entries for (x, y, z)
        start_rotation: numpy ndarray with 4 entries for (x, y, z, w)
            elements of unit quaternion (versor) representing agent 3D
            orientation. ref: https://en.wikipedia.org/wiki/Versor
        goals: list of room goal specifications
        start_room: room id
        shortest_paths: list containing shortest paths to goals
    """
    # Required: at least one room goal (validator rejects None).
    goals: List[RoomGoal] = attr.ib(
        default=None, validator=not_none_validator
    )
@registry.register_sensor
class EpisodicGPSAndCompassSensor(Sensor):
    r"""Sensor reporting the agent's global heading and horizontal position.

    The observation is ``[theta, x, z]`` (float64) where ``theta`` is the
    heading in degrees derived from the agent's world rotation matrix and
    ``(x, z)`` are the horizontal components of the agent's world position.
    """

    def __init__(self, sim: Simulator, config: Config):
        self._sim = sim
        super().__init__(config=config)

    def _get_uuid(self, *args: Any, **kwargs: Any):
        return "gps_and_compass"

    def _get_sensor_type(self, *args: Any, **kwargs: Any):
        return SensorTypes.POSITION

    def _get_observation_space(self, *args: Any, **kwargs: Any):
        float_info = np.finfo(np.float32)
        return spaces.Box(
            low=float_info.min,
            high=float_info.max,
            shape=(3,),
            dtype=np.float32,
        )

    def get_observation(self, observations, episode):
        agent_state = self._sim.get_agent_state()
        rot_mat = quaternion.as_rotation_matrix(agent_state.rotation)
        # Heading (degrees) recovered from the first row of the rotation
        # matrix; equivalent to rotating the forward axis and taking atan2.
        heading_deg = np.rad2deg(np.arctan2(rot_mat[0, 2], rot_mat[0, 0]))
        pos = agent_state.position
        return np.array([heading_deg, pos[0], pos[2]], dtype=np.float64)
@registry.register_sensor
class AgentRotationSensor(Sensor):
    r"""Sensor exposing the agent's world-frame rotation as a 3x3 matrix."""

    def __init__(self, sim: Simulator, config: Config):
        self._sim = sim
        super().__init__(config=config)

    def _get_uuid(self, *args: Any, **kwargs: Any):
        return "agent_rotation"

    def _get_sensor_type(self, *args: Any, **kwargs: Any):
        return SensorTypes.POSITION

    def _get_observation_space(self, *args: Any, **kwargs: Any):
        float_info = np.finfo(np.float32)
        return spaces.Box(
            low=float_info.min,
            high=float_info.max,
            shape=(3, 3),
            dtype=np.float32,
        )

    def get_observation(self, observations, episode):
        # Convert the agent's world rotation quaternion to a matrix.
        state = self._sim.get_agent_state()
        return quaternion.as_rotation_matrix(state.rotation)
@registry.register_sensor
class AgentPositionSensor(Sensor):
    r"""Sensor exposing the agent's absolute world position (x, y, z)."""

    def __init__(self, sim: Simulator, config: Config):
        self._sim = sim
        super().__init__(config=config)

    def _get_uuid(self, *args: Any, **kwargs: Any):
        return "agent_position"

    def _get_sensor_type(self, *args: Any, **kwargs: Any):
        return SensorTypes.POSITION

    def _get_observation_space(self, *args: Any, **kwargs: Any):
        float_info = np.finfo(np.float32)
        return spaces.Box(
            low=float_info.min,
            high=float_info.max,
            shape=(3,),
            dtype=np.float32,
        )

    def get_observation(self, observations, episode):
        # World-frame (x, y, z) of the agent as float64.
        pos = self._sim.get_agent_state().position
        return np.array([pos[0], pos[1], pos[2]], dtype=np.float64)
@registry.register_sensor
class PointGoalSensor(Sensor):
    r"""Sensor for PointGoal observations used in the PointNav task.

    Reports the vector from the agent to the first episode goal, expressed
    in the agent's coordinate frame (forward is negative-z).  With
    ``GOAL_FORMAT == "POLAR"`` the observation is ``(rho, phi)`` where
    ``phi`` is the azimuth to the goal; otherwise it is the cartesian
    3-vector.

    Args:
        sim: reference to the simulator for calculating task observations.
        config: config for the PointGoal sensor; may contain GOAL_FORMAT,
            one of "CARTESIAN" (default) or "POLAR".

    Attributes:
        _goal_format: goal encoding, "CARTESIAN" or "POLAR".
    """

    def __init__(self, sim: Simulator, config: Config):
        self._sim = sim
        self._goal_format = getattr(config, "GOAL_FORMAT", "CARTESIAN")
        assert self._goal_format in ["CARTESIAN", "POLAR"]
        super().__init__(config=config)

    def _get_uuid(self, *args: Any, **kwargs: Any):
        return "pointgoal"

    def _get_sensor_type(self, *args: Any, **kwargs: Any):
        return SensorTypes.PATH

    def _get_observation_space(self, *args: Any, **kwargs: Any):
        shape = (3,) if self._goal_format == "CARTESIAN" else (2,)
        float_info = np.finfo(np.float32)
        return spaces.Box(
            low=float_info.min,
            high=float_info.max,
            shape=shape,
            dtype=np.float32,
        )

    def get_observation(self, observations, episode):
        state = self._sim.get_agent_state()
        goal_world = np.array(episode.goals[0].position, dtype=np.float32)
        # Vector to the goal, rotated into the agent's frame.
        to_goal_agent = quaternion_rotate_vector(
            state.rotation.inverse(), goal_world - state.position
        )
        if self._goal_format != "POLAR":
            return to_goal_agent
        rho, phi = cartesian_to_polar(-to_goal_agent[2], to_goal_agent[0])
        return np.array([rho, -phi], dtype=np.float32)
@registry.register_sensor
class RoomGoalSensor(Sensor):
    r"""Sensor for RoomGoal observations used in the RoomNav task.

    The observation is a single integer encoding the room category of the
    episode's first goal, per the fixed ``room_name_to_id`` mapping.

    Args:
        sim: reference to the simulator for calculating task observations.
        config: config for the RoomGoal sensor.
    """

    def __init__(self, sim: Simulator, config: Config):
        self._sim = sim
        # Fixed mapping from room category name to integer label.
        self.room_name_to_id = {
            'bathroom': 0,
            'bedroom': 1,
            'dining room': 2,
            'kitchen': 3,
            'living room': 4
        }
        super().__init__(config=config)

    def _get_uuid(self, *args: Any, **kwargs: Any):
        return "roomgoal"

    def _get_sensor_type(self, *args: Any, **kwargs: Any):
        return SensorTypes.PATH

    def _get_observation_space(self, *args: Any, **kwargs: Any):
        room_ids = self.room_name_to_id.values()
        return spaces.Box(
            low=min(room_ids),
            high=max(room_ids),
            shape=(1,),
            dtype=np.int64,
        )

    def get_observation(self, observations, episode):
        room_id = self.room_name_to_id[episode.goals[0].room_name]
        return np.array([room_id])
@registry.register_sensor
class StaticPointGoalSensor(Sensor):
    r"""Sensor for PointGoal observations used in the StaticPointNav task.

    Identical to :class:`PointGoalSensor` except that the goal vector is
    computed once per episode (at the agent's state when the episode is
    first observed) and returned unchanged for the rest of the episode.
    For the agent in simulator the forward direction is along negative-z.
    In polar coordinate format the angle returned is azimuth to the goal.

    Args:
        sim: reference to the simulator for calculating task observations.
        config: config for the sensor; may contain GOAL_FORMAT, one of
            "CARTESIAN" (default) or "POLAR".

    Attributes:
        _goal_format: goal encoding, "CARTESIAN" or "POLAR".
    """

    def __init__(self, sim: Simulator, config: Config):
        self._sim = sim
        self._goal_format = getattr(config, "GOAL_FORMAT", "CARTESIAN")
        assert self._goal_format in ["CARTESIAN", "POLAR"]
        # Consistency fix: every other sensor in this module initializes
        # the base class with ``config=config``; the old positional
        # ``super().__init__(sim, config)`` passed the simulator where the
        # base class does not expect it.
        super().__init__(config=config)
        # Cached goal vector and the id of the episode it belongs to.
        self._initial_vector = None
        self.current_episode_id = None

    def _get_uuid(self, *args: Any, **kwargs: Any):
        return "static_pointgoal"

    def _get_sensor_type(self, *args: Any, **kwargs: Any):
        return SensorTypes.PATH

    def _get_observation_space(self, *args: Any, **kwargs: Any):
        if self._goal_format == "CARTESIAN":
            sensor_shape = (3,)
        else:
            sensor_shape = (2,)
        return spaces.Box(
            low=np.finfo(np.float32).min,
            high=np.finfo(np.float32).max,
            shape=sensor_shape,
            dtype=np.float32,
        )

    def get_observation(self, observations, episode):
        episode_id = (episode.episode_id, episode.scene_id)
        if self.current_episode_id != episode_id:
            # Only compute the direction vector when a new episode starts;
            # afterwards keep returning the cached episode-start vector.
            self.current_episode_id = episode_id
            agent_state = self._sim.get_agent_state()
            direction_vector = (
                np.array(episode.goals[0].position, dtype=np.float32)
                - agent_state.position
            )
            direction_vector_agent = quaternion_rotate_vector(
                agent_state.rotation.inverse(), direction_vector
            )
            if self._goal_format == "POLAR":
                rho, phi = cartesian_to_polar(
                    -direction_vector_agent[2], direction_vector_agent[0]
                )
                direction_vector_agent = np.array(
                    [rho, -phi], dtype=np.float32
                )
            self._initial_vector = direction_vector_agent
        return self._initial_vector
@registry.register_sensor
class HeadingSensor(Sensor):
r"""Sensor for observing the agent's heading in the global coordinate
frame.
Args:
sim: reference to the simulator | |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import tensorflow as tf
from IPython import display
from google.colab import output
from witwidget.notebook import base
# Python functions for requests from javascript. Each wrapper resolves the
# WitWidget instance addressed by ``wit_id`` and forwards the request; the
# register_callback calls below expose them to the colab frontend.
def infer_examples(wit_id):
  """Run inference on the current examples of the addressed widget."""
  WitWidget.widgets[wit_id].infer()


def delete_example(wit_id, index):
  """Delete the example at ``index`` from the addressed widget."""
  WitWidget.widgets[wit_id].delete_example(index)


def duplicate_example(wit_id, index):
  """Duplicate the example at ``index`` in the addressed widget."""
  WitWidget.widgets[wit_id].duplicate_example(index)


def update_example(wit_id, index, example):
  """Replace the example at ``index`` with ``example``."""
  WitWidget.widgets[wit_id].update_example(index, example)


def get_eligible_features(wit_id):
  """Fetch the features eligible for partial-dependence plots."""
  WitWidget.widgets[wit_id].get_eligible_features()


def sort_eligible_features(wit_id, details):
  """Sort the eligible features according to ``details``."""
  WitWidget.widgets[wit_id].sort_eligible_features(details)


def infer_mutants(wit_id, details):
  """Run inference over mutated examples for charting."""
  WitWidget.widgets[wit_id].infer_mutants(details)


def compute_custom_distance(wit_id, index, callback_name, params):
  """Compute a custom distance for the example at ``index``."""
  WitWidget.widgets[wit_id].compute_custom_distance(
      index, callback_name, params)


# Expose the wrappers to the notebook frontend (same registration names
# and order as before).
output.register_callback('notebook.InferExamples', infer_examples)
output.register_callback('notebook.DeleteExample', delete_example)
output.register_callback('notebook.DuplicateExample', duplicate_example)
output.register_callback('notebook.UpdateExample', update_example)
output.register_callback('notebook.GetEligibleFeatures', get_eligible_features)
output.register_callback('notebook.SortEligibleFeatures', sort_eligible_features)
output.register_callback('notebook.InferMutants', infer_mutants)
output.register_callback('notebook.ComputeCustomDistance',
                         compute_custom_distance)
# HTML/javascript for the WIT frontend.
WIT_HTML = """
<script>
(function() {{
const id = {id};
const wit = document.querySelector("#wit");
wit.style.height = '{height}px';
let mutantFeature = null;
let stagedExamples = [];
let prevExampleCountdown = 0;
let stagedInferences = {{}};
let prevInferencesCountdown = 0;
// Listeners from WIT element events which pass requests to python.
wit.addEventListener("infer-examples", e => {{
google.colab.kernel.invokeFunction(
'notebook.InferExamples', [id], {{}});
}});
wit.addEventListener("compute-custom-distance", e => {{
google.colab.kernel.invokeFunction(
'notebook.ComputeCustomDistance',
[id, e.detail.index, e.detail.callback, e.detail.params],
{{}});
}});
wit.addEventListener("delete-example", e => {{
google.colab.kernel.invokeFunction(
'notebook.DeleteExample', [id, e.detail.index], {{}});
}});
wit.addEventListener("duplicate-example", e => {{
google.colab.kernel.invokeFunction(
'notebook.DuplicateExample', [id, e.detail.index], {{}});
}});
wit.addEventListener("update-example", e => {{
google.colab.kernel.invokeFunction(
'notebook.UpdateExample',
[id, e.detail.index, e.detail.example],
{{}});
}});
wit.addEventListener('get-eligible-features', e => {{
google.colab.kernel.invokeFunction(
'notebook.GetEligibleFeatures', [id], {{}});
}});
wit.addEventListener('infer-mutants', e => {{
mutantFeature = e.detail.feature_name;
google.colab.kernel.invokeFunction(
'notebook.InferMutants', [id, e.detail], {{}});
}});
wit.addEventListener('sort-eligible-features', e => {{
google.colab.kernel.invokeFunction(
'notebook.SortEligibleFeatures', [id, e.detail], {{}});
}});
// Javascript callbacks called by python code to communicate with WIT
// Polymer element.
window.backendError = error => {{
wit.handleError(error.msg);
}};
window.inferenceCallback = res => {{
// If starting a new set of data, reset the staged results.
if (res.countdown >= prevInferencesCountdown) {{
stagedInferences = res.inferences;
}}
prevInferencesCountdown = res.countdown;
for (let i = 0; i < res.results.length; i++) {{
if (wit.modelType == 'classification') {{
stagedInferences.inferences.results[i].classificationResult.classifications.push(...res.results[i]);
}}
else {{
stagedInferences.inferences.results[i].regressionResult.regressions.push(...res.results[i]);
}}
const extras = res.extra[i];
for (let key of Object.keys(extras)) {{
stagedInferences.extra_outputs[i][key].push(...extras[key]);
}}
}}
stagedInferences.inferences.indices.push(...res.indices);
// If this is the final chunk, set the staged results.
if (res.countdown === 0) {{
wit.labelVocab = stagedInferences.label_vocab;
wit.inferences = stagedInferences.inferences;
wit.extraOutputs = {{indices: wit.inferences.indices,
extra: stagedInferences.extra_outputs}};
}}
}};
window.distanceCallback = callbackDict => {{
wit.invokeCustomDistanceCallback(callbackDict);
}};
window.spriteCallback = spriteUrl => {{
if (!wit.updateSprite) {{
requestAnimationFrame(() => window.spriteCallback(spriteUrl));
return;
}}
wit.hasSprite = true;
wit.localAtlasUrl = spriteUrl;
wit.updateSprite();
}};
window.eligibleFeaturesCallback = features => {{
wit.partialDepPlotEligibleFeatures = features;
}};
window.sortEligibleFeaturesCallback = features => {{
wit.partialDepPlotEligibleFeatures = features;
}};
window.inferMutantsCallback = chartInfo => {{
wit.makeChartForFeature(chartInfo.chartType, mutantFeature,
chartInfo.data);
}};
window.configCallback = config => {{
if (!wit.updateNumberOfModels) {{
requestAnimationFrame(() => window.configCallback(config));
return;
}}
if ('inference_address' in config) {{
let addresses = config['inference_address'];
if ('inference_address_2' in config) {{
addresses += ',' + config['inference_address_2'];
}}
wit.inferenceAddress = addresses;
}}
if ('model_name' in config) {{
let names = config['model_name'];
if ('model_name_2' in config) {{
names += ',' + config['model_name_2'];
}}
wit.modelName = names;
}}
if ('model_type' in config) {{
wit.modelType = config['model_type'];
}}
if ('are_sequence_examples' in config) {{
wit.sequenceExamples = config['are_sequence_examples'];
}}
if ('max_classes' in config) {{
wit.maxInferenceEntriesPerRun = config['max_classes'];
}}
if ('multiclass' in config) {{
wit.multiClass = config['multiclass'];
}}
wit.updateNumberOfModels();
if ('target_feature' in config) {{
wit.selectedLabelFeature = config['target_feature'];
}}
if ('uses_custom_distance_fn' in config) {{
wit.customDistanceFunctionSet = true;
}} else {{
wit.customDistanceFunctionSet = false;
}}
}};
window.updateExamplesCallback = res => {{
// If starting a new set of data, reset the staged examples.
if (res.countdown >= prevExampleCountdown) {{
stagedExamples = [];
}}
prevExampleCountdown = res.countdown;
stagedExamples.push(...res.examples);
if (res.countdown === 0) {{
// If this is the final chunk, set the staged examples.
window.commitUpdatedExamples();
}}
}};
window.commitUpdatedExamples = () => {{
if (!wit.updateExampleContents) {{
requestAnimationFrame(() => window.commitUpdatedExamples());
return;
}}
wit.updateExampleContents(stagedExamples, false);
if (wit.localAtlasUrl) {{
window.spriteCallback(wit.localAtlasUrl);
}}
}};
// BroadcastChannels allows examples to be updated by a call from an
// output cell that isn't the cell hosting the WIT widget.
const channelName = 'updateExamples' + id;
const updateExampleListener = new BroadcastChannel(channelName);
updateExampleListener.onmessage = msg => {{
window.updateExamplesCallback(msg.data);
}};
}})();
</script>
"""
class WitWidget(base.WitWidgetBase):
"""WIT widget for colab."""
# Static instance list of constructed WitWidgets so python global functions
# can call into instances of this object
widgets = []
# Static instance index to keep track of ID number of each constructed
# WitWidget.
index = 0
  def __init__(self, config_builder, height=1000, delay_rendering=False):
    """Constructor for colab notebook WitWidget.

    Args:
      config_builder: WitConfigBuilder object containing settings for WIT.
      height: Optional height in pixels for WIT to occupy. Defaults to 1000.
      delay_rendering: Optional. If True, do not render WIT on construction;
        render only when the ``render`` method is called. Defaults to False.
    """
    self._rendering_complete = False
    # Index of this widget within the static WitWidget.widgets list; used by
    # the JS side to address this instance.
    self.id = WitWidget.index
    self.height = height
    # Guards against overlapping example transfers to the frontend.
    self.set_examples_in_progress = False
    # How large of example slices should be sent to the front-end at a time,
    # in order to avoid issues with kernel crashes on large messages.
    self.SLICE_SIZE = 10000
    base.WitWidgetBase.__init__(self, config_builder)
    # Add this instance to the static instance list.
    WitWidget.widgets.append(self)
    if not delay_rendering:
      self.render()
    # Increment the static instance WitWidget index counter
    WitWidget.index += 1
  def render(self):
    """Render the widget to the display.

    Displays the WIT element and its driving script, then sends the config
    and streams the examples in chunks (the JS callbacks retry via
    requestAnimationFrame until the element is ready), and finally the
    sprite atlas.
    """
    # Display WIT Polymer element.
    display.display(display.HTML(self._get_element_html()))
    display.display(display.HTML(
        WIT_HTML.format(height=self.height, id=self.id)))
    # Send the provided config and examples to JS.
    output.eval_js("""configCallback({config})""".format(
        config=json.dumps(self.config)))
    self.set_examples_in_progress = True
    self._set_examples_looper('updateExamplesCallback({data})')
    self.set_examples_in_progress = False
    self._generate_sprite()
    self._rendering_complete = True
def _get_element_html(self):
return tf.io.gfile.GFile(
'/usr/local/share/jupyter/nbextensions/wit-widget/wit_jupyter.html'
).read()
  def set_examples(self, examples):
    """Set the examples displayed in WIT.

    No-op (with a printed warning) if a transfer to the frontend is
    already in progress; the in-progress flag prevents interleaved chunk
    streams from corrupting the staged examples on the JS side.
    """
    if self.set_examples_in_progress:
      print('Cannot set examples while transfer is in progress.')
      return
    self.set_examples_in_progress = True
    base.WitWidgetBase.set_examples(self, examples)
    # If this is called after rendering, use a BroadcastChannel to send
    # the updated examples to the visualization. Inside of the ctor, no action
    # is necessary as the rendering handles all communication.
    if self._rendering_complete:
      # Use BroadcastChannel to allow this call to be made in a separate colab
      # cell from the cell that displays WIT.
      channel_str = """(new BroadcastChannel('updateExamples{}'))""".format(
          self.id)
      eval_js_str = channel_str + '.postMessage({data})'
      self._set_examples_looper(eval_js_str)
      self._generate_sprite()
    self.set_examples_in_progress = False
def _set_examples_looper(self, eval_js_str):
# Send the set examples to JS in chunks.
num_pieces = math.ceil(len(self.examples) / self.SLICE_SIZE)
i = 0
while num_pieces > 0:
num_pieces -= 1
exs = self.examples[i : i + self.SLICE_SIZE]
piece = {'examples': exs, 'countdown': num_pieces}
output.eval_js(eval_js_str.format(data=json.dumps(piece)))
i += self.SLICE_SIZE
def infer(self):
try:
inferences = base.WitWidgetBase.infer_impl(self)
# Parse out the inferences from the returned stucture and empty the
# structure of contents, keeping its nested structure.
# Chunks of the inference results will be sent to the front-end and
# re-assembled.
indices = inferences['inferences']['indices'][:]
inferences['inferences']['indices'] = []
res2 = []
extra = {}
extra2 = {}
model_inference = inferences['inferences']['results'][0]
if ('extra_outputs' in inferences and len(inferences['extra_outputs']) and
inferences['extra_outputs'][0]):
for key in inferences['extra_outputs'][0]:
extra[key] = inferences['extra_outputs'][0][key][:]
inferences['extra_outputs'][0][key] = []
if 'classificationResult' in model_inference:
res = model_inference['classificationResult']['classifications'][:]
model_inference['classificationResult']['classifications'] = []
else:
res = model_inference['regressionResult']['regressions'][:]
model_inference['regressionResult']['regressions'] = []
if len(inferences['inferences']['results']) > 1:
if ('extra_outputs' in inferences and
len(inferences['extra_outputs']) > 1 and
inferences['extra_outputs'][1]):
for key in inferences['extra_outputs'][1]:
extra2[key] = inferences['extra_outputs'][1][key][:]
inferences['extra_outputs'][1][key] = []
model_2_inference = inferences['inferences']['results'][1]
if 'classificationResult' in model_2_inference:
res2 = model_2_inference['classificationResult']['classifications'][:]
model_2_inference['classificationResult']['classifications'] = []
else:
res2 = model_2_inference['regressionResult']['regressions'][:]
model_2_inference['regressionResult']['regressions'] = []
i = 0
num_pieces = math.ceil(len(indices) / self.SLICE_SIZE)
# Loop over each piece to send.
while num_pieces > 0:
num_pieces -= 1
piece = [res[i : i + self.SLICE_SIZE]]
extra_piece = [{}]
for key in extra:
extra_piece[0][key] = extra[key][i : i + self.SLICE_SIZE]
if res2:
piece.append(res2[i : i + self.SLICE_SIZE])
extra_piece.append({})
for key in extra2:
extra_piece[1][key] = extra2[key][i : i + self.SLICE_SIZE]
ind_piece = indices[i : i + self.SLICE_SIZE]
data = {'results': piece, 'indices': ind_piece, 'extra': extra_piece,
'countdown': num_pieces}
# For the first segment to send, also send the blank inferences
# structure to be filled in. This was cleared of contents above but is
# used | |
# Adapted by <NAME>, 2019
#
# Based on Detectron.pytorch/lib/roi_data/fast_rcnn.py
# Original license text:
# --------------------------------------------------------
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Construct minibatches for Fast R-CNN training. Handles the minibatch blobs
that are specific to Fast R-CNN. Other blobs that are generic to RPN, etc.
are handled by their respecitive roi_data modules.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import numpy.random as npr
import logging
from core.config import cfg
import utils_rel.boxes_rel as box_utils_rel
import utils.blob as blob_utils
import utils.fpn as fpn_utils
logger = logging.getLogger(__name__)
def add_rel_blobs(blobs, im_scales, roidb):
    """Add blobs needed for training Fast R-CNN style relation models.

    Samples relation pairs per image, appends each per-image blob to the
    corresponding list in ``blobs``, then concatenates every non-empty
    list into a single array. When FPN multilevel RoIs are enabled, the
    RoIs are additionally distributed over FPN levels.

    Returns:
        True (kept for compatibility with the roi_data blob-adder API).
    """
    # Sample training pairs from each image and append them to the blob lists.
    for batch_idx, entry in enumerate(roidb):
        pair_blobs = _sample_pairs(entry, im_scales[batch_idx], batch_idx)
        for key, value in pair_blobs.items():
            blobs[key].append(value)
    # Concat the per-image blob lists into tensors.
    for key, value in blobs.items():
        if isinstance(value, list) and len(value) > 0:
            blobs[key] = np.concatenate(value)
    if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_ROIS:
        _add_rel_multilevel_rois(blobs)
    return True
def _sample_pairs(roidb, im_scale, batch_idx):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
fg_pairs_per_image = cfg.TRAIN.FG_REL_SIZE_PER_IM
pairs_per_image = int(cfg.TRAIN.FG_REL_SIZE_PER_IM / cfg.TRAIN.FG_REL_FRACTION) # need much more pairs since it's quadratic
max_pair_overlaps = roidb['max_pair_overlaps']
if cfg.MODEL.MULTI_RELATION:
prd_gt_overlaps = roidb['prd_gt_overlaps'].toarray()
prd_class_num = prd_gt_overlaps.shape[1]
gt_pair_inds, gt_pair_class = np.where(prd_gt_overlaps > 1.0 - 1e-4)
fg_pair_inds, fg_pair_class = np.where((prd_gt_overlaps >= cfg.TRAIN.FG_THRESH) &
(prd_gt_overlaps <= 1.0 - 1e-4))
hash_gt_pair_inds = prd_class_num * gt_pair_inds + gt_pair_class
hash_fg_pair_inds = prd_class_num * fg_pair_inds + fg_pair_class
fg_pairs_per_this_image = np.minimum(fg_pairs_per_image, hash_gt_pair_inds.size + hash_fg_pair_inds.size)
if hash_fg_pair_inds.size > 0 and fg_pairs_per_this_image > hash_gt_pair_inds.size:
hash_fg_pair_inds = npr.choice(
hash_fg_pair_inds, size=(fg_pairs_per_this_image - hash_gt_pair_inds.size), replace=False)
hash_fg_pair_inds = np.append(hash_fg_pair_inds, hash_gt_pair_inds)
elif fg_pairs_per_this_image <= hash_gt_pair_inds.size:
hash_gt_pair_inds = npr.choice(
hash_gt_pair_inds, size=fg_pairs_per_this_image, replace=False)
hash_fg_pair_inds = hash_gt_pair_inds
else:
hash_fg_pair_inds = hash_gt_pair_inds
blob_dict = {}
if cfg.MODEL.USE_BG:
bg_pair_inds, bg_pair_class_inds = np.where((prd_gt_overlaps < cfg.TRAIN.BG_THRESH_HI))
hash_bg_pair_inds = prd_class_num * bg_pair_inds + bg_pair_class_inds
bg_pairs_per_this_image = pairs_per_image - fg_pairs_per_this_image
bg_pairs_per_this_image = np.minimum(bg_pairs_per_this_image, hash_bg_pair_inds.size)
if hash_bg_pair_inds.size > 0:
hash_bg_pair_inds = npr.choice(
hash_bg_pair_inds, size=bg_pairs_per_this_image, replace=False)
hash_keep_pair_inds = np.append(hash_fg_pair_inds, hash_bg_pair_inds)
multi_prd_labels = np.zeros(hash_keep_pair_inds.size, dtype=np.int32)
multi_prd_labels[:hash_fg_pair_inds.size] = 1.0 #fg_multi_prd_labels
keep_pair_inds = np.append(hash_fg_pair_inds // prd_class_num, hash_bg_pair_inds // prd_class_num)
keep_pair_class = np.append(hash_fg_pair_inds % prd_class_num, hash_bg_pair_inds % prd_class_num)
else:
multi_prd_labels = np.ones(fg_multi_prd_labels.size, dtype=np.int32) #fg_multi_prd_labels
keep_pair_inds = np.append(hash_fg_pair_inds // prd_class_num)
keep_pair_class = np.append(hash_fg_pair_inds % prd_class_num)
blob_dict['multi_prd_labels_int32'] = multi_prd_labels.astype(np.int32, copy=False)
blob_dict['keep_pair_class_int32'] = keep_pair_class.astype(np.int32, copy=False)
blob_dict['fg_size'] = np.array([hash_fg_pair_inds.size], dtype=np.int32)
else:
gt_pair_inds = np.where(max_pair_overlaps > 1.0 - 1e-4)[0]
fg_pair_inds = np.where((max_pair_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_pair_overlaps <= 1.0 - 1e-4))[0]
fg_pairs_per_this_image = np.minimum(fg_pairs_per_image, gt_pair_inds.size + fg_pair_inds.size)
# Sample foreground regions without replacement
if fg_pair_inds.size > 0 and fg_pairs_per_this_image > gt_pair_inds.size:
fg_pair_inds = npr.choice(
fg_pair_inds, size=(fg_pairs_per_this_image - gt_pair_inds.size), replace=False)
fg_pair_inds = np.append(fg_pair_inds, gt_pair_inds)
elif fg_pairs_per_this_image <= gt_pair_inds.size:
gt_pair_inds = npr.choice(
gt_pair_inds, size=fg_pairs_per_this_image, replace=False)
fg_pair_inds = gt_pair_inds
else:
fg_pair_inds = gt_pair_inds
# Label is the class each RoI has max overlap with
fg_prd_labels = roidb['max_prd_classes'][fg_pair_inds]
blob_dict = dict(
fg_prd_labels_int32=fg_prd_labels.astype(np.int32, copy=False))
if cfg.MODEL.USE_BG:
bg_pair_inds = np.where((max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_pairs_per_this_image = pairs_per_image - fg_pairs_per_this_image
bg_pairs_per_this_image = np.minimum(bg_pairs_per_this_image, bg_pair_inds.size)
# Sample foreground regions without replacement
if bg_pair_inds.size > 0:
bg_pair_inds = npr.choice(
bg_pair_inds, size=bg_pairs_per_this_image, replace=False)
# logger.info('{} : {}'.format(fg_pair_inds.size, bg_pair_inds.size))
keep_pair_inds = np.append(fg_pair_inds, bg_pair_inds)
all_prd_labels = np.zeros(keep_pair_inds.size, dtype=np.int32)
all_prd_labels[:fg_pair_inds.size] = fg_prd_labels + 1 # class should start from 1
else:
keep_pair_inds = fg_pair_inds
all_prd_labels = fg_prd_labels
blob_dict['all_prd_labels_int32'] = all_prd_labels.astype(np.int32, copy=False)
blob_dict['fg_size'] = np.array([fg_pair_inds.size], dtype=np.int32) # this is used to check if there is at least one fg to learn
sampled_sbj_boxes = roidb['sbj_boxes'][keep_pair_inds]
sampled_obj_boxes = roidb['obj_boxes'][keep_pair_inds]
sampled_all_boxes = roidb['all_boxes']
det_labels = roidb['det_labels']
sampled_sbj_inds = roidb['sbj_id'][keep_pair_inds]
sampled_obj_inds = roidb['obj_id'][keep_pair_inds]
# Scale rois and format as (batch_idx, x1, y1, x2, y2)
sampled_sbj_rois = sampled_sbj_boxes * im_scale
sampled_obj_rois = sampled_obj_boxes * im_scale
sampled_all_rois = sampled_all_boxes * im_scale
repeated_batch_idx = batch_idx * blob_utils.ones((keep_pair_inds.shape[0], 1))
all_boxes_repeated_batch_idx = batch_idx * blob_utils.ones((sampled_all_boxes.shape[0], 1))
sampled_sbj_rois = np.hstack((repeated_batch_idx, sampled_sbj_rois))
sampled_obj_rois = np.hstack((repeated_batch_idx, sampled_obj_rois))
sampled_all_rois = np.hstack((all_boxes_repeated_batch_idx, sampled_all_rois))
int_repeated_batch_idx = batch_idx * np.ones((keep_pair_inds.shape[0], 1), dtype=np.int)
blob_dict['sbj_inds'] = np.hstack((repeated_batch_idx, sampled_sbj_inds.reshape(-1, 1)))
blob_dict['obj_inds'] = np.hstack((repeated_batch_idx, sampled_obj_inds.reshape(-1, 1)))
blob_dict['sbj_rois'] = sampled_sbj_rois
blob_dict['obj_rois'] = sampled_obj_rois
blob_dict['det_rois'] = sampled_all_rois
blob_dict['det_labels'] = det_labels
sampled_rel_rois = box_utils_rel.rois_union(sampled_sbj_rois, sampled_obj_rois)
blob_dict['rel_rois'] = sampled_rel_rois
if cfg.MODEL.USE_SPATIAL_FEAT:
sampled_spt_feat = box_utils_rel.get_spt_features(
sampled_sbj_boxes, sampled_obj_boxes, roidb['width'], roidb['height'])
blob_dict['spt_feat'] = sampled_spt_feat
if cfg.MODEL.USE_FREQ_BIAS:
sbj_labels = roidb['max_sbj_classes'][keep_pair_inds]
obj_labels = roidb['max_obj_classes'][keep_pair_inds]
blob_dict['all_sbj_labels_int32'] = sbj_labels.astype(np.int32, copy=False)
blob_dict['all_obj_labels_int32'] = obj_labels.astype(np.int32, copy=False)
if cfg.MODEL.USE_NODE_CONTRASTIVE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_SO_AWARE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_P_AWARE_LOSS:
nodes_per_image = cfg.MODEL.NODE_SAMPLE_SIZE
max_sbj_overlaps = roidb['max_sbj_overlaps']
max_obj_overlaps = roidb['max_obj_overlaps']
# sbj
# Here a naturally existing assumption is, each positive sbj should have at least one positive obj
sbj_pos_pair_pos_inds = np.where((max_pair_overlaps >= cfg.TRAIN.FG_THRESH))[0]
sbj_pos_obj_pos_pair_neg_inds = np.where((max_sbj_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_obj_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
sbj_pos_obj_neg_pair_neg_inds = np.where((max_sbj_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_obj_overlaps < cfg.TRAIN.FG_THRESH) &
(max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
if sbj_pos_pair_pos_inds.size > 0:
sbj_pos_pair_pos_inds = npr.choice(
sbj_pos_pair_pos_inds,
size=int(min(nodes_per_image, sbj_pos_pair_pos_inds.size)),
replace=False)
if sbj_pos_obj_pos_pair_neg_inds.size > 0:
sbj_pos_obj_pos_pair_neg_inds = npr.choice(
sbj_pos_obj_pos_pair_neg_inds,
size=int(min(nodes_per_image, sbj_pos_obj_pos_pair_neg_inds.size)),
replace=False)
sbj_pos_pair_neg_inds = sbj_pos_obj_pos_pair_neg_inds
if nodes_per_image - sbj_pos_obj_pos_pair_neg_inds.size > 0 and sbj_pos_obj_neg_pair_neg_inds.size > 0:
sbj_pos_obj_neg_pair_neg_inds = npr.choice(
sbj_pos_obj_neg_pair_neg_inds,
size=int(min(nodes_per_image - sbj_pos_obj_pos_pair_neg_inds.size, sbj_pos_obj_neg_pair_neg_inds.size)),
replace=False)
sbj_pos_pair_neg_inds = np.append(sbj_pos_pair_neg_inds, sbj_pos_obj_neg_pair_neg_inds)
sbj_pos_inds = np.append(sbj_pos_pair_pos_inds, sbj_pos_pair_neg_inds)
binary_labels_sbj_pos = np.zeros(sbj_pos_inds.size, dtype=np.int32)
binary_labels_sbj_pos[:sbj_pos_pair_pos_inds.size] = 1
blob_dict['binary_labels_sbj_pos_int32'] = binary_labels_sbj_pos.astype(np.int32, copy=False)
prd_pos_labels_sbj_pos = roidb['max_prd_classes'][sbj_pos_pair_pos_inds]
prd_labels_sbj_pos = np.zeros(sbj_pos_inds.size, dtype=np.int32)
prd_labels_sbj_pos[:sbj_pos_pair_pos_inds.size] = prd_pos_labels_sbj_pos + 1
blob_dict['prd_labels_sbj_pos_int32'] = prd_labels_sbj_pos.astype(np.int32, copy=False)
sbj_labels_sbj_pos = roidb['max_sbj_classes'][sbj_pos_inds] + 1
# 1. set all obj labels > 0
obj_labels_sbj_pos = roidb['max_obj_classes'][sbj_pos_inds] + 1
# 2. find those negative obj
max_obj_overlaps_sbj_pos = roidb['max_obj_overlaps'][sbj_pos_inds]
obj_neg_inds_sbj_pos = np.where(max_obj_overlaps_sbj_pos < cfg.TRAIN.FG_THRESH)[0]
obj_labels_sbj_pos[obj_neg_inds_sbj_pos] = 0
blob_dict['sbj_labels_sbj_pos_int32'] = sbj_labels_sbj_pos.astype(np.int32, copy=False)
blob_dict['obj_labels_sbj_pos_int32'] = obj_labels_sbj_pos.astype(np.int32, copy=False)
# this is for freq bias in RelDN
blob_dict['sbj_labels_sbj_pos_fg_int32'] = roidb['max_sbj_classes'][sbj_pos_inds].astype(np.int32, copy=False)
blob_dict['obj_labels_sbj_pos_fg_int32'] = roidb['max_obj_classes'][sbj_pos_inds].astype(np.int32, copy=False)
sampled_sbj_boxes_sbj_pos = roidb['sbj_boxes'][sbj_pos_inds]
sampled_obj_boxes_sbj_pos = roidb['obj_boxes'][sbj_pos_inds]
# Scale rois and format as (batch_idx, x1, y1, x2, y2)
sampled_sbj_rois_sbj_pos = sampled_sbj_boxes_sbj_pos * im_scale
sampled_obj_rois_sbj_pos = sampled_obj_boxes_sbj_pos * im_scale
repeated_batch_idx = batch_idx * blob_utils.ones((sbj_pos_inds.shape[0], 1))
sampled_sbj_rois_sbj_pos = np.hstack((repeated_batch_idx, sampled_sbj_rois_sbj_pos))
sampled_obj_rois_sbj_pos = np.hstack((repeated_batch_idx, sampled_obj_rois_sbj_pos))
blob_dict['sbj_rois_sbj_pos'] = sampled_sbj_rois_sbj_pos
blob_dict['obj_rois_sbj_pos'] = sampled_obj_rois_sbj_pos
sampled_rel_rois_sbj_pos = box_utils_rel.rois_union(sampled_sbj_rois_sbj_pos, sampled_obj_rois_sbj_pos)
blob_dict['rel_rois_sbj_pos'] = sampled_rel_rois_sbj_pos
_, inds_unique_sbj_pos, inds_reverse_sbj_pos = np.unique(
sampled_sbj_rois_sbj_pos, return_index=True, return_inverse=True, axis=0)
assert inds_reverse_sbj_pos.shape[0] == sampled_sbj_rois_sbj_pos.shape[0]
blob_dict['inds_unique_sbj_pos'] = inds_unique_sbj_pos
blob_dict['inds_reverse_sbj_pos'] = inds_reverse_sbj_pos
if cfg.MODEL.USE_SPATIAL_FEAT:
sampled_spt_feat_sbj_pos = box_utils_rel.get_spt_features(
sampled_sbj_boxes_sbj_pos, sampled_obj_boxes_sbj_pos, roidb['width'], roidb['height'])
blob_dict['spt_feat_sbj_pos'] = sampled_spt_feat_sbj_pos
# obj
# Here a naturally existing assumption is, each positive obj should have at least one positive sbj
obj_pos_pair_pos_inds = np.where((max_pair_overlaps >= cfg.TRAIN.FG_THRESH))[0]
obj_pos_sbj_pos_pair_neg_inds = np.where((max_obj_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_sbj_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
obj_pos_sbj_neg_pair_neg_inds = np.where((max_obj_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_sbj_overlaps < cfg.TRAIN.FG_THRESH) &
(max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
if obj_pos_pair_pos_inds.size > 0:
obj_pos_pair_pos_inds = npr.choice(
obj_pos_pair_pos_inds,
size=int(min(nodes_per_image, obj_pos_pair_pos_inds.size)),
replace=False)
if obj_pos_sbj_pos_pair_neg_inds.size > 0:
obj_pos_sbj_pos_pair_neg_inds = npr.choice(
obj_pos_sbj_pos_pair_neg_inds,
size=int(min(nodes_per_image, obj_pos_sbj_pos_pair_neg_inds.size)),
replace=False)
obj_pos_pair_neg_inds = obj_pos_sbj_pos_pair_neg_inds
if nodes_per_image - obj_pos_sbj_pos_pair_neg_inds.size > 0 and obj_pos_sbj_neg_pair_neg_inds.size:
obj_pos_sbj_neg_pair_neg_inds = npr.choice(
obj_pos_sbj_neg_pair_neg_inds,
size=int(min(nodes_per_image - obj_pos_sbj_pos_pair_neg_inds.size, obj_pos_sbj_neg_pair_neg_inds.size)),
replace=False)
obj_pos_pair_neg_inds = np.append(obj_pos_pair_neg_inds, obj_pos_sbj_neg_pair_neg_inds)
obj_pos_inds = np.append(obj_pos_pair_pos_inds, obj_pos_pair_neg_inds)
binary_labels_obj_pos = np.zeros(obj_pos_inds.size, dtype=np.int32)
binary_labels_obj_pos[:obj_pos_pair_pos_inds.size] = 1
blob_dict['binary_labels_obj_pos_int32'] = binary_labels_obj_pos.astype(np.int32, copy=False)
prd_pos_labels_obj_pos = roidb['max_prd_classes'][obj_pos_pair_pos_inds]
prd_labels_obj_pos = np.zeros(obj_pos_inds.size, dtype=np.int32)
prd_labels_obj_pos[:obj_pos_pair_pos_inds.size] = prd_pos_labels_obj_pos + 1
blob_dict['prd_labels_obj_pos_int32'] = prd_labels_obj_pos.astype(np.int32, copy=False)
obj_labels_obj_pos = roidb['max_obj_classes'][obj_pos_inds] + 1
# 1. set all sbj labels > 0
sbj_labels_obj_pos = roidb['max_sbj_classes'][obj_pos_inds] + 1
# 2. find those negative sbj
max_sbj_overlaps_obj_pos = roidb['max_sbj_overlaps'][obj_pos_inds]
sbj_neg_inds_obj_pos = np.where(max_sbj_overlaps_obj_pos < cfg.TRAIN.FG_THRESH)[0]
sbj_labels_obj_pos[sbj_neg_inds_obj_pos] = 0
blob_dict['sbj_labels_obj_pos_int32'] = sbj_labels_obj_pos.astype(np.int32, copy=False)
blob_dict['obj_labels_obj_pos_int32'] = obj_labels_obj_pos.astype(np.int32, copy=False)
# this is for freq bias in RelDN
blob_dict['sbj_labels_obj_pos_fg_int32'] = roidb['max_sbj_classes'][obj_pos_inds].astype(np.int32, copy=False)
blob_dict['obj_labels_obj_pos_fg_int32'] = roidb['max_obj_classes'][obj_pos_inds].astype(np.int32, copy=False)
sampled_sbj_boxes_obj_pos = roidb['sbj_boxes'][obj_pos_inds]
sampled_obj_boxes_obj_pos = roidb['obj_boxes'][obj_pos_inds]
# Scale rois and format as (batch_idx, x1, y1, | |
<filename>alerter/src/monitors/contracts/chainlink.py
import copy
import json
import logging
from datetime import datetime
from datetime import timedelta
from http.client import IncompleteRead
from typing import List, Dict, Optional, Tuple
import pika
from requests.exceptions import (ConnectionError as ReqConnectionError,
ReadTimeout, ChunkedEncodingError,
MissingSchema, InvalidSchema, InvalidURL)
from urllib3.exceptions import ProtocolError
from web3 import Web3
from web3.exceptions import ContractLogicError
from web3.middleware import geth_poa_middleware
from src.configs.nodes.chainlink import ChainlinkNodeConfig
from src.message_broker.rabbitmq import RabbitMQApi
from src.monitors.monitor import Monitor
from src.utils.constants.abis.v3 import V3_AGGREGATOR, V3_PROXY
from src.utils.constants.abis.v4 import V4_AGGREGATOR, V4_PROXY
from src.utils.constants.rabbitmq import (
RAW_DATA_EXCHANGE, CHAINLINK_CONTRACTS_RAW_DATA_ROUTING_KEY)
from src.utils.data import get_json, get_prometheus_metrics_data
from src.utils.exceptions import (ComponentNotGivenEnoughDataSourcesException,
MetricNotFoundException, PANICException,
CouldNotRetrieveContractsException,
NoSyncedDataSourceWasAccessibleException)
from src.utils.timing import TimedTaskLimiter
_PROMETHEUS_RETRIEVAL_TIME_PERIOD = 86400
_WEI_WATCHERS_RETRIEVAL_TIME_PERIOD = 86400
class ChainlinkContractsMonitor(Monitor):
"""
The ChainlinkContractsMonitor is able to monitor chainlink contracts of an
EVM based chain.
"""
def __init__(self, monitor_name: str, weiwatchers_url: str,
evm_nodes: List[str], node_configs: List[ChainlinkNodeConfig],
logger: logging.Logger, monitor_period: int,
rabbitmq: RabbitMQApi) -> None:
# An exception is raised if the monitor is not given enough data
# sources. The callee must also make sure that the given node_configs
# have valid prometheus urls, and that prometheus and contracts
# monitoring is enabled.
if len(evm_nodes) == 0 or len(node_configs) == 0:
field = 'evm_nodes' if len(evm_nodes) == 0 else 'node_configs'
raise ComponentNotGivenEnoughDataSourcesException(
monitor_name, field)
super().__init__(monitor_name, logger, monitor_period, rabbitmq)
self._node_configs = node_configs
self._contracts_url = weiwatchers_url
# Construct the Web3 interfaces. DISCLAIMER: There might be an issue
# with open connections not being closed.
self._evm_node_w3_interface = {}
for evm_node_url in evm_nodes:
w3_interface = Web3(Web3.HTTPProvider(
evm_node_url, request_kwargs={'timeout': 2}))
w3_interface.middleware_onion.inject(geth_poa_middleware, layer=0)
self._evm_node_w3_interface[evm_node_url] = w3_interface
# This dict stores the eth address of a chainlink node indexed by the
# node id. The eth address is obtained from prometheus.
self._node_eth_address = {}
# This list stores a list of chain contracts data obtained from the wei
# watchers link
self._contracts_data = []
# This dict stores a list of proxy contract addresses that each node
# participates on, indexed by the node id. The contracts addresses are
# also filtered out according to their version. The proxy addresses are
# used because they are immutable.
self._node_contracts = {}
# This dict stores the last block height monitored for a node and
# contract pair. This will be used to monitor round submissions.
# This dict should have the following structure:
# {<node_id>: {<proxy_contract_address>: <last_block_monitored>}}
self._last_block_monitored = {}
# Data retrieval limiters
self._wei_watchers_retrieval_limiter = TimedTaskLimiter(
timedelta(seconds=float(_WEI_WATCHERS_RETRIEVAL_TIME_PERIOD)))
self._eth_address_retrieval_limiter = TimedTaskLimiter(
timedelta(seconds=float(_PROMETHEUS_RETRIEVAL_TIME_PERIOD)))
    @property
    def node_configs(self) -> List[ChainlinkNodeConfig]:
        """The chainlink node configurations being monitored."""
        return self._node_configs
    @property
    def evm_node_w3_interface(self) -> Dict[str, Web3]:
        """Web3 interface per EVM node url, built in the constructor."""
        return self._evm_node_w3_interface
    @property
    def contracts_url(self) -> str:
        """The wei-watchers url the chain contracts data is fetched from."""
        return self._contracts_url
    @property
    def node_eth_address(self) -> Dict[str, str]:
        """node_id -> ethereum address, as retrieved from prometheus."""
        return self._node_eth_address
    @property
    def contracts_data(self) -> List[Dict]:
        """The chain contracts data last retrieved from the contracts url."""
        return self._contracts_data
    @property
    def node_contracts(self) -> Dict:
        """node_id -> {'v3': [...], 'v4': [...]} proxy contract addresses."""
        return self._node_contracts
    @property
    def last_block_monitored(self) -> Dict:
        """{node_id: {proxy_contract_address: last_block_monitored}} state."""
        return self._last_block_monitored
    @property
    def wei_watchers_retrieval_limiter(self) -> TimedTaskLimiter:
        """Limits how often the chain contracts data is re-retrieved."""
        return self._wei_watchers_retrieval_limiter
    @property
    def eth_address_retrieval_limiter(self) -> TimedTaskLimiter:
        """Limits how often node eth addresses are re-read from prometheus."""
        return self._eth_address_retrieval_limiter
    def _get_chain_contracts(self) -> List[Dict]:
        """
        This function retrieves all the chain contracts along with some data.
        :return: A list of chain contracts together with data.
        """
        # NOTE(review): the trailing `None, True` arguments are defined by
        # get_json in src.utils.data (not visible here) — confirm their
        # meaning there before changing them.
        return get_json(self.contracts_url, self.logger, None, True)
    def _store_chain_contracts(self, contracts_data: List[Dict]) -> None:
        """
        This function stores the contracts data in the state.
        :param contracts_data: The retrieved contracts data
        :return: None
        """
        self._contracts_data = contracts_data
def _get_nodes_eth_address(self) -> Tuple[Dict, bool]:
"""
This function attempts to get all the Ethereum addresses associated with
each node from the prometheus endpoints. For each node it attempts to
connect with the online source to get the eth address, however if the
required data cannot be obtained from any source, the node is not added
to the output dict, and the second element in the tuple is set to True
indicating that the dict does not contain all node ids.
:return: A tuple with the following structure:
({ node_id: node_eth_address }, bool)
"""
metrics_to_retrieve = {
'eth_balance': 'strict',
}
node_eth_address = {}
error_occurred = False
for node_config in self.node_configs:
for prom_url in node_config.node_prometheus_urls:
try:
metrics = get_prometheus_metrics_data(
prom_url, metrics_to_retrieve, self.logger,
verify=False)
for _, data_subset in enumerate(metrics['eth_balance']):
if "account" in json.loads(data_subset):
eth_address = json.loads(data_subset)['account']
node_eth_address[node_config.node_id] = eth_address
break
break
except (ReqConnectionError, ReadTimeout, InvalidURL,
InvalidSchema, MissingSchema, IncompleteRead,
ChunkedEncodingError, ProtocolError) as e:
# If these errors are raised it may still be that another
# source can be accessed
self.logger.debug("Error when trying to access %s of %s",
prom_url, node_config.node_name)
self.logger.debug(e)
except MetricNotFoundException as e:
# If these errors are raised then we can't get valid data
# from any node, as only 1 node is online at the same time.
self.logger.error("Error when trying to access %s of %s",
prom_url, node_config.node_name)
self.logger.exception(e)
break
# If no ethereum address was added for a node, then an error has
# occurred
if node_config.node_id not in node_eth_address:
error_occurred = True
return node_eth_address, error_occurred
    def _store_nodes_eth_addresses(self, node_eth_address: Dict) -> None:
        """
        This function stores the node's associated ethereum addresses obtained
        from prometheus in the state
        :param node_eth_address: A dict associating a node's ID to its ethereum
        :                        address obtained from prometheus
        :return: None
        """
        self._node_eth_address = node_eth_address
def _select_node(self) -> Optional[str]:
"""
This function returns the url of the selected node. A node is selected
if the HttpProvider is connected and the node is not syncing.
:return: The url of the selected node.
: None if no node is selected.
"""
for node_url, w3_interface in self._evm_node_w3_interface.items():
try:
if w3_interface.isConnected() and not w3_interface.eth.syncing:
return node_url
except (ReqConnectionError, ReadTimeout, IncompleteRead,
ChunkedEncodingError, ProtocolError, InvalidURL,
InvalidSchema, MissingSchema) as e:
self.logger.debug("Error when trying to access %s", node_url)
self.logger.debug(e)
return None
def _filter_contracts_by_node(self, selected_node: str) -> Dict:
"""
This function checks which contracts a node participates on.
:param selected_node: The evm node selected to retrieve the data from
:return: A dict indexed by the node_id were each value is another dict
: containing a list of v3 and v4 contracts the node participates
: on. The proxy contract address is used to identify a contract.
"""
w3_interface = self.evm_node_w3_interface[selected_node]
node_contracts = {}
for node_id, eth_address in self._node_eth_address.items():
transformed_eth_address = w3_interface.toChecksumAddress(
eth_address)
v3_participating_contracts = []
v4_participating_contracts = []
for contract_data in self._contracts_data:
aggregator_address = contract_data['contractAddress']
proxy_address = contract_data['proxyAddress']
contract_version = contract_data['contractVersion']
if contract_version == 3:
aggregator_contract = w3_interface.eth.contract(
address=aggregator_address, abi=V3_AGGREGATOR)
oracles = aggregator_contract.functions.getOracles().call()
if transformed_eth_address in oracles:
v3_participating_contracts.append(proxy_address)
elif contract_version == 4:
aggregator_contract = w3_interface.eth.contract(
address=aggregator_address, abi=V4_AGGREGATOR)
transmitters = aggregator_contract.functions.transmitters() \
.call()
if transformed_eth_address in transmitters:
v4_participating_contracts.append(proxy_address)
node_contracts[node_id] = {}
node_contracts[node_id]['v3'] = v3_participating_contracts
node_contracts[node_id]['v4'] = v4_participating_contracts
return node_contracts
    def _store_node_contracts(self, node_contracts: Dict) -> None:
        """
        This function stores the retrieved node_contracts inside the state.
        The expected shape is the one produced by _filter_contracts_by_node:
        {node_id: {'v3': [proxy_addresses], 'v4': [proxy_addresses]}}.
        :param node_contracts: The retrieved node_contracts
        :return: None
        """
        self._node_contracts = node_contracts
def _get_v3_data(self, w3_interface: Web3, node_eth_address: str,
node_id: str) -> Dict:
"""
This function attempts to retrieve the v3 contract metrics for a node
using an evm node as data source.
:param w3_interface: The web3 interface used to get the data
:param node_eth_address: The ethereum address of the node the metrics
: are associated with.
:param node_id: The id of the node the metrics are associated with.
:return: A dict with the following structure:
{
<v3_proxy_contract_address>: {
'contractVersion': 3,
'aggregatorAddress': str
'latestRound': int,
'latestAnswer': int,
'latestTimestamp': float,
'answeredInRound': int
'withdrawablePayment': int,
'historicalRounds': [{
'roundId': int,
'roundAnswer': int/None (if round consensus not reached
yet),
'roundTimestamp': int/None (if round consensus not reached
yet),
'answeredInRound': int/None (if round consensus not reached
yet)
'nodeSubmission': int
}]
}
}
"""
# If this is the case, then the node has no associated contracts stored
if node_id not in self.node_contracts:
return {}
# This is the case for the first monitoring round
if node_id not in self.last_block_monitored:
self._last_block_monitored[node_id] = {}
data = {}
v3_contracts = self.node_contracts[node_id]['v3']
for proxy_address in v3_contracts:
proxy_contract = w3_interface.eth.contract(address=proxy_address,
abi=V3_PROXY)
aggregator_address = proxy_contract.functions.aggregator().call()
aggregator_contract = w3_interface.eth.contract(
address=aggregator_address, abi=V3_AGGREGATOR)
transformed_eth_address = w3_interface.toChecksumAddress(
node_eth_address)
# Get all SubmissionReceived events related to the node in question
# from the last block height not monitored until the current block
# height. Note fromBlock and toBlock are inclusive.
current_block_height = w3_interface.eth.get_block('latest')[
'number']
first_block_to_monitor = self.last_block_monitored[node_id][
proxy_address] + 1 \
if proxy_address in self.last_block_monitored[node_id] \
else current_block_height
event_filter = \
aggregator_contract.events.SubmissionReceived.createFilter(
fromBlock=first_block_to_monitor,
toBlock=current_block_height,
argument_filters={'oracle': transformed_eth_address})
events = event_filter.get_all_entries()
latest_round_data | |
<reponame>wadmp/wadmp.github.io
#! /usr/bin/env python
"""
This script uses the public API to read the latest settings (both Reported and Desired),
for one or multiple sections,
from multiple devices.
<NAME>, January 2020
Copyright Advantech B+B SmartWorx, 2020
Version 0.6
Last tested on Ubuntu 18.04 with Python 3.6, and on Windows 10 with Python 3.7
"""
# Standard library
import argparse
import os
import csv
import json
import sys
import logging
import logging.config
import time
# pip
import requests
BASE_PATH = "api"
def parse_args():
    """Parse command-line arguments and return the resulting namespace."""
    arg_parser = argparse.ArgumentParser(
        description="Read settings for one section from all devices"
    )
    # Positional arguments (note: default= on positionals has no effect,
    # since bare positionals are always required):
    arg_parser.add_argument("devices", help="CSV file of devices", type=str, default="ALL")
    arg_parser.add_argument("section", help="Section name", type=str, default="snmp")
    # Optional arguments:
    arg_parser.add_argument(
        "-host",
        help="URL of the API gateway. \
              Default = 'https://gateway.wadmp.com'",
        type=str,
        default="https://gateway.wadmp.com",
    )
    arg_parser.add_argument(
        "-username",
        help="Username. \
              Check the code for the default!",
        type=str,
        default="email address",
    )
    arg_parser.add_argument(
        "-password",
        help="Password. \
              Check the code for the default!",
        type=str,
        default="password",
    )
    # Both log-level options share the same set of valid choices.
    log_choices = ["debug", "info", "warning", "error", "critical"]
    arg_parser.add_argument(
        "-console_loglevel",
        help="Log verbosity level. The higher the level, the fewer messages that will be logged. \
              Default = info",
        type=str,
        choices=log_choices,
        default="info",
    )
    arg_parser.add_argument(
        "-file_loglevel",
        help="Log verbosity level. The higher the level, the fewer messages that will be logged. \
              Default = info",
        type=str,
        choices=log_choices,
        default="info",
    )
    return arg_parser.parse_args()
def main(args):
    """Main function.

    Logs in to the API, builds the list of devices to query (either every
    device on the account, or those listed in a CSV file), then downloads the
    desired and reported settings of one section (or ALL sections) for each
    device into per-device folders under 'get_settings/'. When a single
    section was requested, prints summary counts at the end.
    """
    # A log message will only be emitted if the message level is greater than or equal to the configured level of the logger.
    LOG_LEVELS = {
        "critical": logging.CRITICAL,
        "error": logging.ERROR,
        "warning": logging.WARNING,
        "info": logging.INFO,
        "debug": logging.DEBUG,
    }
    console_loglevel = LOG_LEVELS[args.console_loglevel]
    file_loglevel = LOG_LEVELS[args.file_loglevel]
    script_name = os.path.splitext(os.path.basename(__file__))[0]
    # configure_logging is defined elsewhere in this script (not shown here).
    configure_logging(script_name, console_loglevel, file_loglevel)
    global logger
    logger = logging.getLogger(script_name)
    # login() and the API helpers below rely on these module-level globals.
    global BASE_URL
    BASE_URL = args.host
    global SESSION
    SESSION = requests.Session()
    user_token = login(args.username, args.password)
    SESSION.headers.update({"Authorization": f"Bearer {user_token}"})
    # Build the list of devices, each as a dict with a 'mac_address' key.
    if args.devices == "ALL":
        logger.info("Getting a list of ALL your devices ...")
        my_devices = get_devices(100)
        logger.info(f"You have {len(my_devices)} devices in total.\n")
    else:
        logger.info("Opening CSV file of devices ...")
        with open(args.devices, encoding="UTF-8", newline="") as csvfile:
            csvreader = csv.reader(csvfile, delimiter=",")
            next(csvreader)  # Skip the first row
            my_devices = []
            for row in csvreader:
                logger.debug(row)
                # Expected CSV column order; only the MAC address is used.
                alias, serial_number, order_code, mac, imei, device_type = row
                device = {"mac_address": mac}
                my_devices.append(device)
            logger.info(f"File contains {len(my_devices)} devices in total.\n")
    # Summary counters (only reported when a single section was requested).
    device_count = 0
    device_with_specified_section_count = 0
    in_sync_count = 0
    desired_count = 0
    reported_count = 0
    for device in my_devices:
        device_count += 1
        logger.info(f"Device {device['mac_address']}")
        # find_fw_ids is defined elsewhere in this script (not shown here).
        fw_app_id, fw_app_version_id = find_fw_ids(device["mac_address"])
        if fw_app_id and fw_app_version_id:
            logger.info(
                f"Firmware application ID {fw_app_id}, application version ID {fw_app_version_id}"
            )
            try:
                os.mkdir("get_settings")
                logger.debug("Created the 'get_settings' directory.")
            except FileExistsError:
                logger.debug("The 'get_settings' directory already exists")
            # Windows doesn't allow colons (':') in filenames
            device_folder = os.path.join(
                "get_settings", device["mac_address"].replace(":", "-")
            )
            # A pre-existing device folder is a hard error: the 'x' open mode
            # used below would fail on leftover files anyway.
            try:
                os.mkdir(device_folder)
            except FileExistsError:
                logger.error(f"The sub-directory {device_folder} already exists!")
                logger.error(
                    "Please delete or move the contents of the 'get_settings' folder before trying again"
                )
                sys.exit(1)
            # get_sections is defined elsewhere in this script.
            all_sections = get_sections(fw_app_id, fw_app_version_id)
            if not all_sections:
                logger.error("No application sections found!")
                continue
            if args.section == "ALL":
                for section in all_sections:
                    logger.info(f"Section {section['name']}")
                    settings = get_section_settings(
                        device["mac_address"], fw_app_version_id, section["id"]
                    )
                    logger.info(json.dumps(settings, indent=4, sort_keys=True) + "\n")
                    if settings:
                        desired_file = os.path.join(
                            device_folder, f"{section['name']}_desired.ini"
                        )
                        reported_file = os.path.join(
                            device_folder, f"{section['name']}_reported.ini"
                        )
                        if settings[
                            "desired_configuration"
                        ]:  # Can't write 'null' or 'None':
                            with open(desired_file, "x") as f:
                                f.write(settings["desired_configuration"])
                        if settings[
                            "reported_configuration"
                        ]:  # Can't write 'null' or 'None':
                            with open(reported_file, "x") as f:
                                f.write(settings["reported_configuration"])
            else:  # Only one section was specified on the command line
                # search() is a helper defined elsewhere in this script —
                # presumably returns the matching dict or a falsy value;
                # confirm against its definition.
                section = search(all_sections, "name", args.section)
                if section:
                    device_with_specified_section_count += 1
                    settings = get_section_settings(
                        device["mac_address"], fw_app_version_id, section["id"]
                    )
                    if settings:
                        logger.info(
                            json.dumps(settings, indent=4, sort_keys=True) + "\n"
                        )
                        desired_file = os.path.join(
                            device_folder, f"{section['name']}_desired.ini"
                        )
                        reported_file = os.path.join(
                            device_folder, f"{section['name']}_reported.ini"
                        )
                        with open(desired_file, "x") as f:
                            if settings[
                                "desired_configuration"
                            ]:  # Can't write 'null' or 'None'
                                f.write(settings["desired_configuration"])
                        with open(reported_file, "x") as f:
                            if settings[
                                "reported_configuration"
                            ]:  # Can't write 'null' or 'None'
                                f.write(settings["reported_configuration"])
                        if settings["in_sync"]:
                            in_sync_count += 1
                        if settings["desired_configuration"]:
                            desired_count += 1
                        if settings["reported_configuration"]:
                            reported_count += 1
                else:
                    logger.warning(
                        f"This device does not have a section called {args.section}"
                    )
        else:
            logger.error("No firmware application found!")
    if args.section != "ALL":
        logger.info(
            f"{device_count} devices in total, of which {device_with_specified_section_count} have the {args.section} section"
        )
        logger.info(
            f"Of those {device_with_specified_section_count}:\n"
            f"    {reported_count} have a reported state,\n"
            f"    {desired_count} have a desired state,\n"
            f"    {in_sync_count} are in sync.\n"
        )
def login(username, password):
    """Authenticate against the system and return an access token.

    Performs an OAuth2 password-grant request. On any failure the error is
    logged and the process exits with status 1; on a 200 response without a
    usable token, ``None`` is returned.
    """
    url = f"{BASE_URL}/public/auth/connect/token"
    credentials = {
        "username": username,
        "password": password,
        "client_id": "python",
        "grant_type": "password",
    }
    logger.debug(
        f"Sending POST request to {url} with:\n" f" credentials={credentials}"
    )
    response = SESSION.post(url, data=credentials)
    logger.debug(response.status_code)
    try:
        logger.debug(json.dumps(response.json(), indent=4, sort_keys=True))
    except ValueError:
        logger.debug(response.text)
    # Guard clause: bail out (and terminate) on any non-OK status.
    if response.status_code != requests.codes["ok"]:
        logger.error(f"Failed to login! {response.status_code}")
        try:
            logger.error(f"{response.json()['message']}")
        except json.decoder.JSONDecodeError as err:
            logger.error(f"Problem decoding JSON!\n{err}")
        except KeyError as err:
            logger.error(response.json())
        sys.exit(1)
    try:
        return response.json()["access_token"]
    except json.decoder.JSONDecodeError as err:
        logger.error(f"Problem decoding JSON!\n{err}")
        return None
    except KeyError as err:
        logger.error(f"Didn't find what we expected in the JSON response!\n{err}")
        return None
def get_devices(page_size):
    """Retrieves the list of your devices.

    Requests are paged, but this function automatically aggregates responses
    into one complete list.

    :param page_size: Number of devices requested per page.
    :returns: The aggregated device list, ``None`` if the first page could
        not be retrieved, or a partial list if a later page fails.
    """
    page_number = 1
    total, devices = get_one_page_of_devices(page_number, page_size)
    # get_one_page_of_devices() returns (None, None) on failure; previously
    # this crashed with a TypeError on len(None) / comparison with None.
    if total is None or devices is None:
        return None
    while len(devices) < total:
        logger.debug(f"{len(devices)} out of {total} ...")
        page_number += 1
        total, page = get_one_page_of_devices(page_number, page_size)
        if page is None:
            # A later page failed; return what we collected so far.
            break
        devices.extend(page)
        if total is None:
            break
    return devices
def get_one_page_of_devices(page_number, page_size):
    """Retrieves one page of the list of your devices.

    :returns: A ``(total_items, devices)`` tuple; either element may be
        ``None`` when the request or the response decoding fails.
    """
    url = f"{BASE_URL}/{BASE_PATH}/management/devices"
    # page and pageSize are the only REQUIRED query parameters
    logger.debug(
        f"Sending GET request to {url} with:\n"
        f" page={page_number}\n"
        f" pageSize={page_size}"
    )
    response = SESSION.get(url, params={"page": page_number, "pageSize": page_size})
    logger.debug(response.status_code)
    try:
        logger.debug(json.dumps(response.json(), indent=4, sort_keys=True))
    except ValueError:
        logger.debug(response.text)
    try:
        total = response.json()["total_items"]
    except json.decoder.JSONDecodeError as err:
        logger.error(f"Problem decoding JSON!\n{err}")
        return None, None
    except KeyError as err:
        logger.error(f"Didn't find what we expected in the JSON response!\n{err}")
        return None, None
    # Guard clause: report and give up on any non-OK status.
    if response.status_code != requests.codes["ok"]:
        logger.error(f"Failed to retrieve page {page_number}! {response.status_code}")
        try:
            logger.error(f"{response.json()['message']}")
        except json.decoder.JSONDecodeError as err:
            logger.error(f"Problem decoding JSON!\n{err}")
        except KeyError as err:
            logger.error(response.json())
        return None, None
    try:
        return total, response.json()["data"]
    except json.decoder.JSONDecodeError as err:
        logger.error(f"Problem decoding JSON!\n{err}")
        return total, None
    except KeyError as err:
        logger.error(f"Didn't find what we expected in the JSON response!\n{err}")
        return total, None
def get_applications_in_device(mac):
    """Gets apps installed in a device.

    :param mac: MAC address identifying the device.
    :returns: The list of installed applications, or ``None`` on failure.
    """
    url = f"{BASE_URL}/{BASE_PATH}/management/devices/{mac}/apps"
    logger.debug(f"Sending GET request to {url}")
    response = SESSION.get(url)
    logger.debug(response.status_code)
    try:
        logger.debug(json.dumps(response.json(), indent=4, sort_keys=True))
    except ValueError:
        logger.debug(response.text)
    # Guard clause: report and give up on any non-OK status.
    if response.status_code != requests.codes["ok"]:
        logger.error(
            f"Failed to retrieve the list of Applications! {response.status_code}"
        )
        try:
            logger.error(f"{response.json()['message']}")
        except json.decoder.JSONDecodeError as err:
            logger.error(f"Problem decoding JSON!\n{err}")
        except KeyError as err:
            logger.error(response.json())
        return None
    try:
        return response.json()["data"]
    except json.decoder.JSONDecodeError as err:
        logger.error(f"Problem decoding JSON!\n{err}")
        return None
    except KeyError as err:
        logger.error(f"Didn't find what we expected in the JSON response!\n{err}")
        return None
def get_sections(app_id, version_id):
    """Gets an application version's details by its id.

    :returns: The array of sections associated with that version, or
        ``None`` on failure.
    """
    url = f"{BASE_URL}/{BASE_PATH}/applications/{app_id}/versions/{version_id}"
    logger.debug(f"Sending GET request to {url}")
    response = SESSION.get(url)
    logger.debug(response.status_code)
    try:
        logger.debug(json.dumps(response.json(), indent=4, sort_keys=True))
    except ValueError:
        logger.debug(response.text)
    # Guard clause: report and give up on any non-OK status.
    if response.status_code != requests.codes["ok"]:
        logger.error(f"Failed to get the version details! {response.status_code}")
        try:
            logger.error(f"{response.json()['message']}")
        except json.decoder.JSONDecodeError as err:
            logger.error(f"Problem decoding JSON!\n{err}")
        except KeyError as err:
            logger.error(response.json())
        return None
    try:
        return response.json()["data"]["sections"]
    except json.decoder.JSONDecodeError as err:
        logger.error(f"Problem decoding JSON!\n{err}")
        return None
    except KeyError as err:
        logger.error(f"Didn't find what we expected in the JSON response!\n{err}")
        return None
def find_fw_ids(mac):
    """For the given device, find the app ID and the app version ID for the
    firmware app.

    :param mac: MAC address identifying the device.
    :returns: ``(app_id, version_id)`` of the first firmware app found, or
        ``(None, None)`` when the device has none (or the lookup failed).
    """
    for app in get_applications_in_device(mac) or []:
        version = app["application_version"]
        if version["application"]["is_firmware"]:
            # Stop at the first firmware app, mirroring the original scan.
            return version["application"]["id"], version["id"]
    return None, None
def search(array, key, value):
    """Generic lookup: return the first dictionary in *array* whose *key*
    maps to *value*, or ``None`` when no element matches.
    """
    return next((item for item in array if item[key] == value), None)
def get_section_settings(mac, version_id, section_id):
"""Gets the desired and reported settings of a specific section of an app in a device."""
url = f"{BASE_URL}/{BASE_PATH}/management/devices/{mac}/apps/{version_id}/settings/{section_id}"
logger.debug(f"Sending GET request to {url}")
response = SESSION.get(url)
logger.debug(response.status_code)
try:
logger.debug(json.dumps(response.json(), indent=4, sort_keys=True))
except ValueError:
logger.debug(response.text)
if response.status_code == requests.codes["ok"]:
try:
return response.json()["data"]
except json.decoder.JSONDecodeError as err:
logger.error(f"Problem decoding JSON!\n{err}")
return None
except KeyError as err:
logger.error(f"Didn't find what we expected in the | |
file will have higher perf
lines = [file.readline().decode('ascii') for i in range(page_size)]
buf = io.StringIO('\n'.join((l for l in lines if not l.startswith('!'))))
# buf = io.StringIO('\n'.join(lines))
# lines = [file.readline() for i in range(page_size)]
# buf = io.BytesIO(b''.join(lines))
opts = dict(delim_whitespace=True, comment='!',
header=None, escapechar='\\',
nrows=page_size, skip_blank_lines=True,
skipinitialspace=True,
doublequote=False,
dtype=pd_column_dict,
engine='c',
low_memory=False,
na_filter=False,
na_values=None,
keep_default_na=False)
# iowrap = io.TextIOWrapper(file, encoding='ascii')
# df = pd.read_table(iowrap, **opts)
# iowrap.detach()
df = pd.read_table(buf, encoding='ascii', **opts)
# df = pd.read_table(file, encoding='ascii', **opts)
# print(df.dtypes)
# Assign data to the columns
if not page_skip:
col_idx_active = 0
for i, c in enumerate(sdds.columns):
if columns_mask[i]:
c.data.append(df.loc[:, col_idx_active].values)
c._page_numbers.append(page_idx)
col_idx_active += 1
page_stored_idx += 1
page_idx += 1
elif _ASCII_TEXT_PARSE_METHOD == 'shlex':
columns_data = []
if not page_skip:
for i, c in enumerate(sdds.columns):
if columns_mask[i]:
columns_data.append(np.empty(page_size, dtype=columns_store_type[i]))
for row in range(page_size):
line = __get_next_line(file, accept_meta_commands=False, strip=True)
if not page_skip:
line_len = len(line)
if line_len == 0:
raise ValueError(f'Unexpected empty string at position {file.tell()}')
col_idx_active = 0
col_idx = 0
values = shlex.split(line, posix=True)
if TRACE:
logger.debug(f'>COL ROW {row} | {len(values)}: {values=}')
for c in sdds.columns:
if columns_mask[col_idx]:
t = columns_type[col_idx]
if t == object:
value = values[col_idx]
else:
value = np.fromstring(values[col_idx], dtype=t, count=1, sep=' ')[0]
columns_data[col_idx_active][row] = value
if TRACE:
logger.debug(f'>>CR {row=} | {c.name}:{value}')
col_idx_active += 1
col_idx += 1
# Assign data to the columns
if not page_skip:
col_idx_active = 0
for i, c in enumerate(sdds.columns):
if columns_mask[i]:
c.data.append(columns_data[col_idx_active])
c._page_numbers.append(page_idx)
col_idx_active += 1
page_stored_idx += 1
page_idx += 1
# elif _ASCII_TEXT_PARSE_METHOD == 'state_machine':
# # Columns data init
# columns_data = []
# if not page_skip:
# for i, c in enumerate(sdds.columns):
# if columns_mask[i]:
# columns_data.append(np.empty(page_size, dtype=columns_store_type[i]))
# # a simple state machine that seeks using two indices (start, end)
# col_idx = 0
# col_idx_active = 0
# pointer_last = 0
# if TRACE:
# logger.debug(f'>COL ROW {row}')
#
# # Two state flag booleans (instead of an enum, for performance reasons)
# # True if inside quotes ("), and all characters should be treated literally, False otherwise
# is_literal_mode = False
# # Next character will be treated as escaped
# is_escape_mode = False
# value_contains_escape_sequences = False
# # True if scanning within a value, False if scanning the space between columns
# is_reading_spacing = True
#
# for pointer in range(line_len):
# char = line[pointer]
# if char == '!':
# # everything afterwards should be ignored
# assert not is_literal_mode
# assert is_reading_spacing
# logger.debug(f'>>CR {pointer=} {row=} > {char=} COMMENT SKIP REST')
# break
# if TRACE:
# logger.debug(
# f'>>CR {row=} | {col_idx=} | {col_idx_active=} | {pointer_last=} | {pointer=} | {char=}')
# if is_reading_spacing:
# if char == ' ':
# # skip spaces
# pointer_last = pointer
# continue
# else:
# # start reading next value
# pointer_last = pointer
# is_reading_spacing = False
#
# if char == ' ' or pointer == line_len - 1:
# if is_escape_mode:
# raise Exception
# if pointer == line_len - 1:
# if is_literal_mode:
# # Closing quote of line
# assert char == '"'
# is_literal_mode = False
# # shift by one at end of string
# pointer += 1
# if is_literal_mode:
# # advance
# continue
# else:
# # end of value
# # we should not be in literal mode
# assert not is_literal_mode
# # add to data if column in mask
# if columns_mask[col_idx]:
# value_str = line[pointer_last:pointer]
# if columns_type[col_idx] == str:
# if value_str.startswith('"') and value_str.endswith('"'):
# value = value_str[1:-1]
# else:
# value = value_str
# if value_contains_escape_sequences:
# value = value.replace('\\"', '"')
# else:
# value = np.fromstring(value_str, dtype=columns_type[col_idx], count=1, sep=' ')[0]
# columns_data[col_idx_active][row] = value
# col_idx_active += 1
# if TRACE:
# logger.debug(
# f'>>CR {row=} | {file.tell()} | {pointer_last=} | {pointer=} | {line[pointer_last:pointer]} | {value}')
# else:
# # l.debug(f'>>CR {row=} | {file.tell()} | {pointer_last=} | {pointer=} | {line[pointer_last:pointer]} | SKIP')
# pass
# is_reading_spacing = True
# col_idx += 1
# elif char == '"':
# if not is_escape_mode:
# # literal mode toggle
# is_literal_mode = not is_literal_mode
# else:
# is_escape_mode = False
# continue
# elif char == '\\':
# if not is_escape_mode:
# is_escape_mode = True
# value_contains_escape_sequences = True
# else:
# continue
# else:
# if is_escape_mode:
# is_escape_mode = False
# # any other characted gets added to value
# continue
# # Sanity checks
# assert col_idx == len(sdds.columns)
# assert 1 <= col_idx_active <= col_idx
else:
raise Exception(f'Unrecognized parse method: {_ASCII_TEXT_PARSE_METHOD}')
while True:
# Look for next important character (this is rough heuristic)
next_byte = file.peek(1)
if len(next_byte) > 0:
next_char = next_byte[:1].decode('ascii')
#print(repr(next_char))
if next_char == '\n':
file.read(1)
continue
else:
logger.debug(f'Found character {repr(next_char)} at {file.tell()}, continuing to next page')
break
else:
break
if len(next_byte) > 0:
# More data exists
if pages_mask is not None and page_idx == len(pages_mask):
logger.warning(f'Mask {pages_mask} ended but have at least {len(next_byte)} extra bytes - stopping')
break
else:
# End of file
break
sdds.n_pages = page_stored_idx
def _read_pages_ascii_numeric_lines(file: IO[bytes],
sdds: SDDSFile,
arrays_mask: List[bool],
columns_mask: List[bool],
pages_mask: List[bool]) -> None:
""" Line by line numeric data parser for lines_per_row == 1 """
parameters = sdds.parameters
parameter_types = [_NUMPY_DTYPES[el.type] for el in parameters]
logger.debug(f'Parameter types: {parameter_types}')
arrays = sdds.arrays
arrays_type = [_NUMPY_DTYPES[el.type] for el in arrays]
columns = sdds.columns
columns_type = [_NUMPY_DTYPES[el.type] for el in columns]
columns_store_type = [_NUMPY_DTYPE_FINAL[el.type] for el in columns]
assert object not in columns_type
struct_type = np.dtype(', '.join(columns_type))
logger.debug(f'Column types: {columns_type}')
logger.debug(f'struct_type: {struct_type}')
page_idx = 0
page_stored_idx = 0
# Flag for eof since can't break out of two loops
while True:
if pages_mask is not None:
if page_idx >= len(pages_mask):
logger.debug(f'Reached last page {page_idx} in mask, have at least 1 more remaining but exiting early')
break
else:
page_skip = pages_mask[page_idx]
else:
page_skip = False
if page_skip:
logger.debug(f'>>PG | pos {file.tell()} | skipping page {page_idx}')
else:
logger.debug(f'>>PG | pos %d | reading page %d', file.tell(), page_idx)
# Read parameters
parameter_data = []
par_idx = 0
par_line_num = 0
while par_idx < len(parameter_types):
b_array = __get_next_line(file)
if b_array is None:
raise Exception(f'>>PARS | pos {file.tell()} | unexpected EOF at page {page_idx}')
par_line_num += 1
if par_line_num > 10000:
raise Exception('Did not finish parsing parameters after 10000 lines - something is wrong')
if parameter_types[par_idx] == object:
value = b_array.strip()
# Indicates a variable length string
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
if TRACE:
logger.debug(
f'>>PARS | pos {file.tell()} | {par_idx=} | {parameter_types[par_idx]} | {repr(b_array)} | {value}')
else:
# Primitive types
value = np.fromstring(b_array, dtype=parameter_types[par_idx], sep=' ', count=1)[0]
if TRACE:
logger.debug(
f'>>PARV | pos {file.tell()} | {par_idx=} | {parameter_types[par_idx]} | {repr(b_array)} | {value}')
parameter_data.append(value)
par_idx += 1
# Assign data to the parameters
if not page_skip:
for i, el in enumerate(parameters):
el.data.append(parameter_data[i])
array_idx = 0
while array_idx < len(arrays_type):
a = sdds.arrays[array_idx]
mapped_t = arrays_type[array_idx]
# Array dimensions
b_array = __get_next_line(file)
if b_array is None:
raise Exception(f'>>ARRS | pos {file.tell()} | unexpected EOF at page {page_idx}')
dimensions = np.fromstring(b_array, dtype=int, sep=' ', count=-1)
n_elements = np.prod(dimensions)
if len(dimensions) != a.dimensions:
raise ValueError(f'>>Array {a.name} dimensions {b_array} did not match expected count {a.dimensions}')
logger.debug(f'>>Array {a.name} has dimensions {dimensions}, total of {n_elements}')
# Start reading array
n_lines_read = 0
n_elements_read = 0
line_values = []
if arrays_type[array_idx] == object:
# Strings need special treatment
while True:
b_array = __get_next_line(file).strip()
n_lines_read += 1
if b_array is None:
raise Exception(f'>>ARRV | {file.tell()} | unexpected EOF at page {page_idx}')
values = shlex.split(b_array, posix=True)
logger.debug(
f'>>ARRV | {file.tell()} | {array_idx=} | {mapped_t} | {repr(b_array)} | {values} | {n_elements=} | {n_lines_read=}')
n_elements_read += len(values)
line_values.append(values)
if n_elements_read < n_elements:
continue
elif n_elements_read == n_elements:
# Done
break
else:
raise Exception(
f'Too many elements read during array parsing: {n_elements_read} (need {n_elements})')
else:
# Primitive types
while True:
| |
1,
int(ceil(D / blockdim[2])) if int(ceil(D / blockdim[2])) != 0 else 1)
if self.network_struc[i]['Type'] == 'conv':
# Set conv params
alpha = self.network_struc[i]['alpha']
beta = self.network_struc[i]['beta']
delay = self.network_struc[i]['delay']
C = self.layers[i]['C'][:, :, :] # Output delay counter before
I = self.layers[i]['I'][:, :, :] # Output voltage before
#if (i == 5) & (t == self.total_time-1):
# print("Layer anterior (pre) " + str(i-1) + ' ' + str(self.network_struc[i-1]['Type']) +
# " spikes: " + str(np.count_nonzero(self.layers[i - 1]['S'][:, :, :, :])))
#print("I antes:")
#print(I)
#print("V antes:")
#print(V)
if (self.network_struc[i-1]['Type'] == 'P_conv') | (self.network_struc[i-1]['Type'] == 'P_pool'):
V, I, S, C = self.parallel_convolution(S, I, V, C, s[0], s[1], w[0], w[1], stride, th, alpha,
beta, delay, blockdim, griddim)
else:
V, I, S, C = self.convolution(S, I, V, C, s, w, stride, 0., alpha, beta, delay,
blockdim, griddim)
self.layers[i]['V'][:, :, :] = V
self.layers[i]['I'][:, :, :] = I
self.layers[i]['C'][:, :, :] = C
#if (i == 5) & (t == self.total_time-1):
# print("Layer anterior (post) " + str(i-1) + ' ' + str(self.network_struc[i-1]['Type']) +
# " spikes: " + str(np.count_nonzero(self.layers[i - 1]['S'][:, :, :, :])))
#print("I despues:")
#print(I)
#print("V despues:")
#print(V)
S, K_inh = self.lateral_inh(S, V, K_inh, blockdim, griddim)
self.layers[i]['S'][:, :, :, t] = S
self.layers[i]['K_inh'] = K_inh
elif self.network_struc[i]['Type'] == 'P_conv':
I = self.layers[i]['I'] # Output voltage before
for p in {0, 1}:
# Set Parallel conv params
S_tmp = S[p][:, :, :, t] # Output spikes
V_tmp = V[p][:, :, :] # Output voltage before
I_tmp = I[p][:, :, :] # Output voltage before
C_tmp = self.layers[i]['C'][p][:, :, :] # Output delay counter before
K_inh_tmp = K_inh[p]
alpha = self.network_struc[i]['alpha'][p]
beta = self.network_struc[i]['beta'][p]
delay = self.network_struc[i]['delay'][p]
if (self.network_struc[i-1]['Type'] == 'P_conv') | \
(self.network_struc[i-1]['Type'] == 'P_pool'):
V_tmp, I_tmp, S_tmp, C_tmp = self.convolution(S_tmp, I_tmp, V_tmp, C_tmp, s[p], w[p],
stride, th[p], alpha, beta, delay,
blockdim, griddim)
else:
V_tmp, I_tmp, S_tmp, C_tmp = self.convolution(S_tmp, I_tmp, V_tmp, C_tmp, s, w[p],
stride, th[p], alpha, beta, delay,
blockdim, griddim)
self.layers[i]['V'][p][:, :, :] = V_tmp
self.layers[i]['I'][p][:, :, :] = I_tmp
self.layers[i]['C'][p][:, :, :] = C_tmp
S_tmp, K_inh_tmp = self.lateral_inh(S_tmp, V_tmp, K_inh_tmp, blockdim, griddim)
self.layers[i]['S'][p][:, :, :, t] = S_tmp
self.layers[i]['K_inh'][p] = K_inh_tmp
elif self.network_struc[i]['Type'] == 'P_pool':
for p in {0, 1}:
K_inh_tmp = K_inh[p]
S_tmp = S[p][:, :, :, t] # Output spikes
if (self.network_struc[i-1]['Type'] == 'P_conv') | \
(self.network_struc[i-1]['Type'] == 'P_pool'):
S_tmp = self.pooling(S_tmp, s[p], w[p], stride, th[p], blockdim, griddim)
else:
S_tmp = self.pooling(S_tmp, s, w[p], stride, th[p], blockdim, griddim)
self.layers[i]['S'][p][:, :, :, t] = S_tmp
if i < 3:
S_tmp, K_inh_tmp = self.lateral_inh(S_tmp, V[p][:, :, :], K_inh_tmp, blockdim, griddim)
self.layers[i]['S'][p][:, :, :, t] = S_tmp
self.layers[i]['K_inh'][p] = K_inh_tmp
elif self.network_struc[i]['Type'] == 'pool':
if self.network_struc[i - 1]['Type'] == 'P_conv':
S = self.parallel_pooling(S, s[0], s[1], w[0], w[1], stride, th, blockdim, griddim)
else:
S = self.pooling(S, s, w, stride, th, blockdim, griddim)
self.layers[i]['S'][:, :, :, t] = S
if i < 3:
S, K_inh = self.lateral_inh(S, V, K_inh, blockdim, griddim)
self.layers[i]['S'][:, :, :, t] = S
self.layers[i]['K_inh'] = K_inh
elif self.network_struc[i]['Type'] == 'G_pool':
if self.network_struc[i - 1]['Type'] == 'P_conv':
print("NOT IMPLEMENTED YET")
else:
S = self.pooling(S, s, w, stride, th, blockdim, griddim)
self.layers[i]['S'][:, :, :, t] = S
elif self.network_struc[i]['Type'] == 'PG_pool':
for p in {0, 1}:
S_tmp = S[p][:, :, :, t] # Output spikes
if (self.network_struc[i-1]['Type'] == 'P_conv') | \
(self.network_struc[i-1]['Type'] == 'P_pool'):
S_tmp = self.pooling(S_tmp, s[p], w[p], stride, th[p], blockdim, griddim)
else:
S_tmp = self.pooling(S_tmp, s, w[p], stride, th[p], blockdim, griddim)
self.layers[i]['S'][p][:, :, :, t] = S_tmp
if t == (self.total_time-1):
print("Layer " + str(i) + ' ' + str(self.network_struc[i]['Type']) + " spikes: " + str(np.count_nonzero(self.layers[i]['S'])))
# STDP learning
lay = self.learning_layer
if self.network_struc[lay]['Type'] == 'conv':
# valid are neurons in the learning layer that can do STDP and that have fired in the current t
S = self.layers[lay]['S'][:, :, :, t] # Output spikes
V = self.layers[lay]['V'][:, :, :] # Output voltage
K_STDP = self.layers[lay]['K_STDP'] # Lateral inhibition matrix
valid = S*V*K_STDP
if np.count_nonzero(valid) > 0:
H, W, D = self.network_struc[lay]['shape']
stride = self.network_struc[lay]['stride']
offset = self.offsetSTDP[lay]
a_minus = self.stdp_a_minus[lay]
a_plus = self.stdp_a_plus[lay]
maxval, maxind1, maxind2 = self.get_STDP_idxs(valid, H, W, D, lay)
blockdim = (self.thds_per_dim, self.thds_per_dim, self.thds_per_dim)
griddim = (int(ceil(H / blockdim[0])) if int(ceil(H / blockdim[2])) != 0 else 1,
int(ceil(W / blockdim[1])) if int(ceil(W / blockdim[2])) != 0 else 1,
int(ceil(D / blockdim[2])) if int(ceil(D / blockdim[2])) != 0 else 1)
if (self.network_struc[lay-1]['Type'] == 'P_conv') | \
(self.network_struc[lay-1]['Type'] == 'P_pool'):
# FALTA TERMINAR -> INCOMPLETO XQ NO LO USO
for p in {0, 1}:
s = self.layers[lay - 1]['S'][p][:, :, :, :t] # Input spikes
ssum = np.sum(s, axis=3)
s = np.pad(ssum, ((H_pad, H_pad), (W_pad, W_pad), (0, 0)), mode='constant') # Pad the input
w = self.weights[lay - 1][p]
w, K_STDP = self.STDP(S.shape, s, w, K_STDP,
maxval, maxind1, maxind2,
stride, offset, a_minus, a_plus, blockdim, griddim)
self.weights[lay - 1][p] = w
self.layers[lay]['K_STDP'] = K_STDP
else:
s = self.layers[lay - 1]['S'][:, :, :, :t] # Input spikes
ssum = np.sum(s, axis=3)
s = np.pad(ssum, ((H_pad, H_pad), (W_pad, W_pad), (0, 0)), mode='constant') # Pad the input
w = self.weights[lay - 1]
w, K_STDP = self.STDP(S.shape, s, w, K_STDP,
maxval, maxind1, maxind2,
stride, offset, a_minus, a_plus, blockdim, griddim)
self.weights[lay - 1] = w
self.layers[lay]['K_STDP'] = K_STDP
if self.network_struc[lay]['Type'] == 'P_conv':
for p in {0, 1}:
# valid are neurons in the learning layer that can do STDP and that have fired in the current t
S = self.layers[lay]['S'][p][:, :, :, t] # Output spikes
V = self.layers[lay]['V'][p][:, :, :] # Output voltage
K_STDP = self.layers[lay]['K_STDP'][p] # Lateral inhibition matrix
valid = S*V*K_STDP
if np.count_nonzero(valid) > 0:
H, W, D = self.network_struc[lay]['shape']
stride = self.network_struc[lay]['stride']
offset = self.offsetSTDP[lay]
a_minus = self.stdp_a_minus[lay]
a_plus = self.stdp_a_plus[lay]
if (self.network_struc[lay-1]['Type'] == 'P_conv') | \
(self.network_struc[lay-1]['Type'] == 'P_pool'):
s = self.layers[lay - 1]['S'][p][:, :, :, :t] # Input spikes
else:
s = self.layers[lay - 1]['S'][:, :, :, :t] # Input spikes
ssum = np.sum(s, axis=3)
s = np.pad(ssum, ((H_pad, H_pad), (W_pad, W_pad), (0, 0)), mode='constant') # Pad the input
w = self.weights[lay - 1][p]
maxval, maxind1, maxind2 = self.get_STDP_idxs(valid, H, W, D, lay)
blockdim = (self.thds_per_dim, self.thds_per_dim, self.thds_per_dim)
griddim = (int(ceil(H / blockdim[0])) if int(ceil(H / blockdim[2])) != 0 else 1,
int(ceil(W / blockdim[1])) if int(ceil(W / blockdim[2])) != 0 else 1,
int(ceil(D / blockdim[2])) if int(ceil(D / blockdim[2])) != 0 else 1)
w, K_STDP = self.STDP(S.shape, s, w, K_STDP,
maxval, maxind1, maxind2,
stride, offset, a_minus, a_plus, blockdim, griddim)
self.weights[lay - 1][p] = w
self.layers[lay]['K_STDP'][p] = K_STDP
# Train all images in training set
def train_SDNN(self):
"""
Trains the SDNN with the learning set of images
We iterate over the set of images a maximum of self.max_iter times
"""
print("-----------------------------------------------------------")
print("-------------------- STARTING LEARNING---------------------")
print("-----------------------------------------------------------")
# Levanto la secuencia entera, para luego procesarla de a frames de total_time
if not self.svm:
frame = 0
for i in range(self.max_iter):
print("----------------- Learning Progress {}%----------------------".format(str(i) + '/'
+ str(self.max_iter)
+ ' ('
+ str(100 * i / self.max_iter)
+ ')'))
# Dentro del total de iteraciones veo cuantas le corresponden a cada layer
# Me fijo si ya realice todas las iteraciones de este layer
if self.counter > self.max_learn_iter[self.learning_layer]:
self.curr_lay_idx += 1 # Paso al siguiente layer
self.learning_layer = self.learnable_layers[self.curr_lay_idx] # Actualizo el learning layer actual
self.counter = 0 # Reseteo el contador para este layer
self.counter += 1 # Caso contrario aumento el contador
if self.svm:
self.reset_layers() # Reset all layers values for the new image/frame/sequence
else:
self.reset_layers_spikes() # Reset all spikes for the new image/frame/sequence
if self.DoG:
try:
path_img = next(self.learn_buffer)
except:
self.spike_times_train, self.learn_buffer = tee(self.spike_times_train)
path_img = next(self.learn_buffer)
# Obtengo los spike times
st = DoG_filter(path_img, self.filt, self.img_size, self.total_time, self.num_layers)
st = np.expand_dims(st, axis=2)
elif self.svm:
st = self.spike_times_learn[self.curr_img, :, :, :, :] # (Image_number, H, W, M, time) to (H, W, M, time)
else:
st = | |
is received), call connect()
"""
log.debug("reconnecting...")
async def handler(event: Any) -> None:
# We yield here to allow synchronous handlers to work first
await asyncio.sleep(0)
self.connect()
self.add_event_handler('disconnected', handler, disposable=True)
self.disconnect(wait, reason)
    def configure_socket(self) -> None:
        """Set timeout and other options for self.socket.

        Default implementation is a no-op; meant to be overridden
        by subclasses that need socket-level tuning.
        """
        pass
    def configure_dns(self, resolver: Any, domain: Optional[str] = None, port: Optional[int] = None) -> None:
        """
        Configure and set options for a :class:`~dns.resolver.Resolver`
        instance, and other DNS related tasks. For example, you
        can also check :meth:`~socket.socket.getaddrinfo` to see
        if you need to call out to ``libresolv.so.2`` to
        run ``res_init()``.

        Default implementation is a no-op; meant to be overridden.

        :param resolver: A :class:`~dns.resolver.Resolver` instance
                         or ``None`` if ``dnspython`` is not installed.
        :param domain: The initial domain under consideration.
        :param port: The initial port under consideration.
        """
        pass
def get_ssl_context(self) -> ssl.SSLContext:
"""
Get SSL context.
"""
if self.ciphers is not None:
self.ssl_context.set_ciphers(self.ciphers)
if self.keyfile and self.certfile:
try:
self.ssl_context.load_cert_chain(self.certfile, self.keyfile)
except (ssl.SSLError, OSError):
log.debug('Error loading the cert chain:', exc_info=True)
else:
log.debug('Loaded cert file %s and key file %s',
self.certfile, self.keyfile)
if self.ca_certs is not None:
ca_cert: Optional[Path] = None
# XXX: Compat before d733c54518.
if isinstance(self.ca_certs, str):
self.ca_certs = Path(self.ca_certs)
if isinstance(self.ca_certs, Path):
if self.ca_certs.is_file():
ca_cert = self.ca_certs
else:
for bundle in self.ca_certs:
if bundle.is_file():
ca_cert = bundle
break
if ca_cert is None:
raise InvalidCABundle(ca_cert)
self.ssl_context.verify_mode = ssl.CERT_REQUIRED
self.ssl_context.load_verify_locations(cafile=ca_cert)
return self.ssl_context
    async def start_tls(self) -> bool:
        """Perform handshakes for TLS.

        If the handshake is successful, the XML stream will need
        to be restarted.

        :returns: ``True`` on success, ``False`` when the handshake or
            the connection fails.
        :raises ValueError: if there is no transport to upgrade.
        """
        if self.transport is None:
            raise ValueError("Transport should not be None")
        # Subsequent "connected" events should reflect the TLS upgrade.
        self.event_when_connected = "tls_success"
        ssl_context = self.get_ssl_context()
        try:
            if hasattr(self.loop, 'start_tls'):
                transp = await self.loop.start_tls(self.transport,
                                                   self, ssl_context)
            # Python < 3.7
            else:
                transp, _ = await self.loop.create_connection(
                    lambda: self,
                    ssl=self.ssl_context,
                    sock=self.socket,
                    server_hostname=self.default_domain
                )
        except ssl.SSLError as e:
            log.debug('SSL: Unable to connect', exc_info=True)
            log.error('CERT: Invalid certificate trust chain.')
            # Let 'ssl_invalid_chain' handlers decide what to do; if none
            # are registered, drop the connection outright.
            if not self.event_handled('ssl_invalid_chain'):
                self.disconnect()
            else:
                self.event('ssl_invalid_chain', e)
            return False
        except OSError as exc:
            log.debug("Connection error:", exc_info=True)
            self.disconnect()
            return False
        # Publish the peer certificate (PEM-encoded) to interested handlers.
        der_cert = transp.get_extra_info("ssl_object").getpeercert(True)
        pem_cert = ssl.DER_cert_to_PEM_cert(der_cert)
        self.event('ssl_cert', pem_cert)
        # If we use the builtin start_tls, the connection_made() protocol
        # method is not called automatically
        if hasattr(self.loop, 'start_tls'):
            self.connection_made(transp)
        return True
def _start_keepalive(self, event: Any) -> None:
"""Begin sending whitespace periodically to keep the connection alive.
May be disabled by setting::
self.whitespace_keepalive = False
The keepalive interval can be set using::
self.whitespace_keepalive_interval = 300
"""
self.schedule('Whitespace Keepalive',
self.whitespace_keepalive_interval,
self.send_raw,
args=(' ',),
repeat=True)
    def _remove_schedules(self, event: Any) -> None:
        """Remove some schedules that become pointless when disconnected.

        Currently cancels only the whitespace keepalive job.
        """
        self.cancel_schedule('Whitespace Keepalive')
    def start_stream_handler(self, xml: ET.Element) -> None:
        """Perform any initialization actions, such as handshakes,
        once the stream header has been sent.

        Default implementation is a no-op; meant to be overridden.

        :param xml: The stream's header element (presumably — confirm in
                    overriding implementations).
        """
        pass
    def register_stanza(self, stanza_class: Type[StanzaBase]) -> None:
        """Add a stanza object class as a known root stanza.

        A root stanza is one that appears as a direct child of the stream's
        root element.

        Stanzas that appear as substanzas of a root stanza do not need to
        be registered here. That is done using register_stanza_plugin() from
        slixmpp.xmlstream.stanzabase.

        Stanzas that are not registered will not be converted into
        stanza objects, but may still be processed using handlers and
        matchers.

        :param stanza_class: The top-level stanza object's class.
        """
        self.__root_stanza.append(stanza_class)
    def remove_stanza(self, stanza_class: Type[StanzaBase]) -> None:
        """Remove a stanza from being a known root stanza.

        A root stanza is one that appears as a direct child of the stream's
        root element.

        Stanzas that are not registered will not be converted into
        stanza objects, but may still be processed using handlers and
        matchers.

        :param stanza_class: The stanza class to stop treating as a root.
        """
        self.__root_stanza.remove(stanza_class)
def add_filter(self, mode: FilterString, handler: Callable[[StanzaBase], Optional[StanzaBase]], order: Optional[int] = None) -> None:
"""Add a filter for incoming or outgoing stanzas.
These filters are applied before incoming stanzas are
passed to any handlers, and before outgoing stanzas
are put in the send queue.
Each filter must accept a single stanza, and return
either a stanza or ``None``. If the filter returns
``None``, then the stanza will be dropped from being
processed for events or from being sent.
:param mode: One of ``'in'`` or ``'out'``.
:param handler: The filter function.
:param int order: The position to insert the filter in
the list of active filters.
"""
if order:
self.__filters[mode].insert(order, handler)
else:
self.__filters[mode].append(handler)
    def del_filter(self, mode: str, handler: Callable[[StanzaBase], Optional[StanzaBase]]) -> None:
        """Remove an incoming or outgoing filter.

        :param mode: One of ``'in'`` or ``'out'``.
        :param handler: The filter function to remove.
        """
        self.__filters[mode].remove(handler)
def register_handler(self, handler: BaseHandler, before: Optional[BaseHandler] = None, after: Optional[BaseHandler] = None) -> None:
"""Add a stream event handler that will be executed when a matching
stanza is received.
:param handler:
The :class:`~slixmpp.xmlstream.handler.base.BaseHandler`
derived object to execute.
"""
if handler.stream is None:
self.__handlers.append(handler)
handler.stream = weakref.ref(self)
def remove_handler(self, name: str) -> bool:
    """Remove any stream event handlers with the given name.

    Only the first handler whose name matches is removed.

    :param name: The name of the handler.
    :return: ``True`` if a handler was removed, ``False`` otherwise.
    """
    for position, registered in enumerate(self.__handlers):
        if registered.name == name:
            self.__handlers.pop(position)
            return True
    return False
async def get_dns_records(self, domain: str, port: Optional[int] = None) -> List[Tuple[str, str, int]]:
    """Resolve the DNS records for a domain.

    :param domain: The domain in question.
    :param port: Fallback port used when the DNS answers do not
        include one.
    """
    resolved_port = self.default_port if port is None else port
    resolver = default_resolver(loop=self.loop)
    self.configure_dns(resolver, domain=domain, port=resolved_port)
    return await resolve(
        domain,
        resolved_port,
        service=self.dns_service,
        resolver=resolver,
        use_ipv6=self.use_ipv6,
        use_aiodns=self.use_aiodns,
        loop=self.loop,
    )
async def _pick_dns_answer(self, domain: str, port: Optional[int] = None) -> Optional[Tuple[str, str, int]]:
    """Return the next unused DNS answer for ``domain``.

    Fetches a fresh batch of DNS answers when none are cached, and
    consumes one answer per call.

    :param domain: The domain in question.
    :param port: If the results don't include a port, use this one.
    :return: The next answer, or ``None`` once they are exhausted.
    """
    if self._dns_answers is None:
        records = await self.get_dns_records(domain, port)
        self._dns_answers = iter(records)
    # next() with a default replaces the try/except StopIteration dance.
    return next(self._dns_answers, None)
def add_event_handler(self, name: str, pointer: Callable[..., Any], disposable: bool = False) -> None:
    """Register a callback for a manually triggered event.

    :param name: The name of the event that will trigger
                 this handler.
    :param pointer: The function to execute.
    :param disposable: If set to ``True``, the handler will be
                       discarded after one use. Defaults to ``False``.
    """
    self.__event_handlers.setdefault(name, []).append((pointer, disposable))
def del_event_handler(self, name: str, pointer: Callable[..., Any]) -> None:
    """Remove a function as a handler for an event.

    :param name: The name of the event.
    :param pointer: The function to remove as a handler.
    """
    if name not in self.__event_handlers:
        return
    # Keep every registered entry that wraps a different callback.
    self.__event_handlers[name] = [
        entry for entry in self.__event_handlers[name] if entry[0] != pointer
    ]
def event_handled(self, name: str) -> int:
    """Return how many handlers are registered for an event.

    :param name: The name of the event to check.
    """
    registered = self.__event_handlers.get(name)
    return len(registered) if registered else 0
async def event_async(self, name: str, data: Any = None) -> None:
    """Manually trigger a custom event, but await coroutines immediately.

    This event generator should only be called in situations when
    in-order processing of events is important, such as features
    handling.

    :param name: The name of the event to trigger.
    :param data: Data that will be passed to each event handler.
                 Defaults to an empty dictionary, but is usually
                 a stanza object.
    """
    # Avoid the shared-mutable-default pitfall: ``data={}`` in the
    # signature would hand every call (and every handler) the same
    # dict object, which a handler could mutate for all later calls.
    if data is None:
        data = {}
    handlers = self.__event_handlers.get(name, [])[:]
    for handler in handlers:
        handler_callback, disposable = handler
        if disposable:
            # If the handler is disposable, we will go ahead and
            # remove it now instead of waiting for it to be
            # processed in the queue.
            try:
                self.__event_handlers[name].remove(handler)
            except ValueError:
                pass
        # If the callback is a coroutine, await it immediately instead
        # of scheduling it on the loop.
        if iscoroutinefunction(handler_callback):
            try:
                await handler_callback(data)
            except Exception as exc:
                self.exception(exc)
        else:
            try:
                handler_callback(data)
            except Exception as exc:
                self.exception(exc)
def event(self, name: str, data: Any = {}) -> None:
"""Manually trigger a custom event.
Coroutine handlers are wrapped into a future and sent into the
event loop for their execution, and not awaited.
:param | |
from typing import Any, Dict, List, Optional, Set, Callable, Tuple, Union
import torch
import copy
import warnings
from torch.fx import (
GraphModule,
)
from torch.fx.graph import (
Graph,
Node,
Argument,
)
from ..utils import (
activation_is_statically_quantized,
weight_is_quantized,
get_qparam_dict,
_parent_name,
get_swapped_custom_module_class,
)
from ..qconfig import (
QConfigAny,
qconfig_equals
)
from ..qconfig_mapping import QConfigMapping
from ..qconfig_mapping_utils import (
update_qconfig_for_qat,
)
from .qconfig_utils import (
generate_qconfig_map,
compare_prepare_convert_qconfig_mappings,
update_qconfig_for_fusion,
is_qconfig_supported_by_dtype_configs,
)
from torch.ao.quantization.backend_config.utils import (
get_root_module_to_quantized_reference_module,
get_pattern_to_dtype_configs,
get_fused_module_classes,
get_qat_module_classes,
)
from torch.ao.quantization.backend_config import get_native_backend_config_dict
from .graph_module import (
QuantizedGraphModule,
is_observed_module,
is_observed_standalone_module,
)
from ._equalize import update_obs_for_equalization, convert_eq_obs
from .utils import (
get_custom_module_class_keys,
get_quantize_node_info,
create_getattr_from_value,
collect_producer_nodes,
graph_module_from_producer_nodes,
WEIGHT_INDEX_DICT,
)
from torch.ao.quantization.quantize import (
_remove_qconfig,
is_activation_post_process,
)
from .custom_config import (
ConvertCustomConfig,
PrepareCustomConfig,
)
from .lower_to_fbgemm import lower_to_fbgemm
# TODO: revisit this list. Many helper methods shouldn't be public
# Public API of this module: the convert entry points plus the graph
# rewriting helpers they rely on.
__all__ = [
    "convert",
    "convert_custom_module",
    "convert_standalone_module",
    "convert_weighted_module",
    "duplicate_dequantize_node",
    "duplicate_quantize_dynamic_node",
    "get_module_path_and_prefix",
    "has_none_qconfig",
    "insert_dequantize_node",
    "maybe_get_observer_for_node",
    "maybe_recursive_remove_dequantize",
    "remove_extra_dequantize",
    "remove_quant_dequant_pairs",
    "restore_state",
    "run_weight_observers",
]
def restore_state(
    observed: torch.nn.Module
) -> Tuple[Dict[str, Tuple[str, type]],
           PrepareCustomConfig,
           Set[str]]:
    """Recover the metadata attached to a model by ``prepare_fx``.

    Returns the node-name-to-scope mapping, the prepare custom config,
    and the set of observed node names.
    """
    assert is_observed_module(observed), \
        'incoming model must be produced by prepare_fx'
    # These attributes are attached dynamically by prepare_fx, hence the
    # type: ignore annotations.
    scope_map: Dict[str, Tuple[str, type]] = observed._node_name_to_scope  # type: ignore[assignment]
    custom_config: PrepareCustomConfig = observed._prepare_custom_config  # type: ignore[assignment]
    node_names: Set[str] = observed._observed_node_names  # type: ignore[assignment]
    return scope_map, custom_config, node_names
def has_none_qconfig(node: Argument, qconfig_map: Dict[str, QConfigAny]) -> bool:
    """ Check if a node has a qconfig of None, i.e. user requested to not quantize
    the node
    """
    if not isinstance(node, Node):
        return False
    return node.name in qconfig_map and qconfig_map[node.name] is None
def run_weight_observers(observed: GraphModule) -> None:
    """ Extract the subgraph that produces the weight for dynamic quant
    or weight only quant node and run the subgraph to observe the weight.
    Note that the observers of dynamic quant or weight only quant ops are
    run during the convert step.
    """
    for node in observed.graph.nodes:
        if node.op != 'call_function' or node.target not in WEIGHT_INDEX_DICT:
            continue
        weight_positions = WEIGHT_INDEX_DICT[node.target]
        for position, candidate in enumerate(node.args):
            if position not in weight_positions:
                continue
            # candidate is the weight input; gather the nodes that
            # produce it so they can be executed in isolation.
            producers = collect_producer_nodes(candidate)
            if producers is None:
                continue
            weight_subgraph = \
                graph_module_from_producer_nodes(
                    observed, producers)
            # executing the subgraph triggers the weight observers
            weight_subgraph()
# this method is temporary will be removed soon
def duplicate_quantize_dynamic_node(quantized: QuantizedGraphModule) -> QuantizedGraphModule:
    """Give every user of a dynamic quantize node its own private copy
    of that node, then erase the shared original."""
    root = quantized
    for node in quantized.graph.nodes:
        is_dynamic_quant = (
            node.op == "call_function"
            and node.target == torch.quantize_per_tensor_dynamic
        )
        if not is_dynamic_quant:
            continue
        users = list(node.users)
        if len(users) <= 1:
            continue
        for user in users:
            with quantized.graph.inserting_before(node):
                clone = quantized.graph.create_node(
                    "call_function",
                    torch.quantize_per_tensor_dynamic,
                    node.args,
                    node.kwargs)
                user.replace_input_with(node, clone)
        quantized.graph.erase_node(node)
    quantized = QuantizedGraphModule(root, quantized.graph, root.preserved_attr_names)
    return quantized
def duplicate_dequantize_node(quantized: QuantizedGraphModule) -> QuantizedGraphModule:
    """
    If a dequantize node has multiple uses, duplicate it and create one dequantize node for each use.
    This is to enable the pattern matching to map from individual quant - dequant - ref_module to
    final quantized module.
    """
    root = quantized
    for node in quantized.graph.nodes:
        is_dequant = (
            (node.op == "call_method" and node.target == "dequantize")
            or (node.op == "call_function" and node.target == torch.dequantize)
        )
        if not is_dequant:
            continue
        users = list(node.users)
        if len(users) <= 1:
            continue
        for user in users:
            with quantized.graph.inserting_before(node):
                clone = quantized.graph.create_node("call_method", "dequantize", node.args, {})
                user.replace_input_with(node, clone)
        quantized.graph.erase_node(node)
    quantized = QuantizedGraphModule(root, quantized.graph, root.preserved_attr_names)
    return quantized
def remove_extra_dequantize(quantized: QuantizedGraphModule) -> QuantizedGraphModule:
    """
    Removes duplicate dequant nodes in the graph, for an operator that has multiple dequant nodes as a user,
    replace them with a single dequant node that can be shared across all the uses.
    """
    quantized_root = quantized
    for node in quantized.graph.nodes:
        dequant_users = [
            user for user in node.users
            if (user.op == "call_method" and user.target == "dequantize")
            or (user.op == "call_function" and user.target == torch.dequantize)
        ]
        if len(dequant_users) > 1:
            with quantized.graph.inserting_after(node):
                # The shared dequantize must consume `node` itself. The
                # previous code copied `users[0].args`, which is wrong
                # whenever the first user of `node` is not one of the
                # dequantize nodes being merged.
                unique_dq = quantized.graph.create_node(
                    "call_method", "dequantize", (node,), {})
                for dequant in dequant_users:
                    dequant.replace_all_uses_with(unique_dq)
                    quantized.graph.erase_node(dequant)
    quantized = QuantizedGraphModule(quantized_root, quantized.graph, quantized_root.preserved_attr_names)
    return quantized
def remove_quant_dequant_pairs(quantized: QuantizedGraphModule) -> QuantizedGraphModule:
    """Erase quantize ops whose sole consumer is a dequantize, wiring the
    original input through and cleaning up any dead argument nodes."""
    root = quantized
    quant_targets = [torch.quantize_per_tensor, torch.quantize_per_channel]
    for node in quantized.graph.nodes:
        if node.op != "call_function" or node.target not in quant_targets:
            continue
        users = list(node.users)
        lone_user = users[0] if users else None
        if (len(users) == 1
                and lone_user.op == "call_method"
                and lone_user.target == "dequantize"):
            # quant -> dequant is a no-op: route consumers to the input.
            lone_user.replace_all_uses_with(node.args[0])
            quantized.graph.erase_node(lone_user)
            orig_args = list(node.args)
            quantized.graph.erase_node(node)
            # scale/zero-point inputs may now be dead; erase them too.
            for arg in orig_args:
                if isinstance(arg, Node) and len(list(arg.users)) == 0:
                    quantized.graph.erase_node(arg)
    quantized = QuantizedGraphModule(root, quantized.graph, root.preserved_attr_names)
    return quantized
def maybe_recursive_remove_dequantize(arg: Any, node: Node, graph: Graph):
    """ If the arg is a dequantize Node, or a list/tuple/dict of dequantize Node,
    we'll recursively remove the dequantize Node
    """
    if isinstance(arg, Node) and arg.op == "call_method" and arg.target == "dequantize":
        quantize_node = arg.args[0]
        # Only rewire this particular use: other consumers may still
        # legitimately depend on the dequantized value.
        node.replace_input_with(arg, quantize_node)
    elif isinstance(arg, (list, tuple)):
        for element in arg:
            maybe_recursive_remove_dequantize(element, node, graph)
    elif isinstance(arg, dict):
        for element in arg.values():
            maybe_recursive_remove_dequantize(element, node, graph)
    else:
        warnings.warn(f"Unsupported node type in recursive remove dequantize: {type(arg)}")
def get_module_path_and_prefix(
        obs_node: Node,
        node_name_to_scope: Dict[str, Tuple[str, type]],
        qconfig_map: Dict[str, QConfigAny]):
    """ Given an observer node, get the `Scope` or the fully qualified name for
    the submodule containing the observed node, also return a prefix of "_input"
    when the observed node is an input of a F.linear op, and not the output of another
    quantized op.
    TODO: this logic is hacky, we should think about how to remove it or make it more
    general
    """
    observed_node = obs_node.args[0]
    # an observer can be inserted for both input of the next operator or output of the previous
    # operator (they can be the same)
    # this flag identifies if the observer is inserted only because the observed node is
    # the input of the next operator
    assert isinstance(observed_node, Node), \
        f"Expecting observed node to be a Node, but got {observed_node}"
    # A None qconfig means "do not quantize this node": in that case the
    # observer can only exist for the sake of a consumer's input.
    is_input_observer_only = qconfig_map[observed_node.name] is None if observed_node.name in qconfig_map else None
    if is_input_observer_only:
        # if the quantize function is at the input of op, then we find the first user of the observer_node
        # to get the path. If a linear call_function is in the user list, we return the first instance
        # of linear node to get the FQN.
        users = list(obs_node.users)
        first_linear_use_or_first_use = users[0] if users else None
        linear_node = None
        for n in users:
            if n.op == "call_function" and n.target == torch.nn.functional.linear:
                linear_node = n
                break
        if linear_node:
            first_linear_use_or_first_use = linear_node
        prefix = "_input"
    else:
        # if the quantize function is at the output of the op, we use the observer input node to get the path
        first_linear_use_or_first_use = observed_node
        prefix = ""
    if first_linear_use_or_first_use and first_linear_use_or_first_use.name in node_name_to_scope:
        module_path, _ = node_name_to_scope[first_linear_use_or_first_use.name]
    else:
        # TODO: it's not used, so actually we can skip quantization
        # but this requires changing return type of quantize_node
        # we can fix it later if needed
        module_path = ""
    return module_path, prefix
def insert_dequantize_node(
        node: Node,
        graph: Graph):
    """ Insert a dequantize node right after `node` and reroute all of
    `node`'s existing users through it.
    """
    with graph.inserting_after(node):
        dq = graph.call_method("dequantize", (node,))
        # Snapshot the users first: rewiring mutates node.users while
        # we iterate.
        for consumer in dict(node.users):
            if consumer is not dq:
                consumer.replace_input_with(node, dq)
def maybe_get_observer_for_node(
        node: Node,
        modules: Dict[str, torch.nn.Module]
) -> Optional[torch.nn.Module]:
    """
    If the node is observed, return the observer
    instance. Otherwise, return None.
    """
    for candidate in node.users:
        if candidate.op != 'call_module':
            continue
        module = modules[str(candidate.target)]
        if is_activation_post_process(module):
            return module
    return None
def convert_standalone_module(
node: Node,
modules: Dict[str, torch.nn.Module],
model: torch.fx.GraphModule,
is_reference: bool,
backend_config_dict: Optional[Dict[str, Any]]):
""" Converts a observed standalone module to a quantized standalone module by calling
the fx convert api, currently using the same `is_reference` flag as parent, but we may
changing this behavior in the future (e.g. separating quantization and lowering for
standalone module as well)
Args:
- node: The call_module node of the observed standalone module
- modules: named_module of original model
- model: original model
- is_reference: a flag from parent provided by user to decide if we want to
produce a reference model or a fbgemm/qnnpack model
- backend_config_dict: backend configuration of the target backend of quantization
"""
convert = torch.ao.quantization.quantize_fx.convert_fx # type: ignore[attr-defined]
# We know that observed standalone module is a GraphModule since
# it's produced by us
observed_standalone_module : GraphModule = modules[str(node.target)] # type: ignore[assignment]
sm_input_quantized_idxs = \
observed_standalone_module \
._standalone_module_input_quantized_idxs\
.tolist() # type: ignore[operator]
# remove the dequantize nodes for inputs
args = list(node.args)
for idx in range(len(args)):
if idx in sm_input_quantized_idxs:
arg = args[idx]
if arg.op == "call_method" and arg.target == "dequantize": # type: ignore[union-attr]
quantize_node = arg.args[0] # type: ignore[union-attr]
node.replace_input_with(arg, quantize_node)
| |
'''Optimize rateless codes for distributed computing
'''
import math
import random
import logging
import numpy as np
import pandas as pd
import pyrateless
import stats
import complexity
import overhead
import pynumeric
import tempfile
import subprocess
from os import path
from multiprocessing import Pool
def optimize_lt_parameters(num_inputs=None, target_overhead=None,
                           target_failure_probability=None):
    '''find good lt code parameters

    returns: a tuple (c, delta, mode)

    '''
    # the heuristic proposes (c, delta) for the robust Soliton distribution
    heuristic_c, heuristic_delta = pyrateless.heuristic(
        num_inputs=num_inputs,
        target_overhead=target_overhead,
        target_failure_probability=target_failure_probability,
    )
    # derive the distribution mode from delta and c
    mode = pyrateless.coding.stats.mode_from_delta_c(
        num_inputs=num_inputs,
        c=heuristic_c,
        delta=heuristic_delta,
    )
    return heuristic_c, heuristic_delta, mode
def lt_encoding_complexity(num_inputs=None, failure_prob=None,
                           target_overhead=None, code_rate=None):
    '''Return the encoding complexity of LT codes, computed from the
    average of the degree distribution.

    The number of columns is assumed to be 1. Scale the return value
    of this function by the actual number of columns to get the
    correct complexity.

    '''
    # find good LT code parameters; the heuristic is not applicable to
    # the trivial two-symbol case, which uses hard-coded values instead
    if num_inputs == 2:
        mode = 2
        delta = 0.9999999701976676
    else:
        _, delta, mode = optimize_lt_parameters(
            num_inputs=num_inputs,
            target_overhead=target_overhead,
            target_failure_probability=failure_prob,
        )
    avg_degree = pyrateless.Soliton(
        delta=delta,
        mode=mode,
        symbols=num_inputs).mean()
    additions = pyrateless.optimize.complexity.encoding_additions(
        avg_degree,
        code_rate,
        num_inputs,
        1,  # number of columns
    )
    multiplications = pyrateless.optimize.complexity.encoding_multiplications(
        avg_degree,
        code_rate,
        num_inputs,
        1,
    )
    return (additions * complexity.ADDITION_COMPLEXITY
            + multiplications * complexity.MULTIPLICATION_COMPLEXITY)
def lt_decoding_complexity(num_inputs=None, failure_prob=None,
                           target_overhead=None):
    '''Return the decoding complexity of LT codes. Data is manually
    entered from simulations carried out using
    https://github.com/severinson/RaptorCodes

    Returns math.inf when no simulated results are available for the
    given parameters or the results file cannot be loaded.

    '''
    # maps a target failure probability to the file holding the
    # simulated results for that probability.
    filenames = {
        1e-1: './results/LT_1e-1.csv',
        1e-3: './results/LT_1e-3.csv',
        1e-6: './results/LT_1e-6.csv',
        1e-9: './results/LT_1e-9.csv',
    }
    filename = filenames.get(failure_prob)
    if filename is None:
        # previously `filename` stayed unbound here and the resulting
        # NameError was masked by a bare except; return explicitly.
        logging.error('no results for tfp={}'.format(failure_prob))
        return math.inf
    try:
        df = pd.read_csv(filename)
    except Exception:
        # narrow best-effort handling: log and signal "no data".
        logging.error('could not load file {}.'.format(filename))
        return math.inf
    # absolute overhead in symbols for this target overhead
    overhead = round(num_inputs*(target_overhead-1))
    df = df.loc[df['num_inputs'] == num_inputs]
    df = df.loc[df['overhead'] == overhead]
    if len(df) != 1:
        # the format string previously dropped the overhead value and
        # spliced arguments into the wrong placeholders.
        logging.warning(
            'did not find exactly 1 row for num_inputs={}, failure_prob={}, overhead={}: {}'.format(
                num_inputs, failure_prob, overhead, df,))
        return math.inf
    # total additions across all decoding phases
    a = df['diagonalize_decoding_additions']
    a += df['diagonalize_rowadds']
    a += df['solve_dense_decoding_additions']
    a += df['solve_dense_rowadds']
    a += df['backsolve_decoding_additions']
    a += df['backsolve_rowadds']
    a = a.values[0]
    # total multiplications across all decoding phases
    m = df['diagonalize_decoding_multiplications']
    m += df['diagonalize_rowmuls']
    m += df['solve_dense_decoding_multiplications']
    m += df['solve_dense_rowmuls']
    m += df['backsolve_decoding_multiplications']
    m += df['backsolve_rowmuls']
    m = m.values[0]
    return a*complexity.ADDITION_COMPLEXITY + m*complexity.MULTIPLICATION_COMPLEXITY
def evaluate(parameters, target_overhead=None,
             target_failure_probability=None,
             pdf_fun=None, partitioned=False,
             cachedir=None):
    '''evaluate LT code performance.

    args:

    parameters: system parameters.

    target_overhead: target reception overhead (must be > 1).

    target_failure_probability: target decoding failure probability
    (must be in (0, 1)).

    pdf_fun: see rateless.performance_integral

    partitioned: evaluate the performance of the scheme using a partitioned LT
    code with rows_per_batch number of partitions. this case is easy to
    evaluate as we will always receive the same coded symbols for each
    partition. in particular, if it is possible to decode one partition, we can
    decode all others as well. this is only true for
    num_partitions=rows_per_batch.

    cachedir: forwarded to the map-phase simulation cache.

    returns: dict with performance results ('encode', 'reduce', 'delay',
    'load').

    '''
    assert target_overhead > 1
    assert 0 < target_failure_probability < 1
    assert isinstance(partitioned, bool)
    # we support only either no partitioning or exactly rows_per_batch
    # partitions. this case is much simpler to handle due to all partitions
    # behaving the same only in this instance.
    if partitioned:
        num_partitions = parameters.rows_per_batch
    else:
        num_partitions = 1
    # guaranteed to be an integer
    num_inputs = int(parameters.num_source_rows / num_partitions)
    # compute encoding complexity (per column, per partition, scaled below)
    encoding_complexity = lt_encoding_complexity(
        num_inputs=num_inputs,
        failure_prob=target_failure_probability,
        target_overhead=target_overhead,
        code_rate=parameters.q/parameters.num_servers,
    )
    encoding_complexity *= parameters.num_columns
    encoding_complexity *= num_partitions
    encoding_complexity *= parameters.muq
    # compute decoding complexity from simulated results
    decoding_complexity = lt_decoding_complexity(
        num_inputs=num_inputs,
        failure_prob=target_failure_probability,
        target_overhead=target_overhead,
    )
    decoding_complexity *= num_partitions
    decoding_complexity *= parameters.num_outputs
    # find good code parameters; the heuristic is unusable for the
    # trivial two-symbol case, which uses hard-coded values instead
    if num_inputs == 2:
        mode = 2
        delta = 0.9999999701976676
    else:
        c, delta, mode = optimize_lt_parameters(
            num_inputs=num_inputs,
            target_overhead=target_overhead,
            target_failure_probability=target_failure_probability,
        )
    logging.debug(
        'LT mode=%d, delta=%f for %d input symbols, target overhead %f, target failure probability %f. partitioned: %r',
        mode, delta, parameters.num_source_rows,
        target_overhead, target_failure_probability,
        partitioned,
    )
    # scale the number of multiplications required for encoding/decoding and
    # store in a new dict.
    result = dict()
    # compute encoding delay (order statistic of shifted exponentials)
    result['encode'] = stats.order_mean_shiftexp(
        parameters.num_servers,
        parameters.num_servers,
        parameter=encoding_complexity / parameters.num_servers,
    )
    # compute decoding delay
    result['reduce'] = stats.order_mean_shiftexp(
        parameters.q,
        parameters.q,
        parameter=decoding_complexity / parameters.q,
    )
    # simulate the map phase load/delay. this simulation takes into account the
    # probability of decoding at various levels of overhead.
    simulated = performance_integral(
        parameters=parameters,
        num_inputs=num_inputs,
        target_overhead=target_overhead,
        mode=mode,
        delta=delta,
        pdf_fun=pdf_fun,
        cachedir=cachedir,
    )
    result['delay'] = simulated['delay']
    result['load'] = simulated['load']
    return result
def lt_success_pdf(overhead_levels, num_inputs=None, mode=None, delta=None):
    '''evaluate the decoding probability pdf.

    args:

    overhead_levels: iterable of overhead levels to evaluate the PDF at.

    num_inputs: number of input symbols.

    returns: a vector of the same length as overhead_levels, where i-th element
    is the probability of decoding at an overhead of overhead_levels[i].

    '''
    # Soliton distribution object required by the decoding success
    # probability estimate.
    soliton = pyrateless.Soliton(
        symbols=num_inputs,
        mode=mode,
        failure_prob=delta,
    )
    # CDF of decoding success at each overhead level; a leading zero is
    # prepended so that differentiating yields a proper PDF.
    success_cdf = np.fromiter(
        [0] + [1 - pyrateless.optimize.decoding_failure_prob_estimate(
            soliton=soliton,
            num_inputs=num_inputs,
            overhead=level) for level in overhead_levels
        ], dtype=float)
    # differentiate the CDF to obtain the PDF
    return np.diff(success_cdf)
def lt_success_samples(n, target_overhead=None, num_inputs=None, mode=None, delta=None):
    '''sample the decoding probability distribution.

    '''
    assert n > 0
    assert n % 1 == 0
    if target_overhead is None:
        target_overhead = 1
    # Soliton distribution object required by the decoding success
    # probability estimate.
    soliton = pyrateless.Soliton(
        symbols=num_inputs,
        mode=mode,
        failure_prob=delta,
    )

    def success_cdf(x):
        return 1 - pyrateless.optimize.decoding_failure_prob_estimate(
            soliton=soliton,
            num_inputs=num_inputs,
            overhead=x,
        )

    # draw each sample by numerically inverting the CDF at a uniform
    # random point.
    samples = np.fromiter((
        pynumeric.cnuminv(
            fun=success_cdf,
            target=random.random(),
            lower=target_overhead,
        ) for _ in range(n)), dtype=float)
    # a sample can never fall below the target overhead
    return np.maximum(samples, target_overhead)
def random_fountain_success_pdf(overhead_levels, field_size=2, num_inputs=None, mode=None, delta=None):
    '''compute the decoding success probability PDF of a random fountain code over
    a field of size field_size.

    '''
    assert field_size % 1 == 0, field_size
    # absolute overhead in symbols, rounded to the nearest integer
    absolute_overhead = np.fromiter(
        (num_inputs * (level - 1) for level in overhead_levels),
        dtype=float,
    ).round()
    if absolute_overhead.min() < 0:
        raise ValueError("error for overhead levels {}. overhead must be >=1.".format(overhead_levels))
    # decoding success probability at each absolute overhead
    decoding_cdf = 1 - np.power(field_size, -absolute_overhead)
    # differentiate the CDF, keeping its first value as the first PDF entry
    decoding_pdf = np.empty(len(decoding_cdf))
    decoding_pdf[0] = decoding_cdf[0]
    decoding_pdf[1:] = np.diff(decoding_cdf)
    return decoding_pdf
def performance_integral(parameters=None, num_inputs=None, target_overhead=None,
                         mode=None, delta=None, pdf_fun=None, num_overhead_levels=100,
                         max_overhead=None, cachedir=None):
    '''compute average performance by taking into account the probability of
    finishing at different levels of overhead.

    pdf_fun: function used to evaluate the decoding success probability.
    defaults to rateless.lt_success_pdf if None. a function given here
    must have the same signature as this function.

    num_overhead_levels: performance is evaluated at num_overhead_levels levels
    of overhead between target_overhead and the maximum possible overhead.

    '''
    if pdf_fun is None:
        pdf_fun = lt_success_pdf
    assert callable(pdf_fun)
    if max_overhead is None:
        # the maximum possible overhead is the inverse of the code rate
        max_overhead = parameters.num_coded_rows / parameters.num_source_rows
    if max_overhead < target_overhead:
        raise ValueError("target overhead may not exceed the inverse of the code rate")
    # the overhead levels at which performance is evaluated
    overhead_levels = np.linspace(target_overhead, max_overhead, num_overhead_levels)
    # probability of decoding at each of those levels
    decoding_probabilities = pdf_fun(
        overhead_levels,
        num_inputs=num_inputs,
        mode=mode,
        delta=delta,
    )
    # accumulate probability-weighted load/delay per overhead level
    results = list()
    for level, probability in zip(overhead_levels, decoding_probabilities):
        # monte carlo simulation of the load/delay at this overhead
        df_level = overhead.performance_from_overhead(
            parameters=parameters,
            overhead=level,
            design_overhead=target_overhead,
            cachedir=cachedir,
        )
        # average each column, then weight by the decoding probability
        weighted = {label: df_level[label].mean() * probability for label in df_level}
        results.append(weighted)
    # sum the weighted averages over all overhead levels
    totals = pd.DataFrame(results)
    return {label: totals[label].sum() for label in totals}
def order_pdf(parameters=None,
target_overhead=None,
target_failure_probability=None,
partitioned=False,
num_overhead_levels=100,
num_samples=100000,
cachedir=None):
'''simulate the order PDF, i.e., the PDF over the number of servers needed to
decode successfully.
num_samples: total number of samples to take of the number of servers
needed. the PDF is inferred from all samples.
returns: two arrays (order_values, order_probabilities) with the possible
number of servers needed and the probability of needing that number of
servers, respectively.
'''
# we support only either no partitioning or exactly rows_per_batch
# partitions. this case is much simpler to handle due to all partitions
# behaving the same only in this instance.
if partitioned:
num_partitions = parameters.rows_per_batch
else:
num_partitions = 1
# guaranteed to be an integer
num_inputs = int(round(parameters.num_source_rows / num_partitions))
# find good LT code parameters
c, | |
# Source: cmcmaster1/pytorch-widedeep — pytorch_widedeep/models/saint.py
"""
The code in this module is inspired by a number of implementations:
Classes PositionwiseFF and AddNorm are 'stolen' with much gratitude from the fantastic d2l.ai book:
https://d2l.ai/chapter_attention-mechanisms/transformer.html
MultiHeadedAttention is inspired by the TabTransformer implementation here:
https://github.com/lucidrains/tab-transformer-pytorch. General comment: just go and have a look at
https://github.com/lucidrains
The fixed attention implementation and SharedEmbeddings are inspired by the
TabTransformer available in AutoGluon:
https://github.com/awslabs/autogluon/tree/master/tabular/src/autogluon/tabular/models/tab_transformer
If you have not checked that library, you should.
"""
import math
import torch
import einops
from torch import nn, einsum
from pytorch_widedeep.wdtypes import * # noqa: F403
from pytorch_widedeep.models.tab_mlp import MLP, _get_activation_fn
class ContinuousEmbedding(nn.Module):
    """Embed each continuous column through a shared ``Linear(1, embedding_size)``
    layer followed by a ReLU."""

    def __init__(self, embedding_size: int):
        super(ContinuousEmbedding, self).__init__()
        self.relu = nn.ReLU()
        self.embedding = nn.Linear(1, embedding_size)

    def forward(self, X: Tensor) -> Tensor:
        # (batch, cols) -> (cols, batch, 1) -> embed -> (batch, cols, emb)
        per_column = X.t().unsqueeze(2)
        embedded = self.embedding(per_column).permute(1, 0, 2)
        return self.relu(embedded)
class PositionwiseFF(nn.Module):
    """Transformer position-wise feed-forward block:
    Linear -> activation -> dropout -> Linear."""

    def __init__(
        self,
        input_dim: int,
        ff_hidden_dim: int,
        dropout: float,
        activation: str,
    ):
        super(PositionwiseFF, self).__init__()
        self.w_1 = nn.Linear(input_dim, ff_hidden_dim)
        self.w_2 = nn.Linear(ff_hidden_dim, input_dim)
        self.dropout = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)

    def forward(self, X: Tensor) -> Tensor:
        hidden = self.activation(self.w_1(X))
        return self.w_2(self.dropout(hidden))
class AddNorm(nn.Module):
    """Residual connection followed by layer normalization; dropout is
    applied to the incoming sublayer output first."""

    def __init__(self, input_dim: int, dropout: float):
        super(AddNorm, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.ln = nn.LayerNorm(input_dim)

    def forward(self, X: Tensor, Y: Tensor) -> Tensor:
        residual = X + self.dropout(Y)
        return self.ln(residual)
class MultiHeadedAttention(nn.Module):
    """Multi-head self-attention over the sequence of column embeddings.

    With ``fixed_attention`` the keys and queries are learned per-column
    parameters shared across the batch (only the values are projected
    from the input); otherwise q, k and v are all obtained from a single
    joint projection of the input.
    """

    def __init__(
        self,
        input_dim: int,
        num_heads: int,
        keep_attn_weights: bool,
        dropout: float,
        fixed_attention: bool,
        num_cat_columns: int,
    ):
        super(MultiHeadedAttention, self).__init__()
        assert (
            input_dim % num_heads == 0
        ), "'input_dim' must be divisible by 'num_heads'"
        if fixed_attention and not num_cat_columns:
            raise ValueError(
                "if 'fixed_attention' is 'True' the number of categorical "
                "columns 'num_cat_columns' must be specified"
            )
        # Consistent with other implementations I assume d_v = d_k
        self.d_k = input_dim // num_heads
        self.num_heads = num_heads
        self.dropout = nn.Dropout(dropout)
        self.fixed_attention = fixed_attention
        if fixed_attention:
            # values are projected from the input; keys and queries are
            # learned per-column parameters (Xavier-initialised)
            self.inp_proj = nn.Linear(input_dim, input_dim)
            self.fixed_key = nn.init.xavier_normal_(
                nn.Parameter(torch.empty(num_cat_columns, input_dim))
            )
            self.fixed_query = nn.init.xavier_normal_(
                nn.Parameter(torch.empty(num_cat_columns, input_dim))
            )
        else:
            # a single projection producing q, k and v stacked on the last dim
            self.inp_proj = nn.Linear(input_dim, input_dim * 3)
        self.out_proj = nn.Linear(input_dim, input_dim)
        self.keep_attn_weights = keep_attn_weights

    def forward(self, X: Tensor) -> Tensor:
        # b: batch size, s: src seq length (num of categorical features
        # encoded as embeddings), l: target sequence (l = s), e: embeddings
        # dimensions, h: number of attention heads, d: d_k
        if self.fixed_attention:
            v = self.inp_proj(X)
            # broadcast the learned keys/queries across the batch dimension
            k = einops.repeat(
                self.fixed_key.unsqueeze(0), "b s e -> (b copy) s e", copy=X.shape[0]
            )
            q = einops.repeat(
                self.fixed_query.unsqueeze(0), "b s e -> (b copy) s e", copy=X.shape[0]
            )
        else:
            q, k, v = self.inp_proj(X).chunk(3, dim=2)
        # split the embedding dimension across the attention heads
        q, k, v = map(
            lambda t: einops.rearrange(t, "b s (h d) -> b h s d", h=self.num_heads),
            (q, k, v),
        )
        # scaled dot-product attention scores
        scores = einsum("b h s d, b h l d -> b h s l", q, k) / math.sqrt(self.d_k)
        attn_weights = self.dropout(scores.softmax(dim=-1))
        if self.keep_attn_weights:
            # expose the attention maps for later inspection
            self.attn_weights = attn_weights
        attn_output = einsum("b h s l, b h l d -> b h s d", attn_weights, v)
        # merge the heads back into a single embedding dimension
        output = einops.rearrange(attn_output, "b h s d -> b s (h d)", h=self.num_heads)
        return self.out_proj(output)
class IntersampleAttention(nn.Module):
    """Intersample (row) attention from the SAINT paper.

    Each sample in the batch is flattened to a single vector of size
    ``num_input_columns * input_dim`` and multi-head attention is computed
    *across the batch dimension*, so rows can attend to other rows.
    """

    def __init__(
        self,
        input_dim: int,
        num_heads: int,
        keep_attn_weights: bool,
        dropout: float,
        fixed_attention: bool,
        num_cat_columns: int,
        num_input_columns: int
    ):
        super(IntersampleAttention, self).__init__()
        assert (
            input_dim % num_heads == 0
        ), "'input_dim' must be divisible by 'num_heads'"
        if fixed_attention and not num_cat_columns:
            raise ValueError(
                "if 'fixed_attention' is 'True' the number of categorical "
                "columns 'num_cat_columns' must be specified"
            )
        # Consistent with other implementations I assume d_v = d_k
        self.input_dim = input_dim
        self.d_k = input_dim // num_heads
        self.num_heads = num_heads
        self.dropout = nn.Dropout(dropout)
        self.fixed_attention = fixed_attention
        if fixed_attention:
            # NOTE(review): these fixed key/query parameters are shaped per
            # *column* (num_cat_columns x input_dim), but forward() flattens
            # each row to num_input_columns * input_dim before projecting --
            # the shapes do not appear compatible. Confirm fixed attention is
            # actually supported for intersample attention.
            self.inp_proj = nn.Linear(input_dim, input_dim)
            self.fixed_key = nn.init.xavier_normal_(
                nn.Parameter(torch.empty(num_cat_columns, input_dim))
            )
            self.fixed_query = nn.init.xavier_normal_(
                nn.Parameter(torch.empty(num_cat_columns, input_dim))
            )
        else:
            # Projects the flattened row to stacked q/k/v (hence the * 3).
            self.inp_proj = nn.Linear(input_dim * num_input_columns, input_dim * num_input_columns * 3)
        self.out_proj = nn.Linear(input_dim, input_dim)
        self.keep_attn_weights = keep_attn_weights

    def forward(self, X: Tensor) -> Tensor:
        # b: batch size, s: src seq length (num of categorical features
        # encoded as embeddings), l: target sequence (l = s), e: embeddings
        # dimensions, h: number of attention heads, d: d_k
        # df: embedding dim * num of features
        # Flatten each row: (b, s, e) -> (1, b, s*e); the batch axis becomes
        # the "sequence" that attention runs over.
        X = einops.rearrange(X, "b s e -> () b (s e)")
        if self.fixed_attention:
            v = self.inp_proj(X)
            k = einops.repeat(
                self.fixed_key.unsqueeze(0), "b s e -> (b copy) s e", copy=X.shape[0]
            )
            q = einops.repeat(
                self.fixed_query.unsqueeze(0), "b s e -> (b copy) s e", copy=X.shape[0]
            )
        else:
            q, k, v = self.inp_proj(X).chunk(3, dim=2)
        # Split the flattened row into heads: (1, b, h*df) -> (1, h, b, df).
        q, k, v = map(
            lambda t: einops.rearrange(t, "() b (h df) -> () h b df", h=self.num_heads),
            (q, k, v),
        )
        # NOTE(review): scores are scaled by sqrt(d_k) although the per-head
        # width here is (num_input_columns * input_dim) / num_heads, not d_k.
        # Presumably intentional (mirrors the column-attention scaling), but
        # worth confirming.
        scores = einsum("b h s d, b h l d -> b h s l", q, k) / math.sqrt(self.d_k)
        attn_weights = self.dropout(scores.softmax(dim=-1))
        if self.keep_attn_weights:
            # Stored for later inspection/plotting when requested.
            self.attn_weights = attn_weights
        attn_output = einsum("b h s l, b h l d -> b h s d", attn_weights, v)
        # Merge heads, then un-flatten rows back to (b, s, e).
        output = einops.rearrange(attn_output, "() h b df -> () b (h df)", h=self.num_heads)
        output = einops.rearrange(output, "() b (s e) -> b s e", e = self.input_dim)
        return self.out_proj(output)
class SAINTTransformerEncoder(nn.Module):
    """One SAINT encoder block: column self-attention followed by intersample
    (row) attention, each paired with a position-wise feed-forward layer and
    residual Add&Norm connections.
    """

    def __init__(
        self,
        input_dim: int,
        num_heads: int,
        keep_attn_weights: bool,
        ff_hidden_dim: int,
        dropout: float,
        activation: str,
        fixed_attention: bool,
        num_cat_columns: int,
        num_input_columns: int,
    ):
        super(SAINTTransformerEncoder, self).__init__()
        self.self_attn = MultiHeadedAttention(
            input_dim,
            num_heads,
            keep_attn_weights,
            dropout,
            fixed_attention,
            num_cat_columns,
        )
        self.is_attn = IntersampleAttention(
            input_dim,
            num_heads,
            keep_attn_weights,
            dropout,
            fixed_attention,
            num_cat_columns,
            num_input_columns,
        )
        # NOTE(review): a single PositionwiseFF instance is reused after both
        # attention sub-blocks below (shared weights). SAINT-style encoders
        # usually give each sub-block its own feed-forward layer -- confirm
        # this weight sharing is intended.
        self.feed_forward = PositionwiseFF(
            input_dim, ff_hidden_dim, dropout, activation
        )
        self.attn_addnorm = AddNorm(input_dim, dropout)
        self.is_attn_addnorm = AddNorm(input_dim, dropout)
        self.ff_addnorm1 = AddNorm(input_dim, dropout)
        self.ff_addnorm2 = AddNorm(input_dim, dropout)

    def forward(self, X: Tensor) -> Tensor:
        # Column attention -> Add&Norm -> FF -> Add&Norm ...
        Y_self_attn = self.self_attn(X)
        Y = self.attn_addnorm(X, Y_self_attn)
        Y = self.ff_addnorm1(Y, self.feed_forward(Y))
        # ... then row (intersample) attention -> Add&Norm -> FF -> Add&Norm.
        Y_is_attn = self.is_attn(Y)
        Y = self.is_attn_addnorm(Y, Y_is_attn)
        return self.ff_addnorm2(Y, self.feed_forward(Y))
class FullEmbeddingDropout(nn.Module):
    """Embedding-level dropout: zeroes *entire* column embeddings.

    Instead of dropping individual activations, one Bernoulli draw is made per
    column (dim 1 of the input), so a dropped column loses its whole embedding
    vector for every sample in the batch; survivors are rescaled by 1/(1-p)
    to keep the expected activation unchanged.
    """

    def __init__(self, dropout: float):
        super(FullEmbeddingDropout, self).__init__()
        # Probability of dropping a column. Rates >= 1.0 are not supported
        # (the 1/(1-p) rescale would divide by zero).
        self.dropout = dropout

    def forward(self, X: Tensor) -> Tensor:
        # Standard dropout semantics: identity outside training mode (the
        # previous version applied the mask unconditionally, which perturbed
        # inference-time outputs) and when the rate is zero.
        if not self.training or self.dropout == 0.0:
            return X
        # (num_columns, 1) mask broadcast over the batch and embedding dims.
        # ``new_empty`` replaces the deprecated ``X.new().resize_()`` idiom.
        mask = X.new_empty(X.size(1), 1).bernoulli_(1 - self.dropout).expand_as(
            X
        ) / (1 - self.dropout)
        return mask * X
class SharedEmbeddings(nn.Module):
    """Category embeddings with a learned component shared per column.

    Following the TabTransformer appendix, part of every embedding identifies
    the *column* rather than the category: either a slice of size
    ``embed_dim // frac_shared_embed`` that overwrites the first positions of
    each embedding, or (``add_shared_embed=True``) a full-width vector that is
    added to the embedding instead.
    """

    def __init__(
        self,
        num_embed: int,
        embed_dim: int,
        embed_dropout: float,
        full_embed_dropout: bool = False,
        add_shared_embed: bool = False,
        frac_shared_embed=8,
    ):
        super(SharedEmbeddings, self).__init__()
        assert (
            embed_dim % frac_shared_embed == 0
        ), "'embed_dim' must be divisible by 'frac_shared_embed'"
        self.add_shared_embed = add_shared_embed
        self.embed = nn.Embedding(num_embed, embed_dim, padding_idx=0)
        # Bound the initial category embeddings to [-2, 2].
        self.embed.weight.data.clamp_(-2, 2)
        # Full width when added, a fraction of the width when overwritten.
        shared_dim = embed_dim if add_shared_embed else embed_dim // frac_shared_embed
        self.shared_embed = nn.Parameter(torch.empty(1, shared_dim).uniform_(-1, 1))
        self.dropout = (
            FullEmbeddingDropout(embed_dropout)
            if full_embed_dropout
            else nn.Dropout(embed_dropout)
        )

    def forward(self, X: Tensor) -> Tensor:
        embedded = self.dropout(self.embed(X))
        shared = self.shared_embed.expand(embedded.shape[0], -1)
        if self.add_shared_embed:
            embedded += shared
        else:
            embedded[:, : shared.shape[1]] = shared
        return embedded
class SAINT(nn.Module):
def __init__(
self,
column_idx: Dict[str, int],
embed_input: List[Tuple[str, int]],
continuous_cols: Optional[List[str]] = None,
embed_dropout: float = 0.1,
full_embed_dropout: bool = False,
shared_embed: bool = False,
add_shared_embed: bool = False,
frac_shared_embed: int = 8,
input_dim: int = 32,
num_heads: int = 8,
num_blocks: int = 6,
dropout: float = 0.1,
keep_attn_weights: bool = False,
fixed_attention: bool = False,
num_cat_columns: Optional[int] = None,
ff_hidden_dim: int = 32 * 4,
transformer_activation: str = "gelu",
mlp_hidden_dims: Optional[List[int]] = None,
mlp_activation: str = "relu",
mlp_batchnorm: bool = False,
mlp_batchnorm_last: bool = False,
mlp_linear_first: bool = True,
embed_continuous: bool = True,
):
r"""TabTransformer model (https://arxiv.org/pdf/2012.06678.pdf) model that
can be used as the deeptabular component of a Wide & Deep model.
Parameters
----------
column_idx: Dict
Dict containing the index of the columns that will be passed through
the DeepDense model. Required to slice the tensors. e.g. {'education':
0, 'relationship': 1, 'workclass': 2, ...}
embed_input: List
List of Tuples with the column name and number of unique values
e.g. [(education, 11, 32), ...]
continuous_cols: List, Optional, default = None
List with the name of the numeric (aka continuous) columns
embed_dropout: float, default = 0.1
Dropout to be applied to the embeddings matrix
full_embed_dropout: bool, default = False
Boolean indicating if an entire embedding (i.e. the representation
for one categorical column) will be dropped in the batch. See:
``pytorch_widedeep.model.tab_transformer.FullEmbeddingDropout``.
If ``full_embed_dropout = True``, ``embed_dropout`` is ignored.
shared_embed: bool, default = False
The idea behind ``shared_embed`` is described in the Appendix A in the paper:
`'The goal of having column embedding is to enable the model to distinguish the
classes in one column from those in the other columns'`. In other words, the idea
is to let the model learn which column is embedding at the time.
add_shared_embed: bool, default = False,
The two embedding sharing strategies are: 1) add the shared | |
import os
import dtlpy as dl
import tqdm
from keras.optimizers import Adam
import numpy as np
import json
import shutil
from collections import namedtuple
from train_with_params import get_anchors, get_classes, create_model, create_tiny_model, data_generator_wrapper
YOLO_DEFAULTS_PATH = os.path.join(os.environ.get('ZOO_CONFIGS', os.getcwd()), 'yolov3_keras', 'base')
class ModelAdapter(dl.ml.BaseModelAdapter):
_defaults = {
'input_shape': (416, 416),
'annotation_filename': 'train.txt',
'classes_filename': 'coco_classes.txt',
'anchors_filename': 'yolo_anchors.txt',
'weights_filename': 'yolo.h5',
}
def __init__(self, model_entity):
super(ModelAdapter, self).__init__(model_entity)
    def create_callbacks(self, dump_dir):
        """Instantiate the Keras callbacks used by :meth:`train`.

        :param dump_dir: directory that receives TensorBoard logs and
            checkpoint ``.h5`` files
        """
        from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
        # Useful keras callbacks
        self.logging_cb = TensorBoard(log_dir=dump_dir)
        # Save weights-only checkpoints every 3 epochs, keeping only the best val_loss.
        self.checkpoint_cb = ModelCheckpoint(dump_dir + '/ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
                                             monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
        # Two LR schedules: aggressive x0.1 cut, and a gentler x0.5 cut with cooldown.
        self.reduce_lr_cb = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
        self.reduce_lr_half_cb = ReduceLROnPlateau(monitor='val_loss', factor=0.5, verbose=1, cooldown=3)
        self.early_stopping_cb = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)
def calc_utility_attrs(self):
print("Creating adapter properties. Using bucket_path {!r}".format(self.bucket_path))
setattr(self, 'classes_path',os.path.join(self.bucket_path, self.classes_filename) )
setattr(self, 'anchors_path',os.path.join(self.bucket_path, self.anchors_filename) )
setattr(self, 'weights_path',os.path.join(self.bucket_path, self.weights_filename) )
for config, config_func in zip(['classes', 'anchors'], [get_classes, get_anchors]):
# load the config (directly , from buckt or from the defult)
if not hasattr(self, config):
config_path = getattr(self, config + '_path')
print("DEBUG: config : {!r} config_path: {}; function {}".format(config, config_path, config_func))
if os.path.isfile(config_path):
setattr(self, config, config_func(config_path))
else:
print("DEBUG: {} Not found. All properties : {}".format(
config, {att: getattr(self, att) for att in dir(self) if att.startswith(config)})
)
defalut_path = os.path.join(YOLO_DEFAULTS_PATH, getattr(self, config + '_filename') )
setattr(self, config, config_func(defalut_path))
setattr(self, 'num_classes', len(self.classes))
setattr(self, 'is_tiny_version', len(self.anchors) == 6) # default setting
setattr(self, 'label_map', self.snapshot.label_map)
# =====================
# IMPLEMENTED METHODS
# =====================
def load(self, local_path):
""" Loads model and populates self.model with a model for predictions
for the train - the function overrides the self.model
:param local_path: `str` directory path in local FileSystem
"""
from yolo import YOLO
self.calc_utility_attrs()
try:
self.model = YOLO(**{
"model_path": self.weights_path,
"anchors_path": self.anchors_path,
"classes_path": self.classes_path,
"score": 0.1,
"iou": 0.45,
"model_image_size": self.input_shape,
"max_boxes": 20
})
except FileNotFoundError as err:
self.logger.warning("Failed to load model. this may be caused due empty bucket files."
"You can still train the model but runnig inference will not work.")
self.logger.debug(err)
def save(self, local_path):
""" saves configuration and weights locally
:param local_path: `str` directory path in local FileSystem
"""
self.model.save_weights(os.path.join(local_path, self.weights_filename))
if local_path != self.bucket_path:
# create a new bucket if the given path is different from current
new_bucket = self.snapshot.buckets.create(dl.BucketType.LOCAL, local_path=local_path)
self.logger.info("Created new local bucket. At {}".format(local_path))
#new_bucket = dl.LocalBucket(local_path=local_path)
# classes
with open(os.path.join(local_path, self.classes_filename), 'w') as f:
for cls in self.classes:
f.write(f"{cls}\n")
anchors = ''
for anchor in self.anchors:
anchors += "{w:n},{h:n}, ".format(w=anchor[0],h=anchor[1]) # NOT SURE ABOUT THE ORDER W,H
with open(os.path.join(local_path, self.anchors_filename), 'w') as f:
f.write(anchors[:-2] + '\n') # remove trailing ','
def train(self, local_path, dump_path, base_epocs=5, ext_epocs=0):
""" Train the model according to data in local_path and save the checkpoint to dump_path
:param local_path: not used becaues the convert writes it to the acutal lines
"""
keras_cb = super(ModelAdapter, self).dataloop_keras_callback(dump_path)
self.create_callbacks(dump_dir=dump_path)
# USE DEFAULT WEIGHTS PATH - because using load_pretraned=False didn't work
weights_path = self.weights_path
if not os.path.isfile(weights_path):
weights_path = os.path.join(YOLO_DEFAULTS_PATH, self.weights_filename)
if self.is_tiny_version:
model = create_tiny_model(self.input_shape, self.anchors, self.num_classes,
freeze_body=2, weights_path=weights_path)
else:
self.model = create_model(self.input_shape, self.anchors, self.num_classes,
freeze_body=2, weights_path=weights_path)
self.model.compile(optimizer=Adam(lr=1e-3), loss={
# use custom yolo_loss Lambda layer.
'yolo_loss': lambda y_true, y_pred: y_pred})
self.logger.info(f"Created and compiled Model! is_tiny = {self.is_tiny_version}")
if base_epocs:
batch_size = 32
self.logger.info('Base Train on {t} samples, val on {v} samples, with batch size {bt} for {ep} epocs.'.
format(t=self.num_train, v=self.num_val, bt=batch_size, ep=base_epocs))
self.model.fit_generator(data_generator_wrapper(self.train_lines, batch_size, self.input_shape, self.anchors, self.num_classes),
steps_per_epoch=max(1, self.num_train//batch_size),
validation_data=data_generator_wrapper(self.val_lines, batch_size, self.input_shape, self.anchors, self.num_classes),
validation_steps=max(1, self.num_val//batch_size),
epochs=base_epocs,
initial_epoch=0,
callbacks=[keras_cb, self.logging_cb, self.checkpoint_cb, self.reduce_lr_half_cb],
)
# Unfreeze and continue training, to fine-tune.
# Train longer if the result is not good.
if ext_epocs:
for i in range(len(self.model.layers)):
self.model.layers[i].trainable = True
self.model.compile(optimizer=Adam(lr=1e-4),
loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change
self.logger.info('Unfreeze all of the layers.')
# NOTE that more GPU memory is required after unfreezing the body
batch_size = 2 if self.input_shape[0] > 600 else 16
self.logger.info('Ext Train on {t} samples, val on {v} samples, with batch size {bt} for {ep} epocs. (input shape = {sh}).'.
format(t=self.num_train, v=self.num_val, bt=batch_size, ep=ext_epocs, sh=self.input_shape))
self.model.fit_generator(
data_generator_wrapper(self.train_lines, batch_size, self.input_shape, self.anchors, self.num_classes),
steps_per_epoch=max(1, self.num_train // batch_size),
validation_data=data_generator_wrapper(self.val_lines, batch_size, self.input_shape, self.anchors,
self.num_classes),
validation_steps=max(1, self.num_val // batch_size),
epochs=base_epocs + ext_epocs,
initial_epoch=base_epocs,
callbacks=[self.logging_cb, self.checkpoint_cb, self.reduce_lr_cb, self.early_stopping_cb])
trained_weights_path = os.path.join(dump_path, 'trained_weights.h5')
self.logger.info("Train completed ({}-{}) saving train out to {}".format(base_epocs, ext_epocs, trained_weights_path))
# self.save(dump_path) # saves all weights + classes + anchors
self.model.save_weights(trained_weights_path) # save only the h5 file
return trained_weights_path
def predict(self, batch, verbose=True):
""" Model inference (predictions) on batch of image
Virtual method - need to implement
:param batch: `np.ndarray`
:return: `list[self.BoxPrediction]` prediction results by len(batch)
"""
from keras import backend as K
from skimage.transform import resize
import itertools
# Preprocess batch
# ================
scaled_batch, orig_shapes = [], []
for img in batch:
orig_shapes.append(img.shape[:2]) # NOTE: numpy shape is height, width (rows,cols) while PIL.size is width, height
img_scaled = self._letterbox_image(img) / 255.0
# img_scaled = resize(img/255.0, self.input_shape) # scale and convert to 0-1
scaled_batch.append(img_scaled)
scaled_batch = np.array(scaled_batch)
predictions = []
for i in range(len(batch)):
out_boxes, out_scores, out_classes = self.model.sess.run(
[self.model.boxes, self.model.scores, self.model.classes],
feed_dict={
self.model.yolo_model.input: np.expand_dims(scaled_batch[i], 0), # Add batch dimension.
self.model.input_image_shape: orig_shapes[i],
# K.learning_phase(): 0
})
self.logger.info('Found {} boxes for img #{} in batch (TopLeft, BottomRight )'.format(len(out_boxes), i))
item_predictions = []
for b in range(len(out_boxes)):
top, left, bottom, right = out_boxes[b]
self.logger.debug(f" @ ({top:2.1f}, {left:2.1f}),\t ({bottom:2.1f}, {right:2.1f})")
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(orig_shapes[i][0], np.floor(bottom + 0.5).astype('int32'))
right = min(orig_shapes[i][1], np.floor(right + 0.5).astype('int32'))
score = out_scores[b]
label = self.model.class_names[out_classes[b]]
self.logger.debug(f"\tBox {b:2} - {label:20}: {score:1.3f} @ {(top, left)},\t {(bottom, right)}")
item_predictions.append(self.BoxPrediction(top=top, left=left, bottom=bottom, right=right, score=score, label=label))
predictions.append(item_predictions) # list of lists
# self.model.close_session()
return predictions
    def convert(self, data_path, **kwargs):
        """ Convert Dataloop structure data to model structured.

        Builds (or reuses) a yolov3-format ``train.txt`` annotation file under
        ``data_path`` and, when no snapshot partitions exist, populates
        ``self.train_lines`` / ``self.val_lines`` and their counts for
        :meth:`train`.

        :param data_path: `str` local File System directory path where we already downloaded the data from dataloop platform
        :return: None (results are stored on the instance and on disk)
        """
        annotation_path = "{}/{}".format(data_path, self.annotation_filename)
        has_partitions = self.snapshot.get_partitions(list(dl.SnapshotPartitionType)).items_count > 0
        if os.path.isfile(annotation_path):
            # Use the current train.txt file - no need to re-parse
            with open(annotation_path) as f:
                lines = f.readlines()
        else:
            # ============================
            # Prepare lines in yolo format
            # ============================
            if has_partitions:
                partitions_to_use = [dl.SnapshotPartitionType.TRAIN, dl.SnapshotPartitionType.VALIDATION]
            else:  # If this was called w/o prepare_trainset
                partitions_to_use = ['all']
            os.makedirs(data_path, exist_ok=True)
            # Truncate/create the annotation file; lines are appended per item below.
            with open(annotation_path, 'w') as f:
                pass
            lines = []
            for partition in partitions_to_use:
                # FIXME: is there a better way to know what is the partition of the image
                pages = self.snapshot.get_partitions(partitions=partition) #, filters=filters)
                for page in tqdm.tqdm(pages, desc="{} pages".format(partition), unit='pg', ncols=80, total=pages.total_pages_count): # dynamic_ncols=
                    for item in tqdm.tqdm(page, desc='items', leave=None, ncols=100, total=pages.page_size, position=1):
                        # Create yolov3 format annotations
                        item_anns_fmt = self.parse_yolo_annotation_single_item(item, base_path=data_path)
                        # item_anns_fmt = self.parse_yolo_annotation_single_file(item.filename, base_path=data_path)
                        if item_anns_fmt is not None:  # item may be filtered out
                            lines.append(item_anns_fmt)
                            with open(annotation_path, 'a') as f:  # Save to file for later use
                                f.write(item_anns_fmt + '\n')
        # (Historical commented-out scratch code for downloading the dataset was
        #  removed; items are expected to be downloaded by prepare_trainset.)
        if not has_partitions:
            # No partitions: carve a 10% validation split out of the parsed lines
            # and shuffle both splits with a fixed seed for reproducibility.
            val_split = 0.1
            self.num_val = int(len(lines)*val_split)
            self.num_train = len(lines) - self.num_val
            self.train_lines = lines[:self.num_train]
            self.val_lines = lines[self.num_train:]
            np.random.seed(10101)
            np.random.shuffle(self.train_lines)
            np.random.shuffle(self.val_lines)
            np.random.seed(None)
        # NOTE(review): when partitions DO exist, num_train/num_val/train_lines/
        # val_lines are never assigned in this method -- presumably
        # prepare_trainset sets them, otherwise the log line below raises
        # AttributeError. Confirm.
        self.logger.info("Finshed converting format. train lines {}; val lines {}".format(len(self.train_lines), len(self.val_lines)))
    def convert_dlp(self, items:dl.entities.PagedEntities):
        """ This should implement similar to convert only to work on dlp items
        (i.e. create the converted version directly from item entities,
        without requiring them to be downloaded to disk first).
        """
        # TODO: not implemented yet
        pass
@staticmethod
def _letterbox_image_orig(image, size):
'''resize image with unchanged aspect ratio using padding'''
from PIL import Image
iw, ih = image.size
w, h = size
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
image = image.resize((nw,nh), Image.BICUBIC)
new_image = Image.new('RGB', size, (128,128,128))
new_image.paste(image, ((w-nw)//2, (h-nh)//2))
return new_image
def _letterbox_image(self, image):
'''resize image with unchanged aspect ratio | |
# Source repository: dimagilg/commcare-hq
import datetime
import random
from decimal import Decimal
from django.conf import settings
from django.core import mail
from django.test import override_settings
from dimagi.utils.dates import add_months_to_date
from corehq.apps.accounting import tasks, utils
from corehq.apps.accounting.invoicing import DomainInvoiceFactory
from corehq.apps.accounting.models import (
SMALL_INVOICE_THRESHOLD,
BillingAccount,
BillingRecord,
CreditAdjustment,
CreditLine,
DefaultProductPlan,
FeatureType,
Invoice,
LineItem,
SoftwarePlanEdition,
Subscriber,
Subscription,
SubscriptionAdjustment,
SubscriptionType,
)
from corehq.apps.accounting.tasks import calculate_users_in_all_domains
from corehq.apps.accounting.tests import generator
from corehq.apps.accounting.tests.base_tests import BaseAccountingTest
from corehq.apps.sms.models import INCOMING, OUTGOING
from corehq.apps.smsbillables.models import (
SmsBillable,
SmsGatewayFee,
SmsGatewayFeeCriteria,
SmsUsageFee,
SmsUsageFeeCriteria,
)
from corehq.apps.smsbillables.tests.generator import (
arbitrary_sms_billables_for_domain,
)
from corehq.apps.users.models import WebUser
class BaseInvoiceTestCase(BaseAccountingTest):
    """Shared fixture: a 15-month domain subscription starting 2016-02-23."""

    # Subclasses set True to bootstrap the test software plan versions.
    is_using_test_plans = False
    # NOTE(review): not referenced anywhere in this chunk -- presumably used
    # by generator helpers.
    min_subscription_length = 3

    @classmethod
    def setUpClass(cls):
        super(BaseInvoiceTestCase, cls).setUpClass()
        if cls.is_using_test_plans:
            generator.bootstrap_test_software_plan_versions()
        cls.billing_contact = generator.create_arbitrary_web_user_name()
        cls.dimagi_user = generator.create_arbitrary_web_user_name(is_dimagi=True)
        cls.currency = generator.init_default_currency()
        cls.account = generator.billing_account(
            cls.dimagi_user, cls.billing_contact)
        cls.domain = generator.arbitrary_domain()
        cls.subscription_length = 15  # months
        subscription_start_date = datetime.date(2016, 2, 23)
        subscription_end_date = add_months_to_date(subscription_start_date, cls.subscription_length)
        cls.subscription = generator.generate_domain_subscription(
            cls.account,
            cls.domain,
            date_start=subscription_start_date,
            date_end=subscription_end_date,
        )

    def tearDown(self):
        # Remove users created by the individual test before the next one runs.
        for user in self.domain.all_users():
            user.delete(deleted_by=None)
        # NOTE(review): starting the MRO walk above BaseAccountingTest skips
        # this class's *direct* parent tearDown (BaseAccountingTest.tearDown).
        # Looks deliberate, but confirm.
        super(BaseAccountingTest, self).tearDown()

    @classmethod
    def tearDownClass(cls):
        cls.domain.delete()
        if cls.is_using_test_plans:
            utils.clear_plan_version_cache()
        super(BaseInvoiceTestCase, cls).tearDownClass()
class TestInvoice(BaseInvoiceTestCase):
    """
    Tests that invoices are properly generated for the first month, last month, and a random month in the middle
    of a subscription for a domain.
    """

    def test_no_invoice_before_start(self):
        """
        No invoice gets created if the subscription didn't start in the previous month.
        """
        tasks.generate_invoices(self.subscription.date_start)
        self.assertEqual(self.subscription.invoice_set.count(), 0)

    def test_subscription_invoice(self):
        """An ordinary mid-subscription month yields exactly one invoice with one
        product line item, one line item per feature rate, and a positive balance."""
        invoice_date = utils.months_from_date(self.subscription.date_start, random.randint(2, self.subscription_length))
        calculate_users_in_all_domains(invoice_date)
        tasks.generate_invoices(invoice_date)
        self.assertEqual(self.subscription.invoice_set.count(), 1)
        self.assertEqual(self.subscription.subscriber.domain, self.domain.name)
        invoice = self.subscription.invoice_set.latest('date_created')
        num_product_line_items = invoice.lineitem_set.get_products().count()
        self.assertEqual(num_product_line_items, 1)
        num_feature_line_items = invoice.lineitem_set.get_features().count()
        self.assertEqual(num_feature_line_items, self.subscription.plan_version.feature_rates.count())
        self.assertEqual(invoice.subscription, self.subscription)
        self.assertGreater(invoice.balance, Decimal('0.0000'))

    def test_no_invoice_after_end(self):
        """
        No invoices should be generated for the months after the end date of the subscription.
        """
        invoice_date = utils.months_from_date(self.subscription.date_end, 2)
        tasks.generate_invoices(invoice_date)
        self.assertEqual(self.subscription.invoice_set.count(), 0)

    def test_community_no_charges_no_invoice(self):
        """
        No invoices should be generated for domains that are not on a subscription and do not
        have any per_excess charges on users or SMS messages
        """
        domain = generator.arbitrary_domain()
        self.addCleanup(domain.delete)
        tasks.generate_invoices()
        self.assertRaises(Invoice.DoesNotExist,
                          lambda: Invoice.objects.get(subscription__subscriber__domain=domain.name))

    def test_community_invoice(self):
        """
        For an unsubscribed domain with any charges over the community limit for the month of invoicing,
        make sure that an invoice is generated in addition to a subscription for that month to
        the community plan.
        """
        domain = generator.arbitrary_domain()
        self.addCleanup(domain.delete)
        generator.create_excess_community_users(domain)
        account = BillingAccount.get_or_create_account_by_domain(
            domain, created_by=self.dimagi_user)[0]
        generator.arbitrary_contact_info(account, self.dimagi_user)
        # Confirming extra charges is what makes the domain invoiceable.
        account.date_confirmed_extra_charges = datetime.date.today()
        account.save()
        today = datetime.date.today()
        calculate_users_in_all_domains(datetime.date(today.year, today.month, 1))
        tasks.generate_invoices()
        subscriber = Subscriber.objects.get(domain=domain.name)
        invoices = Invoice.objects.filter(subscription__subscriber=subscriber)
        self.assertEqual(invoices.count(), 1)
        invoice = invoices.get()
        self.assertEqual(invoice.subscription.subscriber.domain, domain.name)
        self.assertEqual(invoice.subscription.date_start, invoice.date_start)
        # The auto-generated subscription ends the day after the invoice period.
        self.assertEqual(
            invoice.subscription.date_end - datetime.timedelta(days=1),
            invoice.date_end
        )

    def test_date_due_not_set_small_invoice(self):
        """Date Due doesn't get set if the invoice is small"""
        invoice_date_small = utils.months_from_date(self.subscription.date_start, 1)
        calculate_users_in_all_domains(invoice_date_small)
        tasks.generate_invoices(invoice_date_small)
        small_invoice = self.subscription.invoice_set.first()
        self.assertTrue(small_invoice.balance <= SMALL_INVOICE_THRESHOLD)
        self.assertIsNone(small_invoice.date_due)

    def test_date_due_set_large_invoice(self):
        """Date Due only gets set for a large invoice (> $100)"""
        # Upgrade to a pricier plan so the invoice crosses the threshold.
        self.subscription.plan_version = generator.subscribable_plan_version(SoftwarePlanEdition.ADVANCED)
        self.subscription.save()
        invoice_date_large = utils.months_from_date(self.subscription.date_start, 3)
        calculate_users_in_all_domains(invoice_date_large)
        tasks.generate_invoices(invoice_date_large)
        large_invoice = self.subscription.invoice_set.last()
        self.assertTrue(large_invoice.balance > SMALL_INVOICE_THRESHOLD)
        self.assertIsNotNone(large_invoice.date_due)

    def test_date_due_gets_set_autopay(self):
        """Date due always gets set for autopay """
        self.subscription.account.update_autopay_user(self.billing_contact, self.domain)
        invoice_date_autopay = utils.months_from_date(self.subscription.date_start, 1)
        calculate_users_in_all_domains(invoice_date_autopay)
        tasks.generate_invoices(invoice_date_autopay)
        autopay_invoice = self.subscription.invoice_set.last()
        self.assertTrue(autopay_invoice.balance <= SMALL_INVOICE_THRESHOLD)
        self.assertIsNotNone(autopay_invoice.date_due)
class TestContractedInvoices(BaseInvoiceTestCase):
    """Invoicing behavior specific to contracted (implementation) subscriptions."""

    def setUp(self):
        super(TestContractedInvoices, self).setUp()
        # Flag the subscription as contracted work; this routes invoice emails
        # to the accounts team instead of the client.
        self.subscription.service_type = SubscriptionType.IMPLEMENTATION
        self.subscription.save()
        # Any month of the subscription after the first works for these tests.
        self.invoice_date = utils.months_from_date(
            self.subscription.date_start,
            random.randint(2, self.subscription_length)
        )

    @override_settings(ACCOUNTS_EMAIL='<EMAIL>')
    def test_contracted_invoice_email_recipient(self):
        """
        For contracted invoices, emails should be sent to <EMAIL>
        """
        expected_recipient = ["<EMAIL>"]
        calculate_users_in_all_domains(self.invoice_date)
        tasks.generate_invoices(self.invoice_date)
        self.assertEqual(Invoice.objects.count(), 1)
        actual_recipient = Invoice.objects.first().email_recipients
        self.assertEqual(actual_recipient, expected_recipient)

    def test_contracted_invoice_email_template(self):
        """
        Emails for contracted invoices should use the contracted invoices template
        """
        expected_template = BillingRecord.INVOICE_CONTRACTED_HTML_TEMPLATE
        calculate_users_in_all_domains(self.invoice_date)
        tasks.generate_invoices(self.invoice_date)
        self.assertEqual(BillingRecord.objects.count(), 1)
        actual_template = BillingRecord.objects.first().html_template
        # Fixed: the original used assertTrue(actual, expected), which treats
        # the second argument as a failure *message* and passes for any truthy
        # template string -- the comparison never actually ran.
        self.assertEqual(actual_template, expected_template)
class TestProductLineItem(BaseInvoiceTestCase):
    """
    Tests that the Product line item is properly generated and prorated (when applicable) in an invoice.
    """

    def setUp(self):
        super(TestProductLineItem, self).setUp()
        # The plan's flat monthly software fee rate.
        self.product_rate = self.subscription.plan_version.product_rate

    def test_standard(self):
        """
        For the Product Line Item, make sure that the Product rate is not prorated:
        - base_cost uses the correct monthly fee
        - base_description is not None
        - unit_description is None
        - unit_cost is 0.0
        - quantity is 1
        - subtotal = monthly fee
        """
        invoice_date = utils.months_from_date(self.subscription.date_start, random.randint(2, self.subscription_length))
        calculate_users_in_all_domains(invoice_date)
        tasks.generate_invoices(invoice_date)
        invoice = self.subscription.invoice_set.latest('date_created')
        # Product line items are the ones with no feature rate attached.
        product_line_items = invoice.lineitem_set.filter(feature_rate__exact=None)
        self.assertEqual(product_line_items.count(), 1)
        product_line_item = product_line_items.get()
        self.assertIsNotNone(product_line_item.base_description)
        self.assertEqual(product_line_item.base_cost, self.product_rate.monthly_fee)
        self.assertIsNone(product_line_item.unit_description)
        self.assertEqual(product_line_item.unit_cost, Decimal('0.0000'))
        self.assertEqual(product_line_item.quantity, 1)
        self.assertEqual(product_line_item.subtotal, self.product_rate.monthly_fee)
        # no adjustments
        self.assertEqual(product_line_item.total, self.product_rate.monthly_fee)

    def test_prorate(self):
        """
        Make sure that the product is prorated for the first and last invoices, which fall in a partial month:
        - base_cost is 0.0
        - base_description is None
        - unit_description is not None
        - unit_cost is prorated
        - quantity > 1
        - subtotal = unit_cost * quantity
        """
        # NOTE(review): unlike the other invoice tests, no
        # calculate_users_in_all_domains(...) call precedes generate_invoices
        # here -- confirm user counts are not needed for these assertions.
        first_invoice_date = utils.months_from_date(self.subscription.date_start, 1)
        tasks.generate_invoices(first_invoice_date)
        last_invoice_date = utils.months_from_date(self.subscription.date_end, 1)
        tasks.generate_invoices(last_invoice_date)
        for invoice in self.subscription.invoice_set.all():
            product_line_items = invoice.lineitem_set.filter(feature_rate__exact=None)
            self.assertEqual(product_line_items.count(), 1)
            product_line_item = product_line_items.get()
            # The fixture subscription runs 2016-02-23 .. 2017-05-23 (15 months,
            # see BaseInvoiceTestCase.setUpClass): the first partial month is
            # Feb 23-29 (7 days; 2016 is a leap year) and the last is May 1-22
            # (22 days).
            days_prorated_by_invoice_start_date = {
                datetime.date(2016, 2, 23): 7,
                datetime.date(2017, 5, 1): 22,
            }
            days_in_month_by_invoice_start_date = {
                datetime.date(2016, 2, 23): 29,
                datetime.date(2017, 5, 1): 31,
            }
            self.assertEqual(product_line_item.quantity, days_prorated_by_invoice_start_date[invoice.date_start])
            # Unit cost = monthly fee spread over the days of that month.
            self.assertEqual(
                product_line_item.unit_cost,
                Decimal("%.2f" % round(
                    self.product_rate.monthly_fee / days_in_month_by_invoice_start_date[invoice.date_start], 2
                ))
            )
            self.assertIsNotNone(product_line_item.unit_description)
            self.assertEqual(product_line_item.base_cost, Decimal('0.0000'))
            self.assertIsNone(product_line_item.base_description)
            self.assertEqual(product_line_item.subtotal, product_line_item.unit_cost * product_line_item.quantity)
            # no adjustments
            self.assertEqual(product_line_item.total, product_line_item.unit_cost * product_line_item.quantity)
class TestUserLineItem(BaseInvoiceTestCase):
is_using_test_plans = True
    def setUp(self):
        super(TestUserLineItem, self).setUp()
        # The plan's USER feature rate: monthly_limit free users, then
        # per_excess_fee for each user over the limit.
        self.user_rate = self.subscription.plan_version.feature_rates.filter(feature__feature_type=FeatureType.USER)[:1].get()
def test_under_limit(self):
"""
Make sure that the User rate produced:
- base_description is None
- base_cost is 0.0
- unit_cost is equal to the per_excess_fee
- quantity is equal to 0
- unit_description is None
- total and subtotals are 0.0
"""
invoice_date = utils.months_from_date(self.subscription.date_start, random.randint(2, self.subscription_length))
num_users = lambda: random.randint(0, self.user_rate.monthly_limit)
num_active = num_users()
generator.arbitrary_commcare_users_for_domain(self.domain.name, num_active)
num_inactive = num_users()
generator.arbitrary_commcare_users_for_domain(self.domain.name, num_inactive, is_active=False)
calculate_users_in_all_domains(invoice_date)
tasks.generate_invoices(invoice_date)
invoice = self.subscription.invoice_set.latest('date_created')
user_line_item = invoice.lineitem_set.get_feature_by_type(FeatureType.USER).get()
self.assertIsNone(user_line_item.base_description)
self.assertEqual(user_line_item.base_cost, Decimal('0.0000'))
self.assertIsNone(user_line_item.unit_description)
self.assertEqual(user_line_item.quantity, 0)
self.assertEqual(user_line_item.unit_cost, self.user_rate.per_excess_fee)
self.assertEqual(user_line_item.subtotal, Decimal('0.0000'))
self.assertEqual(user_line_item.total, Decimal('0.0000'))
def test_over_limit(self):
"""
Make sure that the User rate produced:
- base_description is None
- base_cost is 0.0
- unit_description is not None
- unit_cost is equal to the per_excess_fee on the user rate
- quantity is equal to number of commcare users in that domain minus the monthly_limit on the user rate
- total and subtotals are equal to number of extra users * per_excess_fee
"""
invoice_date = utils.months_from_date(self.subscription.date_start, random.randint(2, self.subscription_length))
num_users = lambda: random.randint(self.user_rate.monthly_limit + 1, self.user_rate.monthly_limit + 2)
num_active = num_users()
generator.arbitrary_commcare_users_for_domain(self.domain.name, num_active)
num_inactive = num_users()
generator.arbitrary_commcare_users_for_domain(self.domain.name, num_inactive, is_active=False)
calculate_users_in_all_domains(datetime.date(invoice_date.year, invoice_date.month, 1))
tasks.generate_invoices(invoice_date)
invoice = self.subscription.invoice_set.latest('date_created')
user_line_item = invoice.lineitem_set.get_feature_by_type(FeatureType.USER).get()
# there is no base cost
self.assertIsNone(user_line_item.base_description)
self.assertEqual(user_line_item.base_cost, Decimal('0.0000'))
num_to_charge = num_active - self.user_rate.monthly_limit
self.assertIsNotNone(user_line_item.unit_description)
self.assertEqual(user_line_item.quantity, num_to_charge)
self.assertEqual(user_line_item.unit_cost, self.user_rate.per_excess_fee)
self.assertEqual(user_line_item.subtotal, num_to_charge * self.user_rate.per_excess_fee)
self.assertEqual(user_line_item.total, num_to_charge * self.user_rate.per_excess_fee)
def test_community_over_limit(self):
"""
For a domain under community (no subscription) with users over the community limit, make sure that:
- base_description is None
- base_cost is 0.0
- unit_description is not None
- unit_cost is equal to the per_excess_fee on the user rate
- quantity is equal to number of commcare users in that domain minus the monthly_limit on the user rate
- total and subtotals are equal to number of extra users * per_excess_fee
"""
domain = generator.arbitrary_domain()
self.addCleanup(domain.delete)
num_active = generator.create_excess_community_users(domain)
account = BillingAccount.get_or_create_account_by_domain(
domain, created_by=self.dimagi_user)[0]
generator.arbitrary_contact_info(account, self.dimagi_user)
today = datetime.date.today()
account.date_confirmed_extra_charges = today
account.save()
calculate_users_in_all_domains(datetime.date(today.year, today.month, 1))
tasks.generate_invoices()
subscriber = Subscriber.objects.get(domain=domain.name)
invoice = Invoice.objects.filter(subscription__subscriber=subscriber).get()
user_line_item = invoice.lineitem_set.get_feature_by_type(FeatureType.USER).get()
self.assertIsNone(user_line_item.base_description)
self.assertEqual(user_line_item.base_cost, Decimal('0.0000'))
community_plan = DefaultProductPlan.get_default_plan_version()
num_to_charge = num_active - community_plan.user_limit
self.assertIsNotNone(user_line_item.unit_description)
self.assertEqual(user_line_item.quantity, num_to_charge)
self.assertEqual(user_line_item.unit_cost, self.user_rate.per_excess_fee)
self.assertEqual(user_line_item.subtotal, num_to_charge * self.user_rate.per_excess_fee)
self.assertEqual(user_line_item.total, num_to_charge * self.user_rate.per_excess_fee)
class TestSmsLineItem(BaseInvoiceTestCase):
@classmethod
def setUpClass(cls):
super(TestSmsLineItem, cls).setUpClass()
cls.sms_rate = cls.subscription.plan_version.feature_rates.filter(
feature__feature_type=FeatureType.SMS
).get()
cls.invoice_date = utils.months_from_date(
cls.subscription.date_start, random.randint(2, cls.subscription_length)
)
cls.sms_date = utils.months_from_date(cls.invoice_date, -1)
@classmethod
def tearDownClass(cls):
cls._delete_sms_billables()
super(TestSmsLineItem, cls).tearDownClass()
def test_under_limit(self):
"""
Make sure that the Line Item for the SMS Rate has the following:
- base_description is None
- base_cost is 0.0
- unit_description is not None
- unit_cost is 0.0
- quantity is equal to 1
- total and subtotals are 0.0
"""
num_sms = random.randint(0, self.sms_rate.monthly_limit // 2)
arbitrary_sms_billables_for_domain(
self.subscription.subscriber.domain, self.sms_date, num_sms, direction=INCOMING
)
arbitrary_sms_billables_for_domain(
self.subscription.subscriber.domain, self.sms_date, num_sms, direction=OUTGOING
)
sms_line_item = self._create_sms_line_item()
# there is no base cost
self.assertIsNone(sms_line_item.base_description)
self.assertEqual(sms_line_item.base_cost, Decimal('0.0000'))
self.assertEqual(sms_line_item.quantity, 1)
self.assertEqual(sms_line_item.unit_cost, Decimal('0.0000'))
self.assertIsNotNone(sms_line_item.unit_description)
self.assertEqual(sms_line_item.subtotal, Decimal('0.0000'))
self.assertEqual(sms_line_item.total, Decimal('0.0000'))
def test_over_limit(self):
"""
Make sure that the Line Item for the SMS Rate has the following:
- base_description is None
- base_cost is 0.0
- unit_description is not None
- unit_cost is greater than 0.0
- quantity is equal to 1
- total and subtotals are greater than zero
"""
num_sms = random.randint(self.sms_rate.monthly_limit + 1, self.sms_rate.monthly_limit + 2)
billables = arbitrary_sms_billables_for_domain(
self.subscription.subscriber.domain, self.sms_date, num_sms
)
sms_line_item | |
to control who can delete which keys.
Supported methods:
DELETE: /{mount_point}/keys/{name}. Produces: 204 (empty body)
:param name: Specifies the name of the encryption key to delete. This is specified as part of the URL.
:type name: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
# JSON parameters to the plugin.
api_path = format_url(
'/v1/{mount_point}/keys/{name}',
mount_point=mount_point,
name=name,
)
# The actual call to the plugin.
return self._adapter.delete(
url=api_path,
)
def delete_subkey(self, name, key_id, mount_point=DEFAULT_MOUNT_POINT):
    """Delete the given subkey associated with the given master key.

    Because this is a potentially catastrophic operation, use Vault policies
    instead to control who can delete which keys.

    Supported methods:
        DELETE: /{mount_point}/keys/{name}/subkeys/{key_id}. Produces: 204 (empty body)

    :param name: Name of the master key with which the subkey is associated. This is
        specified as part of the URL.
    :type name: str | unicode
    :param key_id: Key ID of the subkey to delete. This is specified as part of the URL.
    :type key_id: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    # Build the endpoint URL and issue the DELETE through the request adapter.
    return self._adapter.delete(
        url=format_url(
            '/v1/{mount_point}/keys/{name}/subkeys/{key_id}',
            mount_point=mount_point,
            name=name,
            key_id=key_id,
        ),
    )
def update_key_configuration(self, name, min_decryption_version=None, min_encryption_version=None, deletion_allowed=None,
                             exportable=None, allow_plaintext_backup=None, mount_point=DEFAULT_MOUNT_POINT):
    """Unsupported operation for this backend; always raises NotImplementedError."""
    raise NotImplementedError

def rotate_key(self, name, mount_point=DEFAULT_MOUNT_POINT):
    """Unsupported operation for this backend; always raises NotImplementedError."""
    raise NotImplementedError
def export_key(self, name, key_type=None, version=None, mount_point=DEFAULT_MOUNT_POINT):
    """Return the named key.

    The keys object shows the value of the key for each version. If version is specified, the specific version will
    be returned. If latest is provided as the version, the current key will be provided. Depending on the type of
    key, different information may be returned. The key must be exportable to support this operation and the version
    must still be valid.

    Supported methods:
        GET: /{mount_point}/export/{key_type}/{name}(/{version}). Produces: 200 application/json

    :param name: Specifies the name of the key to read information about. This is specified as part of the URL.
    :type name: str | unicode
    :param key_type: Specifies the type of the key to export. Valid values are:
        encryption-key
        signing-key
        Validated but ignored at the time of writing, so it has no effect.
    :type key_type: str | unicode
    :param version: Specifies the version of the key to read. Not supported at the time of writing;
        passing a non-None value raises UnsupportedParam.
    :type version: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the request.
    :rtype: dict
    :raises UnsupportedParam: if ``version`` is provided.
    :raises ParamValidationError: if ``key_type`` is not one of ALLOWED_EXPORT_KEY_TYPES.
    """
    # Unsupported parameters.
    if version is not None:
        raise UnsupportedParam('key versions not supported')
    # Validated but ignored for now.
    if key_type is not None and key_type not in ALLOWED_EXPORT_KEY_TYPES:
        error_msg = 'invalid key_type argument provided "{arg}", supported types: "{allowed_types}"'
        raise ParamValidationError(error_msg.format(
            arg=key_type,
            allowed_types=', '.join(ALLOWED_EXPORT_KEY_TYPES),
        ))
    # NOTE: {key_type} is NOT part of the URL, unlike with Transit Secrets Engine,
    # so it is deliberately not passed to format_url.
    api_path = format_url(
        '/v1/{mount_point}/export/{name}',
        mount_point=mount_point,
        name=name,
    )
    # BUGFIX: removed the dead `if version is not None: api_path = urljoin(...)`
    # branch — version is guaranteed to be None here because any non-None value
    # raised UnsupportedParam above, so that branch could never execute.
    return self._adapter.get(
        url=api_path,
    )
def encrypt_data(self, name, plaintext, context=None, key_version=None, nonce=None, batch_input=None, type=None,
                 convergent_encryption=None, mount_point=DEFAULT_MOUNT_POINT):
    """Unsupported operation for this backend; always raises NotImplementedError."""
    raise NotImplementedError

def decrypt_data(self, name, ciphertext, context=None, nonce=None, batch_input=None, mount_point=DEFAULT_MOUNT_POINT):
    """Unsupported operation for this backend; always raises NotImplementedError."""
    raise NotImplementedError

def rewrap_data(self, name, ciphertext, context=None, key_version=None, nonce=None, batch_input=None,
                mount_point=DEFAULT_MOUNT_POINT):
    """Unsupported operation for this backend; always raises NotImplementedError."""
    raise NotImplementedError

def generate_data_key(self, name, key_type, context=None, nonce=None, bits=None, mount_point=DEFAULT_MOUNT_POINT):
    """Unsupported operation for this backend; always raises NotImplementedError."""
    raise NotImplementedError

def generate_random_bytes(self, n_bytes=None, output_format=None, mount_point=DEFAULT_MOUNT_POINT):
    """Unsupported operation for this backend; always raises NotImplementedError."""
    raise NotImplementedError

def hash_data(self, hash_input, algorithm=None, output_format=None, mount_point=DEFAULT_MOUNT_POINT):
    """Unsupported operation for this backend; always raises NotImplementedError."""
    raise NotImplementedError

def generate_hmac(self, name, hash_input, key_version=None, algorithm=None, mount_point=DEFAULT_MOUNT_POINT):
    """Unsupported operation for this backend; always raises NotImplementedError."""
    raise NotImplementedError
def sign_data(self, name, hash_input, key_version=None, hash_algorithm='sha2-512', context=None, prehashed=None,
              signature_algorithm=None, marshaling_algorithm='ascii-armor', expires=365*24*60*60,
              mount_point=DEFAULT_MOUNT_POINT):
    """Return the cryptographic signature of the given data using the named key and the specified hash algorithm.

    The key must be of a type that supports signing. Either the first available signing subkey, or the master
    key (which should support signing), is chosen.

    Supported methods:
        POST: /{mount_point}/sign/{name}(/{hash_algorithm}). Produces: 200 application/json

    :param name: Name of the encryption key to use for signing. This is specified as part of the URL.
    :type name: str | unicode
    :param hash_input: Base64 encoded input data.
    :type hash_input: str | unicode
    :param key_version: Version of the key to use for signing. Not supported at the time of writing;
        passing a non-None value raises UnsupportedParam.
    :type key_version: int
    :param hash_algorithm: Hash algorithm to use for supporting key types.
        Currently-supported algorithms are: sha2-224, sha2-256, sha2-384, sha2-512
    :type hash_algorithm: str | unicode
    :param context: Base64 encoded context for key derivation. Not supported at the time of writing;
        passing a non-None value raises UnsupportedParam.
    :type context: str | unicode
    :param prehashed: Set to true when the input is already hashed. Not supported at the time of writing;
        passing a non-None value raises UnsupportedParam.
    :type prehashed: bool
    :param signature_algorithm: When using a RSA key, the RSA signature algorithm to use for signing.
        Supported signature types are: pkcs1v15. Validated but otherwise ignored here.
    :type signature_algorithm: str | unicode
    :param marshaling_algorithm: The way in which the signature should be marshaled.
        Supported types are: ascii-armor, base64
    :type marshaling_algorithm: str | unicode
    :param expires: Number of seconds from the creation time (now) after which the signature expires.
        Zero means the signature never expires; the default is one year.
    :type expires: int
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the request.
    :rtype: dict
    """
    # Parameters this plugin does not support: fail fast before validation.
    if key_version is not None:
        raise UnsupportedParam('key versions not supported')
    if context is not None:
        raise UnsupportedParam('context for key derivation not supported')
    if prehashed is not None:
        raise UnsupportedParam('prehashed input not supported')

    # Validate each algorithm choice against its supported set; checked in
    # the same order as before (signature_algorithm is validated but ignored).
    for arg_name, value, allowed in (
        ('hash_algorithm', hash_algorithm, ALLOWED_HASH_DATA_ALGORITHMS),
        ('signature_algorithm', signature_algorithm, ALLOWED_SIGNATURE_ALGORITHMS),
        ('marshaling_algorithm', marshaling_algorithm, ALLOWED_MARSHALING_ALGORITHMS),
    ):
        if value is not None and value not in allowed:
            raise ParamValidationError(
                'invalid {name} argument provided "{arg}", supported types: "{allowed_types}"'.format(
                    name=arg_name,
                    arg=value,
                    allowed_types=', '.join(allowed),
                )
            )

    # JSON parameters to the plugin; None-valued optional fields are omitted.
    params = {
        'input': hash_input,
    }
    params.update(
        remove_nones({
            'algorithm': hash_algorithm,
            'format': marshaling_algorithm,
            'expires': expires,
        })
    )

    # The actual call to the plugin.
    return self._adapter.post(
        url=format_url(
            '/v1/{mount_point}/sign/{name}',
            mount_point=mount_point,
            name=name,
        ),
        json=params,
    )
def verify_signed_data(self, name, hash_input, signature=None, hmac=None, hash_algorithm=None, context=None,
prehashed=None, signature_algorithm=None, marshaling_algorithm='ascii-armor',
mount_point=DEFAULT_MOUNT_POINT):
"""Return whether the provided signature is valid for the given data.
Supported methods:
POST: /{mount_point}/verify/{name}(/{hash_algorithm}). Produces: 200 application/json
:param name: Specifies the name of the encryption key that was used to generate the signature or HMAC.
:type name: str | unicode
:param hash_input: Specifies the base64 encoded input data.
:type input: str | unicode
:param signature: Specifies the signature output from the /transit/sign function. Either this must | |
# dhdt/postprocessing/solar_tools.py — solar geometry post-processing tools
import numpy as np
from pysolar.solar import get_azimuth, get_altitude
from datetime import datetime
from pytz import timezone
from scipy import ndimage
from scipy.spatial.transform import Rotation
from skimage import transform
# general location functions
def annual_solar_graph(latitude=51.707524, longitude=6.244362, deg_sep=.5,
                       year=2018, sec_resol=20):
    """ calculate the solar graph of a location

    Parameters
    ----------
    latitude : float, unit=degrees, range=-90...+90
        latitude of the location of interest
    longitude : float, unit=degrees, range=-180...+180
        longitude of the location of interest
    deg_sep : float, unit=degrees
        resolution of the solargraph grid
    year : integer
        year of interest
    sec_resol : float, unit=seconds, default=20
        resolution of sun location calculation

    Returns
    -------
    Sky : np.array, size=(k,l), dtype=integer
        array with solargraph
    az : np.array, size=(l,_), dtype=float, unit=degrees
        azimuth values, that is the axis ticks of the solar graph
    zn : np.array, size=(k,_), dtype=float, unit=degrees
        vertical-axis values (axis ticks of the solar graph)
    """
    az = np.arange(0, 360, deg_sep)
    zn = np.flip(np.arange(-.5, +90, deg_sep))

    Sol = np.zeros((zn.shape[0], az.shape[0]))
    # The two solstice days bound the annual sun paths:
    # 21/12 typical winter solstice - lower bound
    # 21/06 typical summer solstice - upper bound
    month = np.array([12, 6])
    day = np.array([21, 21])

    # PERF: hoist loop invariants — the timezone object and the datetime are
    # now built once per time step instead of twice per get_* call.
    utc = timezone('UTC')
    # loop through all times of both bounding days to get the sun paths
    for i in range(2):
        for hour in range(0, 24):
            for minu in range(0, 60):
                for sec in range(0, 60, sec_resol):
                    moment = datetime(year, month[i], day[i],
                                      hour, minu, sec, tzinfo=utc)
                    sun_alt = get_altitude(latitude, longitude, moment)
                    sun_azi = get_azimuth(latitude, longitude, moment)
                    # nearest grid cell for this sun position
                    az_id = (np.abs(az - sun_azi)).argmin()
                    zn_id = (np.abs(zn - sun_alt)).argmin()
                    # winter path marked -1, summer path +1
                    Sol[zn_id, az_id] = -1 if i == 0 else +1

    # remove the line below the horizon
    Sol = Sol[:-1, :]

    # mathematical morphology to do infilling, and extend the boundaries a bit
    Sol_plu, Sol_min = Sol == +1, Sol == -1
    Sol_plu = ndimage.binary_dilation(Sol_plu, np.ones((5, 5))).cumsum(axis=0) == 1
    Sol_min = np.flipud(ndimage.binary_dilation(Sol_min, np.ones((5, 5))))
    Sol_min = np.flipud(Sol_min.cumsum(axis=0) == 1)

    # populate the solargraph between the upper and lower bound
    Sky = np.zeros(Sol.shape)
    for i in range(Sol.shape[1]):
        mat_idx = np.where(Sol_plu[:, i] == 1)
        if len(mat_idx[0]) > 0:
            start_idx = mat_idx[0][0]
            mat_idx = np.where(Sol_min[:, i] == 1)
            if len(mat_idx[0]) > 0:
                end_idx = mat_idx[0][-1]
            else:
                # BUGFIX: fall back to the number of rows (axis 0), not the
                # number of columns — end_idx indexes rows of Sky. The old
                # Sol.shape[1] only appeared to work because slicing clamps
                # past-the-end indices.
                end_idx = Sol.shape[0]
            Sky[start_idx:end_idx, i] = 1
    return Sky, az, zn
def az_to_sun_vector(az, indexing='ij'):
    """ transform azimuth angle to 2D-unit vector

    Parameters
    ----------
    az : float, unit=degrees
        azimuth of sun, counted from North (y-axis), positive towards East.
    indexing : {'xy', 'ij'}
        * "xy" : using map coordinates
        * "ij" : using local image coordinates

    Returns
    -------
    sun : numpy.array, size=(2,1), range=0...1
        unit vector in the direction of the sun.

    See Also
    --------
    sun_angles_to_vector

    Notes
    -----
    In the 'ij' frame the first axis runs down the image rows, so North maps
    to -cos(az); in the 'xy' map frame North maps to +cos(az) on the second
    component.
    """
    theta = np.radians(az)
    if indexing == 'ij':
        # local image coordinate frame: rows increase southwards
        first, second = -np.cos(theta), +np.sin(theta)
    else:
        # 'xy': map coordinate frame
        first, second = +np.sin(theta), +np.cos(theta)
    return np.array([[first],
                     [second]])
def sun_angles_to_vector(az, zn, indexing='ij'):
    """ transform azimuth and zenith angle to 3D-unit vector

    Parameters
    ----------
    az : float, unit=degrees
        azimuth angle of sun, counted from North, positive towards East.
    zn : float, unit=degrees
        zenith angle of sun.
    indexing : {'xy', 'ij'}
        * "xy" : using map coordinates
        * "ij" : using local image coordinates

    Returns
    -------
    sun : numpy.array, size=(3,1), dtype=float, range=0...1
        unit vector in the direction of the sun.

    See Also
    --------
    az_to_sun_vector

    Notes
    -----
    The horizontal components follow the same 'ij'/'xy' convention as
    az_to_sun_vector. NOTE(review): the vertical component is built from
    tan(zn) before normalisation, which makes the normalised z-component equal
    sin(zn) — i.e. zn behaves like an elevation angle here; confirm against
    the callers before relying on strict zenith semantics.
    """
    az_rad, zn_rad = np.radians(az), np.radians(zn)
    vertical = np.tan(zn_rad)
    if indexing == 'ij':
        # local image coordinate frame
        sun = np.dstack((-np.cos(az_rad), +np.sin(az_rad), vertical))
    else:
        # 'xy': map coordinate frame
        sun = np.dstack((+np.sin(az_rad), +np.cos(az_rad), vertical))
    # scale to unit length (per-pixel normalisation)
    sun /= np.linalg.norm(sun, axis=2, keepdims=True)
    return sun
# elevation model based functions
def make_shadowing(Z, az, zn, spac=10):
    """ create synthetic shadow image from given sun angles

    Parameters
    ----------
    Z : numpy.array, size=(m,n), dtype={integer,float}
        grid with elevation data
    az : float, unit=degrees
        azimuth angle, counted from North, positive towards East
    zn : float, unit=degrees
        zenith angle
    spac : float, optional
        resolution of the square grid. The default is 10.

    Returns
    -------
    Sw : numpy.array, size=(m,n), dtype=bool
        estimated shadow grid
    """
    # Rotate the grid so the sun direction runs along the rows.
    Zr = ndimage.rotate(Z, az, axes=(1, 0), cval=-1, order=3)
    # Matching boolean mask on the rotated footprint; nearest-neighbour keeps
    # it crisp and avoids spline artefacts.
    Mr = ndimage.rotate(np.zeros(Z.shape, dtype=bool), az, axes=(1, 0),
                        cval=False, order=0, prefilter=False)
    # Height the sun ray drops per grid step down-sun.
    dZ = np.tan(np.radians(90 - zn)) * spac
    # Sweep down-sun: a cell is shadowed when it lies below the running ray
    # height carried over from the previous row.
    for i in range(1, Zr.shape[0]):
        Mr[i, :] = (Zr[i, :]) < (Zr[i - 1, :] - dZ)
        Zr[i, :] = np.maximum(Zr[i, :], Zr[i - 1, :] - dZ)
    # FIX: `ndimage.interpolation.rotate` is the deprecated legacy namespace;
    # the public `ndimage.rotate` has the identical signature and behaviour.
    Ms = ndimage.rotate(Mr, -az, axes=(1, 0), cval=False, order=0,
                        mode='constant', prefilter=False)
    # Crop the rotated-back mask to the original grid footprint.
    i_min = int(np.floor((Ms.shape[0] - Z.shape[0]) / 2))
    i_max = int(np.floor((Ms.shape[0] + Z.shape[0]) / 2))
    j_min = int(np.floor((Ms.shape[1] - Z.shape[1]) / 2))
    j_max = int(np.floor((Ms.shape[1] + Z.shape[1]) / 2))
    Sw = Ms[i_min:i_max, j_min:j_max]
    return Sw
def make_shading(Z, az, zn, spac=10):
    """ create synthetic shading image from given sun angles

    A simple Lambertian reflection model is used here.

    Parameters
    ----------
    Z : numpy.array, size=(m,n), dtype={integer,float}, unit=meter
        grid with elevation data
    az : float, unit=degrees
        azimuth angle, counted from North, positive towards East
    zn : float, unit=degrees
        zenith angle
    spac : float, default=10, unit=meter
        resolution of the square grid.

    Returns
    -------
    Sh : numpy.array, size=(m,n), dtype=float, range=0...1
        estimated shading grid
    """
    sun = sun_angles_to_vector(az, zn, indexing='xy')

    # Surface gradients: np.gradient's first output runs along the rows,
    # the second along the columns.
    # NOTE(review): the gradient is taken of Z*spac rather than dividing the
    # gradient by the grid spacing — confirm the intended slope scaling.
    dy, dx = np.gradient(Z * spac)

    # Per-cell surface normals, scaled to unit length.
    normal = np.dstack((dx, dy, np.ones_like(Z)))
    normal /= np.linalg.norm(normal, axis=2, keepdims=True)

    # Lambertian reflectance: dot product of surface normal and sun vector.
    Sh = (normal * sun).sum(axis=2)
    return Sh
def make_doppler_range(Z, az, zn, Lambertian=True, spac=10):
"""
Parameters
----------
Z : numpy.array, unit=meters
array with elevation values
az : float, unit=degrees, range=-180...+180
flight orientation of the satellite
zn : {float,array}, unit=degrees, range=0...+90
illumination angle from the satellite
Returns
-------
Notes
-----
"""
# rotate
Z_r = ndimage.rotate(Z, az, axes=(1, 0), cval=-1, order=3)
# mask based
M_r = ndimage.rotate(np.ones_like(Z, dtype=bool), az, axes=(1, 0), \
cval=False, order=0, prefilter=False)
K_r = np.fliplr(np.meshgrid(np.linspace(0,M_r.shape[0]-1,M_r.shape[0]),
np.linspace(0,M_r.shape[1]-1,M_r.shape[1]))[0])
np.putmask(K_r, ~M_r, 0)
D_r = np.multiply(np.cos(np.deg2rad(zn)), Z_r) + \
np.multiply(np.sin(np.deg2rad(zn)), K_r*spac)
if Lambertian: # do a weighted histogram
Sd = make_shading(Z, az, zn, spac=10)
Sd_r = ndimage.rotate(Sd, az, axes=(1, 0), cval=-1, order=3)
np.putmask(Sd_r, ~M_r, 0)
# loop through the rows and create histogram
S_r = np.zeros_like(Z_r, dtype=float)
for i in range(Z_r.shape[0]):
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.