input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
from wallaby import *
import constants as c
import movement as m
#---------------------------------------------States-------------------------------------------
def BlackLeft():
    # Left rear cliff sensor reads below the black threshold -> over black tape.
    return(get_create_lcliff_amt() < c.LCLIFF_BW)
def NotBlackLeft():
    # Exact negation of BlackLeft(); uses >= so a reading equal to the
    # threshold cannot make both predicates False (which would stall the
    # if/elif branches in the line-follow loops below).
    return(get_create_lcliff_amt() >= c.LCLIFF_BW)
def BlackRight():
    # Right rear cliff sensor over black.
    return(get_create_rcliff_amt() < c.RCLIFF_BW)
def NotBlackRight():
    return(get_create_rcliff_amt() >= c.RCLIFF_BW)
def BlackFrontLeft():
    # Left front cliff sensor over black.
    return(get_create_lfcliff_amt() < c.LFCLIFF_BW)
def NotBlackFrontLeft():
    return(get_create_lfcliff_amt() >= c.LFCLIFF_BW)
def BlackFrontRight():
    # Right front cliff sensor over black.
    return(get_create_rfcliff_amt() < c.RFCLIFF_BW)
def NotBlackFrontRight():
    return(get_create_rfcliff_amt() >= c.RFCLIFF_BW)
def BumpedLeft():
    # Left physical bumper pressed (Create bump sensors report 0/1).
    return(get_create_lbump() == 1)
def BumpedLightLeft():
    # Left light-bump (IR proximity) triggered.
    return(get_create_lclightbump() == 1)
def BumpedLightFrontLeft():
    return(get_create_lflightbump() == 1)
def BumpedLightFrontRight():
    return(get_create_rflightbump() == 1)
def NotBumpedLeft():
    return(get_create_lbump() == 0)
def BumpedRight():
    # Right physical bumper pressed.
    return(get_create_rbump() == 1)
def BumpedLightRight():
    return(get_create_rclightbump() == 1)
def NotBumpedRight():
    return(get_create_rbump() == 0)
def DepthSensesObject():
    # Analog depth sensor above its cutoff -> object present.
    return(analog(c.DEPTH_SENSOR) > c.DEPTH_CF)
def NotDepthSensesObject():
    # Exact negation of DepthSensesObject() (<= covers the boundary reading).
    return(analog(c.DEPTH_SENSOR) <= c.DEPTH_CF)
def RightDepthSensesObject():
    return(analog(c.RIGHT_DEPTH_SENSOR) > c.RIGHT_DEPTH_CF)
def NotRightDepthSensesObject():
    return(analog(c.RIGHT_DEPTH_SENSOR) <= c.RIGHT_DEPTH_CF)
#---------------------------------------------Driving Sensor Functions-------------------------------------------
def forwards_until_black_lcliff():
print "Start drive_until_black_lcliff"
m.base_forwards()
while NotBlackLeft():
pass
m.deactivate_motors()
def forwards_until_white_lcliff():
print "Start forwards_until_white_lcliff"
m.base_forwards()
while BlackLeft():
pass
m.deactivate_motors()
def forwards_until_black_rcliff():
print "Start drive_until_black_rcliff"
m.base_forwards()
while NotBlackRight():
pass
m.deactivate_motors()
def forwards_until_white_rcliff():
print "Start drive_until_black_rcliff"
m.base_forwards()
while BlackRight():
pass
m.deactivate_motors()
def forwards_until_black_lfcliff():
print "Start forwards_until_black_lfcliff"
m.base_forwards()
while NotBlackFrontLeft():
pass
m.deactivate_motors()
def forwards_until_white_lfcliff():
print "Start forwards_until_white_lfcliff"
m.base_forwards()
while BlackFrontLeft():
pass
m.deactivate_motors()
def forwards_until_black_rfcliff():
print "Start forwards_until_black_rfcliff"
m.base_forwards()
while NotBlackFrontRight():
pass
m.deactivate_motors()
def forwards_until_white_rfcliff():
print "Start forwards_until_white_rfcliff"
m.base_forwards()
while BlackFrontRight():
pass
m.deactivate_motors()
def backwards_until_black_lcliff():
print "Start drive_until_black_lcliff"
m.base_backwards()
while NotBlackLeft():
pass
m.deactivate_motors()
def backwards_until_white_lcliff():
print "Start drive_until_black_lcliff"
m.base_backwards()
while BlackLeft():
pass
m.deactivate_motors()
def backwards_until_black_rcliff():
print "Start drive_until_black_rcliff"
m.base_backwards()
while NotBlackRight():
pass
m.deactivate_motors()
def backwards_until_white_rcliff():
print "Start drive_until_black_rcliff"
m.base_backwards()
while BlackRight():
pass
m.deactivate_motors()
def backwards_until_black_lfcliff():
print "Start drive_until_black_lfcliff"
m.base_backwards()
while NotBlackFrontLeft():
pass
m.deactivate_motors()
def backwards_until_white_lfcliff():
print "Start backwards_until_white_lfcliff"
m.base_backwards()
while BlackFrontLeft():
pass
m.deactivate_motors()
def backwards_until_black_rfcliff():
print "Start backwards_until_black_rfcliff"
m.base_backwards()
while NotBlackFrontRight():
pass
m.deactivate_motors()
def backwards_until_white_rfcliff():
print "Start backwards_until_white_rfcliff"
m.base_backwards()
while BlackFrontRight():
pass
m.deactivate_motors()
def forwards_until_black_cliffs():
    # Goes forwards until both sensors have sensed black.
    print "Starting forwards_until_black_cliffs()"
    m.base_forwards()
    # Drive until either rear cliff sensor reaches the line...
    while NotBlackLeft() and NotBlackRight():
        pass
    # ...then keep rolling until the slower side catches up, so the robot
    # stops squared with BOTH rear cliffs on black.
    if BlackLeft():
        while NotBlackRight():
            pass
    else:
        while NotBlackLeft():
            pass
    m.deactivate_motors()
def forwards_until_black_fcliffs():
    # Same as forwards_until_black_cliffs(), but with the two FRONT cliffs.
    print "Starting forwards_until_black_fcliffs()"
    m.base_forwards()
    while NotBlackFrontLeft() and NotBlackFrontRight():
        pass
    if BlackFrontLeft():
        while NotBlackFrontRight():
            pass
    else:
        while NotBlackFrontLeft():
            pass
    m.deactivate_motors()
def backwards_until_black_cliffs():
# Goes backwards until both sensors have sensed black.
print "Starting backwards_until_black_cliffs()"
m.base_backwards()
while NotBlackLeft() and NotBlackRight():
pass
m.deactivate_motors()
def backwards_until_black_fcliffs():
    # Back up until BOTH front cliff sensors have sensed black.
    print "Starting backwards_until_black_fcliffs()"
    m.base_backwards()
    # Wait for the first front cliff to reach the line...
    while NotBlackFrontLeft() and NotBlackFrontRight():
        pass
    # ...then wait for the other side before stopping.
    if BlackFrontLeft():
        while NotBlackFrontRight():
            pass
    else:
        while NotBlackFrontLeft():
            pass
    m.deactivate_motors()
def forwards_through_line_lcliff():
    # Drive forwards completely across a black line (left rear cliff).
    forwards_until_black_lcliff()
    forwards_until_white_lcliff()
def forwards_through_line_rcliff():
    # Cross a black line using the right rear cliff.
    forwards_until_black_rcliff()
    forwards_until_white_rcliff()
def forwards_through_line_lfcliff():
    # Cross a black line using the left front cliff.
    forwards_until_black_lfcliff()
    forwards_until_white_lfcliff()
def forwards_through_line_rfcliff():
    # Cross a black line using the right FRONT cliff.
    # BUG FIX: originally called forwards_until_black_rcliff() (rear sensor),
    # so the white-wait below could run against a sensor still on white.
    forwards_until_black_rfcliff()
    forwards_until_white_rfcliff()
def backwards_through_line_lcliff():
    # Back up completely across a black line (left rear cliff): reach it,
    # then clear it.
    backwards_until_black_lcliff()
    backwards_until_white_lcliff()
def backwards_through_line_rcliff():
    # Cross a line backwards using the right rear cliff.
    backwards_until_black_rcliff()
    backwards_until_white_rcliff()
def backwards_through_line_lfcliff():
    # Cross a line backwards using the left front cliff.
    backwards_until_black_lfcliff()
    backwards_until_white_lfcliff()
def backwards_through_line_rfcliff():
    # Cross a line backwards using the right front cliff.
    backwards_until_black_rfcliff()
    backwards_until_white_rfcliff()
#---------------------------------------------Line Follow Functions-------------------------------------------
def lfollow_left(time, refresh_rate=c.LFOLLOW_REFRESH_RATE): # Line follow with the left cliff for time
print "Starting lfollow_left()\n"
sec = seconds() + time
while seconds() < sec:
if BlackLeft():
m.av(c.RIGHT_MOTOR, c.BASE_RM_POWER)
elif NotBlackLeft():
m.av(c.LEFT_MOTOR, c.BASE_LM_POWER)
msleep(refresh_rate)
deactivate_motors()
def lfollow_left_front(time, refresh_rate=c.LFOLLOW_REFRESH_RATE): # Line follow with the left cliff for time
print "Starting lfollow_left()\n"
sec = seconds() + time
while seconds() < sec:
if BlackFrontLeft():
m.av(c.RIGHT_MOTOR, c.BASE_RM_POWER)
elif NotBlackFrontLeft():
m.av(c.LEFT_MOTOR, c.BASE_LM_POWER)
msleep(refresh_rate)
deactivate_motors()
def lfollow_left_inside_line(time, refresh_rate=c.LFOLLOW_REFRESH_RATE):
sec = seconds() + time
while seconds() < sec:
if BlackLeft():
m.av(c.LEFT_MOTOR, c.BASE_LM_POWER)
else:
m.av(c.RIGHT_MOTOR, c.BASE_RM_POWER)
msleep(refresh_rate)
deactivate_motors()
def lfollow_right(time, refresh_rate=c.LFOLLOW_REFRESH_RATE): # Line follow with the right cliff for time
print "Starting lfollow_right()\n"
sec = seconds() + time
while seconds() < sec:
if BlackRight():
m.av(c.RIGHT_MOTOR, -1 * c.BASE_RM_POWER)
elif not BlackRight():
m.av(c.LEFT_MOTOR, -1 * c.BASE_LM_POWER)
msleep(refresh_rate)
deactivate_motors()
def lfollow_lfcliff_smooth_until_rfcliff_senses_black():
print "Starting lfollow_lfcliff_smooth_until_rfcliff_senses_black()"
while NotBlackFrontRight():
if BlackFrontLeft():
create_drive_direct(c.BASE_LM_POWER, c.LFOLLOW_SMOOTH_RM_POWER)
else:
create_drive_direct(c.LFOLLOW_SMOOTH_LM_POWER, c.BASE_RM_POWER)
def lfollow_lfcliff_smooth(time):
print "Starting lfollow_lfcliff_smooth_until_rfcliff_senses_black()"
sec = seconds() + time
while seconds() < sec:
if BlackFrontLeft():
create_drive_direct(c.BASE_LM_POWER, c.LFOLLOW_SMOOTH_RM_POWER)
else:
create_drive_direct(c.LFOLLOW_SMOOTH_LM_POWER, c.BASE_RM_POWER)
def lfollow_lfcliff_smooth_until_rfcliff_senses_white():
print "Starting lfollow_lfcliff_smooth_until_rfcliff_senses_white()"
while BlackFrontRight():
if BlackFrontLeft():
create_drive_direct(c.BASE_LM_POWER, c.LFOLLOW_SMOOTH_RM_POWER)
else:
create_drive_direct(c.LFOLLOW_SMOOTH_LM_POWER, c.BASE_RM_POWER)
#---------------------------------------------Depth Functions-------------------------------------------
def backwards_until_depth():
m.base_backwards()
while NotDepthSensesObject():
pass
m.deactivate_motors()
def backwards_until_not_depth():
m.base_backwards()
while DepthSensesObject():
pass
m.deactivate_motors()
def forwards_until_depth():
m.base_forwards()
while NotDepthSensesObject():
pass
m.deactivate_motors()
def lfollow_lfcliff_smooth_until_depth():
print "Starting lfollow_lfcliff_smooth_until_rfcliff_senses_black()"
while NotDepthSensesObject():
if BlackFrontLeft():
create_drive_direct(c.BASE_LM_POWER, c.LFOLLOW_SMOOTH_RM_POWER)
else:
create_drive_direct(c.LFOLLOW_SMOOTH_LM_POWER, c.BASE_RM_POWER)
def wait_for_depth(time=15):
print "Starting wait_for_depth()"
sec = seconds() + time
while NotRightDepthSensesObject() and seconds() < sec:
pass
def wait_for_not_depth(time=7):
print "Starting wait_for_empty()"
sec = seconds() + time
while RightDepthSensesObject() and seconds() < sec:
pass
#----------------------------------------------Bump-------------------------------------------
def forwards_until_bump():
    # Drive forwards until either physical bumper is pressed.
    print "Starting forwards_until_bump()"
    m.base_forwards()
    while NotBumpedLeft() and NotBumpedRight():
        pass
    m.deactivate_motors()
def wfollow_left(time, refresh_rate=c.LFOLLOW_REFRESH_RATE):
    # Wall-follow on the LEFT for `time` MILLISECONDS: steer away while any
    # left bump/light-bump is active, steer back toward the wall when clear.
    print "Starting wfollow_left()"
    sec = seconds() + time / 1000.0
    while seconds() < sec:
        if BumpedLeft() or BumpedLightLeft() or BumpedLightFrontLeft():
            m.activate_motors(c.BASE_LM_POWER, int(c.LFOLLOW_SMOOTH_RM_POWER * 0.6))
        else:
            m.activate_motors(int(c.LFOLLOW_SMOOTH_LM_POWER * 0.5), c.BASE_RM_POWER)
        msleep(refresh_rate)
    m.deactivate_motors()
def wfollow_left_until_black_right_front(time=15000, refresh_rate=c.LFOLLOW_REFRESH_RATE):
    # Wall-follow left until the right-front cliff sees black (or timeout, ms).
    print "Starting wfollow_left_until_black_right_front()"
    sec = seconds() + time / 1000.0
    while seconds() < sec and NotBlackFrontRight():
        if BumpedLeft() or BumpedLightLeft() or BumpedLightFrontLeft():
            m.activate_motors(c.BASE_LM_POWER, int(c.LFOLLOW_SMOOTH_RM_POWER * 0.6))
        else:
            m.activate_motors(int(c.LFOLLOW_SMOOTH_LM_POWER * 0.5), c.BASE_RM_POWER)
        msleep(refresh_rate)
    m.deactivate_motors()
def wfollow_left_until_white_right_front(time=15000, refresh_rate=c.LFOLLOW_REFRESH_RATE):
    # Wall-follow left until the right-front cliff sees white (or timeout, ms).
    print "Starting wfollow_left_until_white_right_front()"
    sec = seconds() + time / 1000.0
    while seconds() < sec and BlackFrontRight():
        if BumpedLeft() or BumpedLightLeft() or BumpedLightFrontLeft():
            m.activate_motors(c.BASE_LM_POWER, int(c.LFOLLOW_SMOOTH_RM_POWER * 0.6))
        else:
            m.activate_motors(int(c.LFOLLOW_SMOOTH_LM_POWER * 0.5), c.BASE_RM_POWER)
        msleep(refresh_rate)
    m.deactivate_motors()
def wfollow_right_until_black_left_front(time=15000, refresh_rate=c.LFOLLOW_REFRESH_RATE):
    # Wall-follow on the RIGHT until the left-front cliff sees black.
    # NOTE(review): both branches scale LFOLLOW_SMOOTH_RM_POWER (never the LM
    # constant) -- presumably deliberate tuning; confirm against constants.
    print "Starting wfollow_right_until_black_left_front()"
    sec = seconds() + time / 1000.0
    while seconds() < sec and NotBlackFrontLeft():
        if BumpedRight() or BumpedLightRight() or BumpedLightFrontRight():
            m.activate_motors(int(c.LFOLLOW_SMOOTH_RM_POWER * 0.6), c.BASE_RM_POWER)
        else:
            m.activate_motors(c.BASE_LM_POWER, int(c.LFOLLOW_SMOOTH_RM_POWER * 0.5))
        msleep(refresh_rate)
    m.deactivate_motors()
def wfollow_right_until_white_left_front(time=15000, refresh_rate=c.LFOLLOW_REFRESH_RATE):
    # Wall-follow right until the left-front cliff sees white.
    print "Starting wfollow_right_until_white_left_front()"
    sec = seconds() + time / 1000.0
    while seconds() < sec and BlackFrontLeft():
        if BumpedRight() or BumpedLightRight() or BumpedLightFrontRight():
            m.activate_motors(int(c.LFOLLOW_SMOOTH_RM_POWER * 0.6), c.BASE_RM_POWER)
        else:
            m.activate_motors(c.BASE_LM_POWER, int(c.LFOLLOW_SMOOTH_RM_POWER * 0.5))
        msleep(refresh_rate)
    m.deactivate_motors()
def wfollow_right_until_black_left(time=15000, refresh_rate=c.LFOLLOW_REFRESH_RATE):
    # Wall-follow right until the left REAR cliff sees black.
    print "Starting wfollow_right_until_black_left()"
    sec = seconds() + time / 1000.0
    while seconds() < sec and NotBlackLeft():
        if BumpedRight() or BumpedLightRight() or BumpedLightFrontRight():
            m.activate_motors(int(c.LFOLLOW_SMOOTH_RM_POWER * 0.6), c.BASE_RM_POWER)
        else:
            m.activate_motors(c.BASE_LM_POWER, int(c.LFOLLOW_SMOOTH_RM_POWER * 0.5))
        msleep(refresh_rate)
    m.deactivate_motors()
def wfollow_right_until_white_left(time=15000, refresh_rate=c.LFOLLOW_REFRESH_RATE):
    # Wall-follow right until the left rear cliff sees white.
    print "Starting wfollow_right_until_white_left()"
    sec = seconds() + time / 1000.0
    while seconds() < sec and BlackLeft():
        if BumpedRight() or BumpedLightRight() or BumpedLightFrontRight():
            m.activate_motors(int(c.LFOLLOW_SMOOTH_RM_POWER * 0.6), c.BASE_RM_POWER)
        else:
            m.activate_motors(c.BASE_LM_POWER, int(c.LFOLLOW_SMOOTH_RM_POWER * 0.5))
        msleep(refresh_rate)
    m.deactivate_motors()
def wfollow_right_until_black_right(time=15000, refresh_rate=c.LFOLLOW_REFRESH_RATE):
    # Wall-follow right until the right rear cliff sees black.
    print "Starting wfollow_right_until_black_right()"
    sec = seconds() + time / 1000.0
    while seconds() < sec and NotBlackRight():
        if BumpedRight() or BumpedLightRight() or BumpedLightFrontRight():
            m.activate_motors(int(c.LFOLLOW_SMOOTH_RM_POWER * 0.6), c.BASE_RM_POWER)
        else:
            m.activate_motors(c.BASE_LM_POWER, int(c.LFOLLOW_SMOOTH_RM_POWER * 0.5))
        msleep(refresh_rate)
    m.deactivate_motors()
def wfollow_right_until_white_right(time=15000, refresh_rate=c.LFOLLOW_REFRESH_RATE):
    # Wall-follow right until the right rear cliff sees white.
    print "Starting wfollow_right_until_white_right()"
    sec = seconds() + time / 1000.0
    while seconds() < sec and BlackRight():
        if BumpedRight() or BumpedLightRight() or BumpedLightFrontRight():
            m.activate_motors(int(c.LFOLLOW_SMOOTH_RM_POWER * 0.6), c.BASE_RM_POWER)
        else:
            m.activate_motors(c.BASE_LM_POWER, int(c.LFOLLOW_SMOOTH_RM_POWER * 0.5))
        msleep(refresh_rate)
    m.deactivate_motors()
#----------------------------------------------Align Functions-------------------------------------------
def align_close_fcliffs():
    # Square up on a line just ahead using the FRONT cliffs: each side drives
    # onto black independently, then backs off until white again.
    left_front_forwards_until_black()
    right_front_forwards_until_black()
    left_front_backwards_until_white()
    right_front_backwards_until_white()
def align_far_fcliffs():
    # Square up approaching the line from on top of it (inverse of
    # align_close_fcliffs).
    left_front_forwards_until_white()
    right_front_forwards_until_white()
    left_front_backwards_until_black()
    right_front_backwards_until_black()
def align_close_cliffs():
    # Square up using the REAR cliffs.  The single-wheel helpers used here
    # (left_backwards_until_lcliff_senses_white etc.) are defined further
    # down this module, past this chunk.
    left_backwards_until_lcliff_senses_white()
    right_backwards_until_rcliff_senses_white()
    left_forwards_until_lcliff_senses_black()
    right_forwards_until_rcliff_senses_black()
def align_far_cliffs():
    # Inverse of align_close_cliffs.
    left_forwards_until_lcliff_senses_white()
    right_forwards_until_rcliff_senses_white()
    left_backwards_until_lcliff_senses_black()
    right_backwards_until_rcliff_senses_black()
#----------------------------------Single Motor Align Functions--------------
def left_front_backwards_until_white(): # Left motor goes back until the left front cliff senses white
    print "Starting left_front_backwards_until_white()"
    m.av(c.LEFT_MOTOR, -1 * c.BASE_LM_POWER)
    while BlackFrontLeft():
        pass
    m.deactivate_motors()
def right_front_backwards_until_white(): # Right motor goes back until right front cliff senses white
    print "Starting right_front_backwards_until_white()"
    m.av(c.RIGHT_MOTOR, -1 * c.BASE_RM_POWER)
    while BlackFrontRight():
        pass
    m.deactivate_motors()
def left_front_backwards_until_black(): # Left motor goes back until left front cliff senses black
    print "Starting left_front_backwards_until_black()"
    m.av(c.LEFT_MOTOR, -1 * c.BASE_LM_POWER)
    while NotBlackFrontLeft():
        pass
    m.deactivate_motors()
def right_front_backwards_until_black(): # Right motor goes back until right front cliff senses black
    print "Starting right_front_backwards_until_black()"
    m.av(c.RIGHT_MOTOR, -1 * c.BASE_RM_POWER)
    while NotBlackFrontRight():
        pass
    m.deactivate_motors()
def left_front_forwards_until_white(): # Left motor goes forwards until the left front cliff senses white
    print "Starting left_front_forwards_until_white()"
    m.av(c.LEFT_MOTOR, c.BASE_LM_POWER)
    while BlackFrontLeft():
        pass
    m.deactivate_motors()
def right_front_forwards_until_white(): # Right motor goes forwards until right front cliff senses white
    print "Starting right_front_forwards_until_white()"
    m.av(c.RIGHT_MOTOR, c.BASE_RM_POWER)
    while BlackFrontRight():
        pass
    m.deactivate_motors()
def left_front_forwards_until_black(): # Left motor goes forwards until left front cliff senses black
    print "Starting left_front_forwards_until_black()"
    m.av(c.LEFT_MOTOR, c.BASE_LM_POWER)
    while NotBlackFrontLeft():
        pass
    m.deactivate_motors()
def right_front_forwards_until_black(): # Right motor goes forwards until right front cliff senses black
    print "Starting right_front_forwards_until_black()"
    m.av(c.RIGHT_MOTOR, c.BASE_RM_POWER)
    while NotBlackFrontRight():
        pass
    m.deactivate_motors()
#----------------------------------Turning Align Functions--------------
def turn_left_until_lcliff_senses_black(multiplier=1):
print "Starting turn_left_until_lcliff_senses_black()"
m.base_turn_left(multiplier)
while NotBlackLeft():
pass
m.deactivate_motors()
def turn_left_until_rcliff_senses_black(multiplier=1):
print "Starting turn_left_until_rcliff_senses_black()"
m.base_turn_left(multiplier)
while NotBlackRight():
pass
m.deactivate_motors()
def turn_left_until_lfcliff_senses_black(multiplier=1):
print "Starting turn_left_until_lfcliff_senses_black"
m.base_turn_left(multiplier)
while NotBlackFrontLeft():
pass
m.deactivate_motors()
def turn_left_until_rfcliff_senses_black(multiplier=1):
print "Starting turn_left_until_rfcliff_senses_black"
m.base_turn_left(multiplier)
while NotBlackFrontRight():
pass
m.deactivate_motors()
def turn_left_until_lcliff_senses_white(multiplier=1):
print "Starting turn_left_until_lcliff_senses_white()"
m.base_turn_left(multiplier)
while BlackLeft():
pass
m.deactivate_motors()
def turn_left_until_rcliff_senses_white(multiplier=1):
m.base_turn_left(multiplier)
while BlackRight():
pass
m.deactivate_motors()
def turn_left_until_lfcliff_senses_white(multiplier=1):
print "Starting turn_left_until_lfcliff_senses_white"
m.base_turn_left(multiplier)
while BlackFrontLeft():
pass
m.deactivate_motors()
def turn_left_until_rfcliff_senses_white(multiplier=1):
print "Starting turn_left_until_rfcliff_senses_white"
m.base_turn_left(multiplier)
while BlackFrontRight():
pass
m.base_turn_left(multiplier)
m.deactivate_motors()
def turn_right_until_lcliff_senses_black(multiplier=1):
    # Spin right (speed scaled by multiplier) until the left rear cliff sees black.
    print "Starting turn_right_until_lcliff_senses_black()"
    m.base_turn_right(multiplier)
    while NotBlackLeft():
        pass
    m.deactivate_motors()
def turn_right_until_rcliff_senses_black(multiplier=1):
    print "Starting turn_right_until_rcliff_senses_black()"
    m.base_turn_right(multiplier)
    while NotBlackRight():
        pass
    m.deactivate_motors()
def turn_right_until_lfcliff_senses_black(multiplier=1):
    print "Starting turn_right_until_lfcliff_senses_black"
    m.base_turn_right(multiplier)
    while NotBlackFrontLeft():
        pass
    m.deactivate_motors()
def turn_right_until_rfcliff_senses_black(multiplier=1):
    print "Starting turn_right_until_rfcliff_senses_black"
    m.base_turn_right(multiplier)
    while NotBlackFrontRight():
        pass
    m.deactivate_motors()
def turn_right_until_lcliff_senses_white(multiplier=1):
    print "Starting turn_right_until_lcliff_senses_white()"
    m.base_turn_right(multiplier)
    while BlackLeft():
        pass
    m.deactivate_motors()
def turn_right_until_rcliff_senses_white(multiplier=1):
    # NOTE(review): the only sibling without a "Starting ..." print.
    m.base_turn_right(multiplier)
    while BlackRight():
        pass
    m.deactivate_motors()
def turn_right_until_lfcliff_senses_white(multiplier=1):
    print "Starting turn_right_until_lfcliff_senses_white"
    m.base_turn_right(multiplier)
    while BlackFrontLeft():
        pass
    m.deactivate_motors()
def turn_right_until_rfcliff_senses_white(multiplier=1):
    print "Starting turn_right_until_rfcliff_senses_white"
    m.base_turn_right(multiplier)
    while BlackFrontRight():
        pass
    m.deactivate_motors()
#----------------------------------Driving Back Cliff Align Functions----------------------
def left_backwards_until_lcliff_senses_white(): # Left motor goes back until the left | |
serial, which is ATMLXXXXYYYYYYY
# where XXXX is the board identifier.
# This can be verified by looking at readme.htm, which also uses the board ID to redirect to platform page
device['target_id'] = device['target_id_usb_id'][4:8]
platform_data = self.plat_db.get(device['target_id'],
device_type='atmel',
verbose_data=True)
device.update(platform_data or {"platform_name": None})
def mock_manufacture_id(self, mid, platform_name, oper='+'):
    """! Replace (or add if manufacture id doesn't exist) entry in self.manufacture_ids
    @param mid Manufacture id (target id prefix) to mock
    @param platform_name Platform name to associate with mid
    @param oper '+' add new mock / override existing entry
           '-' remove mid from mocking entry
    @return None (entries are applied directly to self.plat_db)
    """
    # BUG FIX: compare strings with '==', not 'is'.  'is' tests object
    # identity and only happened to work via CPython small-string interning
    # (it is a SyntaxWarning on Python 3.8+).
    if oper == '+':
        self.plat_db.add(mid, platform_name, permanent=True)
    elif oper == '-':
        self.plat_db.remove(mid, permanent=True)
    else:
        raise ValueError("oper can only be [+-]")
@deprecated("List formatting methods are deprecated for a simpler API. "
            "Please use 'list_mbeds' instead.")
def list_manufacture_ids(self):
    """! Creates list of all available mappings for target_id -> Platform
    @return String with table formatted output
    """
    # prettytable is a third-party dependency, imported lazily so the rest
    # of the module works without it.
    from prettytable import PrettyTable, HEADER
    columns = ['target_id_prefix', 'platform_name']
    pt = PrettyTable(columns, junction_char="|", hrules=HEADER)
    for col in columns:
        pt.align[col] = 'l'
    for target_id_prefix, platform_name in sorted(self.plat_db.items()):
        pt.add_row([target_id_prefix, platform_name])
    return pt.get_string()
def retarget_read(self):
    """! Load retarget data from local file
    @return Curent retarget configuration (dictionary); {} if the file is
            missing, unreadable or not valid JSON
    """
    if os.path.isfile(self.RETARGET_FILE_NAME):
        logger.debug("reading retarget file %s", self.RETARGET_FILE_NAME)
        try:
            with open(self.RETARGET_FILE_NAME, "r", encoding="utf-8") as f:
                return json.load(f)
        # IOError: file vanished/unreadable; ValueError: malformed JSON.
        except IOError as e:
            logger.exception(e)
        except ValueError as e:
            logger.exception(e)
    return {}
def retarget(self):
    """! Enable retargeting
    @details Read data from local retarget configuration file
    @return Retarget data structure read from configuration file
    """
    # Caches the parsed config on the instance as a side effect.
    self.retarget_data = self.retarget_read()
    return self.retarget_data
def get_dummy_platform(self, platform_name):
    """! Returns simple dummy platform
    @details Every field except the unique name is the fixed marker "DUMMY";
             a per-platform counter makes platform_name_unique distinct
             across repeated calls (K64F[0], K64F[1], ...).
    """
    if not hasattr(self, "dummy_counter"):
        self.dummy_counter = {}  # platform<str>: counter<int>
    # Current index for this platform (0 on first sight), then advance it.
    index = self.dummy_counter.setdefault(platform_name, 0)
    self.dummy_counter[platform_name] = index + 1
    return {
        "platform_name": platform_name,
        "platform_name_unique": "%s[%d]" % (platform_name, index),
        "mount_point": "DUMMY",
        "serial_port": "DUMMY",
        "target_id": "DUMMY",
        "target_id_mbed_htm": "DUMMY",
        "target_id_usb_id": "DUMMY",
        "daplink_version": "DUMMY"
    }
def get_supported_platforms(self, device_type=None):
    """! Return a dictionary of supported target ids and the corresponding platform name
    @param device_type Filter which device entries are returned from the platform database
    @return Dictionary of { 'target_id': 'platform_name', ... }
    """
    # Only forward the filter when the caller supplied one, so the database
    # default filtering behaviour is preserved.
    if device_type is None:
        entries = self.plat_db.items()
    else:
        entries = self.plat_db.items(device_type=device_type)
    return {target_id: name for target_id, name in entries}
@deprecated("List formatting methods are deprecated to simplify the API. "
            "Please use 'list_mbeds' instead.")
def list_platforms(self):
    """! Useful if you just want to know which platforms are currently available on the system
    @return List of (unique values) available platforms, in first-seen order
    """
    result = []
    # The original looped with enumerate() but never used the index.
    for mbed in self.list_mbeds():
        platform_name = str(mbed['platform_name'])
        if platform_name not in result:
            result.append(platform_name)
    return result
@deprecated("List formatting methods are deprecated to simplify the API. "
            "Please use 'list_mbeds' instead.")
def list_platforms_ext(self):
    """! Useful if you just want to know how many platforms of each type are currently available on the system
    @return Dict of platform: platform_count
    """
    result = {}
    for mbed in self.list_mbeds():
        platform_name = str(mbed['platform_name'])
        # dict.get collapses the original add-or-increment branches.
        result[platform_name] = result.get(platform_name, 0) + 1
    return result
@deprecated("List formatting methods are deprecated to simplify the API. "
            "Please use 'list_mbeds' instead.")
def list_mbeds_by_targetid(self):
    """! Get information about mbeds with extended parameters/info included
    @return Returns dictionary where keys are TargetIDs and values are mbed structures
    @details Ordered by target id (key: target_id).
    """
    # Re-key the extended device list by each record's target_id.
    return {record['target_id']: record for record in self.list_mbeds_ext()}
def __str__(self):
    """! Object to string casting
    @return Stringified class object should be prettytable formated string
    """
    return self.get_string()
@deprecated("List formatting methods are deprecated to simplify the API. "
            "Please use 'list_mbeds' instead.")
def get_string(self, border=False, header=True, padding_width=1, sortby='platform_name'):
    """! Printing with some sql table like decorators
    @param border Table border visibility
    @param header Table header visibility
    @param padding_width Table padding
    @param sortby Column used to sort results
    @return Returns string which can be printed on console
    """
    # Third-party dependency, imported lazily.
    from prettytable import PrettyTable, HEADER
    result = ''
    mbeds = self.list_mbeds(unique_names=True, read_details_txt=True)
    if mbeds:
        """ ['platform_name', 'mount_point', 'serial_port', 'target_id'] - columns generated from USB auto-detection
            ['platform_name_unique', ...] - columns generated outside detection subsystem (OS dependent detection)
        """
        columns = ['platform_name', 'platform_name_unique', 'mount_point', 'serial_port', 'target_id', 'daplink_version']
        pt = PrettyTable(columns, junction_char="|", hrules=HEADER)
        for col in columns:
            pt.align[col] = 'l'
        for mbed in mbeds:
            row = []
            # Missing or empty cells are rendered as 'unknown'.
            for col in columns:
                row.append(mbed[col] if col in mbed and mbed[col] else 'unknown')
            pt.add_row(row)
        result = pt.get_string(border=border, header=header, padding_width=padding_width, sortby=sortby)
    # Empty string when no devices were detected.
    return result
# Private functions supporting API
@deprecated("This method will be removed from the public API. "
            "Please use 'list_mbeds' instead")
def get_json_data_from_file(self, json_spec_filename, verbose=False):
    """! Loads from file JSON formatted string to data structure
    @param json_spec_filename Path to the JSON file to read
    @param verbose Unused here; kept for API compatibility
    @return Parsed data structure, or None if the file cannot be opened or
            does not contain valid JSON
    """
    try:
        with open(json_spec_filename) as data_file:
            try:
                return json.load(data_file)
            except ValueError as json_error_msg:
                # File exists but is not valid JSON.
                logger.error("Parsing file(%s): %s", json_spec_filename, json_error_msg)
                return None
    except IOError as fileopen_error_msg:
        # File missing or unreadable.
        logger.warning(fileopen_error_msg)
    return None
@deprecated("This method will be removed from the public API. "
            "Please use 'list_mbeds' instead")
def get_htm_target_id(self, mount_point):
    # Thin public wrapper: target id half of _read_htm_ids().
    target_id, _ = self._read_htm_ids(mount_point)
    return target_id
@deprecated("This method will be removed from the public API. "
            "Please use 'list_mbeds' instead")
def get_mbed_htm(self, mount_point):
    # Thin public wrapper: build-info half of _read_htm_ids().
    _, build_info = self._read_htm_ids(mount_point)
    return build_info
def _read_htm_ids(self, mount_point):
"""! Function scans mbed.htm to get information about TargetID.
@param mount_point mbed mount point (disk / drive letter)
@return Function returns targetID, in case of failure returns None.
@details Note: This function should be improved to scan variety of boards' mbed.htm files
"""
result = {}
target_id = None
for line in self._htm_lines(mount_point):
target_id = target_id or self._target_id_from_htm(line)
ver_bld = self._mbed_htm_comment_section_ver_build(line)
if ver_bld:
result['version'], result['build'] = ver_bld
m = re.search(r'url=([\w\d\:/\\\?\.=-_]+)', line)
if m:
result['url'] = m.group(1).strip()
return target_id, result
@deprecated("This method will be removed from the public API. "
            "Please use 'list_mbeds' instead")
def get_mbed_htm_comment_section_ver_build(self, line):
    # Thin public wrapper kept for API compatibility.
    return self._mbed_htm_comment_section_ver_build(line)
def _mbed_htm_comment_section_ver_build(self, line):
"""! Check for Version and Build date of interface chip firmware im mbed.htm file
@return (version, build) tuple if successful, None if no info found
"""
# <!-- Version: 0200 Build: Mar 26 2014 13:22:20 -->
m = re.search(r'^<!-- Version: (\d+) Build: ([\d\w: ]+) -->', line)
if m:
version_str, build_str = m.groups()
return (version_str.strip(), build_str.strip())
# <!-- Version: 0219 Build: Feb 2 2016 15:20:54 Git Commit SHA: 0853ba0cdeae2436c52efcba0ba76a6434c200ff Git local mods:No-->
m = re.search(r'^<!-- Version: (\d+) Build: ([\d\w: ]+) Git Commit SHA', line)
if m:
version_str, build_str = m.groups()
return (version_str.strip(), build_str.strip())
# <!-- Version: 0.14.3. build 471 -->
m = re.search(r'^<!-- Version: ([\d+\.]+)\. build (\d+) -->', line)
if m:
version_str, build_str = m.groups()
return (version_str.strip(), build_str.strip())
return None
@deprecated("This method will be removed from the public API. "
            "Please use 'list_mbeds' instead")
def get_mbed_htm_lines(self, mount_point):
    # Thin public wrapper kept for API compatibility.
    return self._htm_lines(mount_point)
def _htm_lines(self, mount_point):
if mount_point:
mbed_htm_path = join(mount_point, self.MBED_HTM_NAME)
with open(mbed_htm_path, 'r') as f:
return f.readlines()
@deprecated("This method will be removed from the public API. "
            "Please use 'list_mbeds' instead")
def get_details_txt(self, mount_point):
    # Thin public wrapper kept for API compatibility.
    return self._details_txt(mount_point)
def _details_txt(self, mount_point):
    """! Load DETAILS.TXT to dictionary:
    DETAILS.TXT example:
    Version: 0226
    Build:   Aug 24 2015 17:06:30
    Git Commit SHA: 27a236b9fe39c674a703c5c89655fbd26b8e27e1
    Git Local mods: Yes
    or:
    # DAPLink Firmware - see https://mbed.com/daplink
    Unique ID: 0240000029164e45002f0012706e0006f301000097969900
    HIF ID: 97969900
    Auto Reset: 0
    Automation allowed: 0
    Daplink Mode: Interface
    Interface Version: 0240
    Git SHA: c765cbb590f57598756683254ca38b211693ae5e
    Local Mods: 0
    USB Interfaces: MSD, CDC, HID
    Interface CRC: 0x26764ebf
    """
    # Returns None for a falsy mount point; IOError propagates if
    # DETAILS.TXT is absent on the drive -- callers are expected to handle it.
    if mount_point:
        path_to_details_txt = os.path.join(mount_point, self.DETAILS_TXT_NAME)
        with open(path_to_details_txt, 'r') as f:
            return self._parse_details(f.readlines())
    return None
@deprecated("This method will be removed from the public API. "
            "Please use 'list_mbeds' instead")
def parse_details_txt(self, lines):
    # Thin public wrapper kept for API compatibility.
    return self._parse_details(lines)
def _parse_details(self, lines):
result = {}
for line in lines:
if not line.startswith('#'):
key, _, value = line.partition(':')
if value:
result[key] = value.strip()
if 'Interface Version' in result:
result['Version'] = result['Interface Version']
return result
@deprecated("This method will be removed from the public API. "
            "Please use 'list_mbeds' instead")
def scan_html_line_for_target_id(self, line):
    # Thin public wrapper kept for API compatibility.
    return self._target_id_from_htm(line)
def _target_id_from_htm(self, line):
"""! Extract Target id from htm line.
@return Target id or None
"""
# Detecting modern mbed.htm file format
m = re.search('\?code=([a-fA-F0-9]+)', line)
if m:
result = m.groups()[0]
| |
),
Element( u'N405', Properties(desc=u'Location Qualifier', req_sit=u'S', data_type=(u'ID',u'1',u'2'), position=5,
codes=[u'B1', u'DP'] ) ),
Element( u'N406', Properties(desc=u'Location Identifier', req_sit=u'S', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Segment( u'PER', Properties(syntax='',req_sit=u'S',repeat=u'3',pos=u'220',desc=u'Additional Patient Information Contact Information'),
Element( u'PER01', Properties(desc=u'Contact Function Code', req_sit=u'R', data_type=(u'ID',u'2',u'2'), position=1,
codes=[u'IC'] ) ),
Element( u'PER02', Properties(desc=u'Name', req_sit=u'S', data_type=(u'AN',u'1',u'60'), position=2,
codes=[] ) ),
Element( u'PER03', Properties(desc=u'Communication Number Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=3,
codes=[u'EM', u'FX', u'TE'] ) ),
Element( u'PER04', Properties(desc=u'Communication Number', req_sit=u'S', data_type=(u'AN',u'1',u'256'), position=4,
codes=[] ) ),
Element( u'PER05', Properties(desc=u'Communication Number Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=5,
codes=[u'EM', u'EX', u'FX', u'TE'] ) ),
Element( u'PER06', Properties(desc=u'Communication Number', req_sit=u'S', data_type=(u'AN',u'1',u'256'), position=6,
codes=[] ) ),
Element( u'PER07', Properties(desc=u'Communication Number Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=7,
codes=[u'EM', u'EX', u'FX', u'TE'] ) ),
Element( u'PER08', Properties(desc=u'Communication Number', req_sit=u'S', data_type=(u'AN',u'1',u'256'), position=8,
codes=[] ) ),
Element( u'PER09', Properties(desc=u'Contact Inquiry Reference', req_sit=u'N', data_type=(u'AN',u'1',u'20'), position=9,
codes=[] ) ),
),
)
# X12 278 loop 2010DA 'Dependent Name': NM1 (name), REF (supplemental
# identification), AAA (request validation), DMG (demographic information)
# and INS (relationship) segments, each with element-level data types,
# requirement designators and permitted code sets.
# NOTE(review): appears machine-generated from the 278 implementation
# guide — prefer regenerating over hand-editing (confirm provenance).
parsed_278_2010DA = Loop( u'2010DA', Properties(looptype='',repeat=u'1',pos=u'170',req_sit=u'R',desc=u'Dependent Name'),
Segment( u'NM1', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'170',desc=u'Dependent Name'),
Element( u'NM101', Properties(desc=u'Entity Identifier Code', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'QC'] ) ),
Element( u'NM102', Properties(desc=u'Entity Type Qualifier', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=2,
codes=[u'1'] ) ),
Element( u'NM103', Properties(desc=u'Name Last or Organization Name', req_sit=u'S', data_type=(u'AN',u'1',u'60'), position=3,
codes=[] ) ),
Element( u'NM104', Properties(desc=u'Name First', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=4,
codes=[] ) ),
Element( u'NM105', Properties(desc=u'Name Middle', req_sit=u'S', data_type=(u'AN',u'1',u'25'), position=5,
codes=[] ) ),
Element( u'NM106', Properties(desc=u'Name Prefix', req_sit=u'N', data_type=(u'AN',u'1',u'10'), position=6,
codes=[] ) ),
Element( u'NM107', Properties(desc=u'Name Suffix', req_sit=u'S', data_type=(u'AN',u'1',u'10'), position=7,
codes=[] ) ),
Element( u'NM108', Properties(desc=u'Identification Code Qualifier', req_sit=u'S', data_type=(u'ID',u'1',u'2'), position=8,
codes=[u'MI', u'ZZ'] ) ),
Element( u'NM109', Properties(desc=u'Identification Code', req_sit=u'S', data_type=(u'AN',u'2',u'80'), position=9,
codes=[] ) ),
Element( u'NM110', Properties(desc=u'Entity Relationship Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=10,
codes=[] ) ),
Element( u'NM111', Properties(desc=u'Entity Identifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=11,
codes=[] ) ),
),
Segment( u'REF', Properties(syntax='',req_sit=u'S',repeat=u'3',pos=u'180',desc=u'Dependent Supplemental Identification'),
Element( u'REF01', Properties(desc=u'Reference Identification Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'A6', u'EJ', u'SY'] ) ),
Element( u'REF02', Properties(desc=u'Reference Identification', req_sit=u'R', data_type=(u'AN',u'1',u'50'), position=2,
codes=[] ) ),
Element( u'REF03', Properties(desc=u'Description', req_sit=u'N', data_type=(u'AN',u'1',u'80'), position=3,
codes=[] ) ),
Composite( u'C040', Properties(req_sit=u'N',refdes='',seq=u'04',desc=u'Reference Identifier'),
),
),
Segment( u'AAA', Properties(syntax='',req_sit=u'S',repeat=u'9',pos=u'230',desc=u'Dependent Request Validation'),
Element( u'AAA01', Properties(desc=u'Yes/No Condition or Response Code', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=1,
codes=[u'N', u'Y'] ) ),
Element( u'AAA02', Properties(desc=u'Agency Qualifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=2,
codes=[] ) ),
Element( u'AAA03', Properties(desc=u'Reject Reason Code', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=3,
codes=[u'15', u'33', u'58', u'64', u'65', u'66', u'67', u'68', u'71', u'77', u'95'] ) ),
Element( u'AAA04', Properties(desc=u'Follow-up Action Code', req_sit=u'S', data_type=(u'ID',u'1',u'1'), position=4,
codes=[u'C', u'N'] ) ),
),
Segment( u'DMG', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'250',desc=u'Dependent Demographic Information'),
Element( u'DMG01', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'D8'] ) ),
Element( u'DMG02', Properties(desc=u'Date Time Period', req_sit=u'R', data_type=(u'AN',u'1',u'35'), position=2,
codes=[] ) ),
Element( u'DMG03', Properties(desc=u'Gender Code', req_sit=u'S', data_type=(u'ID',u'1',u'1'), position=3,
codes=[u'F', u'M', u'U'] ) ),
Element( u'DMG04', Properties(desc=u'Marital Status Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=4,
codes=[] ) ),
Element( u'DMG05', Properties(desc=u'Race or Ethnicity Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=5,
codes=[] ) ),
Element( u'DMG06', Properties(desc=u'Citizenship Status Code', req_sit=u'N', data_type=(u'ID',u'1',u'2'), position=6,
codes=[] ) ),
Element( u'DMG07', Properties(desc=u'Country Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=7,
codes=[] ) ),
Element( u'DMG08', Properties(desc=u'Basis of Verification Code', req_sit=u'N', data_type=(u'ID',u'1',u'2'), position=8,
codes=[] ) ),
Element( u'DMG09', Properties(desc=u'Quantity', req_sit=u'N', data_type=(u'R',u'1',u'15'), position=9,
codes=[] ) ),
),
Segment( u'INS', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'260',desc=u'Dependent Relationship'),
Element( u'INS01', Properties(desc=u'Yes/No Condition or Response Code', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=1,
codes=[u'N'] ) ),
Element( u'INS02', Properties(desc=u'Individual Relationship Code', req_sit=u'R', data_type=(u'ID',u'2',u'2'), position=2,
codes=[u'01', u'04', u'05', u'07', u'09', u'10', u'15', u'17', u'19', u'20', u'21', u'22', u'23', u'24', u'29', u'32', u'33', u'34', u'39', u'40', u'41', u'43', u'53', u'G8'] ) ),
Element( u'INS03', Properties(desc=u'Maintenance Type Code', req_sit=u'N', data_type=(u'ID',u'3',u'3'), position=3,
codes=[] ) ),
Element( u'INS04', Properties(desc=u'Maintenance Reason Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=4,
codes=[] ) ),
Element( u'INS05', Properties(desc=u'Benefit Status Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=5,
codes=[] ) ),
Element( u'INS06', Properties(desc=u'Medicare Plan Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=6,
codes=[] ) ),
Element( u'INS07', Properties(desc=u'Consolidated Omnibus Budget Reconciliation Act (COBRA) Qualifying', req_sit=u'N', data_type=(u'ID',u'1',u'2'), position=7,
codes=[] ) ),
Element( u'INS08', Properties(desc=u'Employment Status Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=8,
codes=[] ) ),
Element( u'INS09', Properties(desc=u'Student Status Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=9,
codes=[] ) ),
Element( u'INS10', Properties(desc=u'Yes/No Condition or Response Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=10,
codes=[] ) ),
Element( u'INS11', Properties(desc=u'Date Time Period Format Qualifier', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=11,
codes=[] ) ),
Element( u'INS12', Properties(desc=u'Date Time Period', req_sit=u'N', data_type=(u'AN',u'1',u'35'), position=12,
codes=[] ) ),
Element( u'INS13', Properties(desc=u'Confidentiality Code', req_sit=u'N', data_type=(u'ID',u'1',u'1'), position=13,
codes=[] ) ),
Element( u'INS14', Properties(desc=u'City Name', req_sit=u'N', data_type=(u'AN',u'2',u'30'), position=14,
codes=[] ) ),
Element( u'INS15', Properties(desc=u'State or Province Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=15,
codes=[] ) ),
Element( u'INS16', Properties(desc=u'Country Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=16,
codes=[] ) ),
Element( u'INS17', Properties(desc=u'Number', req_sit=u'S', data_type=(u'N0',u'1',u'9'), position=17,
codes=[] ) ),
),
)
# X12 278 loop 2010DB 'Additional Patient Information Contact Name':
# NM1 (name), N3 (address), N4 (city/state/zip) and PER (contact
# information) segments with element-level data types, requirement
# designators and permitted code sets.
# NOTE(review): appears machine-generated from the 278 implementation
# guide — prefer regenerating over hand-editing (confirm provenance).
parsed_278_2010DB = Loop( u'2010DB', Properties(looptype='',repeat=u'1',pos=u'170',req_sit=u'S',desc=u'Additional Patient Information Contact Name'),
Segment( u'NM1', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'170',desc=u'Additional Patient Information Contact Name'),
Element( u'NM101', Properties(desc=u'Entity Identifier Code', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'1P', u'2B', u'ABG', u'FA', u'PR', u'X3'] ) ),
Element( u'NM102', Properties(desc=u'Entity Type Qualifier', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=2,
codes=[u'1', u'2'] ) ),
Element( u'NM103', Properties(desc=u'Name Last or Organization Name', req_sit=u'S', data_type=(u'AN',u'1',u'60'), position=3,
codes=[] ) ),
Element( u'NM104', Properties(desc=u'Name First', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=4,
codes=[] ) ),
Element( u'NM105', Properties(desc=u'Name Middle', req_sit=u'S', data_type=(u'AN',u'1',u'25'), position=5,
codes=[] ) ),
Element( u'NM106', Properties(desc=u'Name Prefix', req_sit=u'N', data_type=(u'AN',u'1',u'10'), position=6,
codes=[] ) ),
Element( u'NM107', Properties(desc=u'Name Suffix', req_sit=u'S', data_type=(u'AN',u'1',u'10'), position=7,
codes=[] ) ),
Element( u'NM108', Properties(desc=u'Identification Code Qualifier', req_sit=u'S', data_type=(u'ID',u'1',u'2'), position=8,
codes=[u'24', u'34', u'46', u'PI', u'XV', u'XX'] ) ),
Element( u'NM109', Properties(desc=u'Identification Code', req_sit=u'S', data_type=(u'AN',u'2',u'80'), position=9,
codes=[] ) ),
Element( u'NM110', Properties(desc=u'Entity Relationship Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=10,
codes=[] ) ),
Element( u'NM111', Properties(desc=u'Entity Identifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=11,
codes=[] ) ),
),
Segment( u'N3', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'200',desc=u'Additional Patient Information Contact Address'),
Element( u'N301', Properties(desc=u'Address Information', req_sit=u'R', data_type=(u'AN',u'1',u'55'), position=1,
codes=[] ) ),
Element( u'N302', Properties(desc=u'Address Information', req_sit=u'S', data_type=(u'AN',u'1',u'55'), position=2,
codes=[] ) ),
),
Segment( u'N4', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'210',desc=u'Additional Patient Information Contact City/State/Zip Code'),
Element( u'N401', Properties(desc=u'City Name', req_sit=u'S', data_type=(u'AN',u'2',u'30'), position=1,
codes=[] ) ),
Element( u'N402', Properties(desc=u'State or Province Code', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=2,
codes=[] ) ),
Element( u'N403', Properties(desc=u'Postal Code', req_sit=u'S', data_type=(u'ID',u'3',u'15'), position=3,
codes=[] ) ),
Element( u'N404', Properties(desc=u'Country Code', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=4,
codes=[] ) ),
Element( u'N405', Properties(desc=u'Location Qualifier', req_sit=u'S', data_type=(u'ID',u'1',u'2'), position=5,
codes=[u'B1', u'DP'] ) ),
Element( u'N406', Properties(desc=u'Location Identifier', req_sit=u'S', data_type=(u'AN',u'1',u'30'), position=6,
codes=[] ) ),
),
Segment( u'PER', Properties(syntax='',req_sit=u'S',repeat=u'3',pos=u'220',desc=u'Additional Patient Information Contact Information'),
Element( u'PER01', Properties(desc=u'Contact Function Code', req_sit=u'R', data_type=(u'ID',u'2',u'2'), position=1,
codes=[u'IC'] ) ),
Element( u'PER02', Properties(desc=u'Name', req_sit=u'S', data_type=(u'AN',u'1',u'60'), position=2,
codes=[] ) ),
Element( u'PER03', Properties(desc=u'Communication Number Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=3,
codes=[u'EM', u'FX', u'TE'] ) ),
Element( u'PER04', Properties(desc=u'Communication Number', req_sit=u'S', data_type=(u'AN',u'1',u'256'), position=4,
codes=[] ) ),
Element( u'PER05', Properties(desc=u'Communication Number Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=5,
codes=[u'EM', u'EX', u'FX', u'TE'] ) ),
Element( u'PER06', Properties(desc=u'Communication Number', req_sit=u'S', data_type=(u'AN',u'1',u'256'), position=6,
codes=[] ) ),
Element( u'PER07', Properties(desc=u'Communication Number Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=7,
codes=[u'EM', u'EX', u'FX', u'TE'] ) ),
Element( u'PER08', Properties(desc=u'Communication Number', req_sit=u'S', data_type=(u'AN',u'1',u'256'), position=8,
codes=[] ) ),
Element( u'PER09', Properties(desc=u'Contact Inquiry Reference', req_sit=u'N', data_type=(u'AN',u'1',u'20'), position=9,
codes=[] ) ),
),
)
parsed_278_2010E = Loop( u'2010E', Properties(looptype='',repeat=u'3',pos=u'170',req_sit=u'R',desc=u'Service Provider Name'),
Segment( u'NM1', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'170',desc=u'Service Provider Name'),
Element( u'NM101', Properties(desc=u'Entity Identifier Code', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'1T', u'FA', u'SJ'] ) ),
Element( u'NM102', Properties(desc=u'Entity Type Qualifier', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=2,
codes=[u'1', u'2'] ) ),
Element( u'NM103', Properties(desc=u'Name Last or Organization Name', req_sit=u'S', data_type=(u'AN',u'1',u'60'), position=3,
codes=[] ) ),
Element( u'NM104', Properties(desc=u'Name First', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=4,
codes=[] ) ),
Element( u'NM105', Properties(desc=u'Name Middle', req_sit=u'S', data_type=(u'AN',u'1',u'25'), position=5,
codes=[] ) ),
Element( u'NM106', Properties(desc=u'Name Prefix', req_sit=u'N', data_type=(u'AN',u'1',u'10'), position=6,
codes=[] ) ),
Element( u'NM107', Properties(desc=u'Name Suffix', req_sit=u'S', data_type=(u'AN',u'1',u'10'), position=7,
codes=[] ) ),
Element( u'NM108', Properties(desc=u'Identification Code Qualifier', req_sit=u'S', data_type=(u'ID',u'1',u'2'), position=8,
codes=[u'24', u'34', u'46', u'XX'] ) ),
Element( u'NM109', Properties(desc=u'Identification Code', req_sit=u'S', data_type=(u'AN',u'2',u'80'), position=9,
codes=[] ) ),
Element( u'NM110', Properties(desc=u'Entity Relationship Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=10,
codes=[] ) ),
Element( u'NM111', Properties(desc=u'Entity Identifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=11,
codes=[] ) ),
),
Segment( u'REF', Properties(syntax='',req_sit=u'S',repeat=u'7',pos=u'180',desc=u'Service Provider Supplemental Identification'),
Element( u'REF01', Properties(desc=u'Reference Identification Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'1G', u'1J', u'EI', u'N5', u'N7', u'SY', u'ZH'] ) ),
Element( u'REF02', Properties(desc=u'Reference Identification', req_sit=u'R', data_type=(u'AN',u'1',u'50'), position=2,
codes=[] ) ),
Element( u'REF03', Properties(desc=u'Description', req_sit=u'N', data_type=(u'AN',u'1',u'80'), position=3,
codes=[] ) ),
Composite( u'C040', Properties(req_sit=u'N',refdes='',seq=u'04',desc=u'Reference Identifier'),
),
),
Segment( u'N3', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'200',desc=u'Service Provider Address'),
Element( u'N301', Properties(desc=u'Address Information', req_sit=u'R', data_type=(u'AN',u'1',u'55'), position=1,
codes=[] ) ),
Element( u'N302', Properties(desc=u'Address Information', req_sit=u'S', data_type=(u'AN',u'1',u'55'), position=2,
codes=[] ) ),
),
Segment( u'N4', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'210',desc=u'Service Provider City/State/ZIP Code'),
Element( u'N401', Properties(desc=u'City Name', req_sit=u'S', data_type=(u'AN',u'2',u'30'), position=1,
codes=[] ) ),
Element( u'N402', Properties(desc=u'State or Province Code', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=2,
codes=[] ) ),
Element( u'N403', Properties(desc=u'Postal Code', req_sit=u'S', data_type=(u'ID',u'3',u'15'), position=3,
codes=[] ) ),
Element( u'N404', Properties(desc=u'Country Code', req_sit=u'S', data_type=(u'ID',u'2',u'3'), position=4,
codes=[] | |
<reponame>Agnarsh/functions
# *****************************************************************************
# © Copyright IBM Corp. 2018. All Rights Reserved.
#
# This program and the accompanying materials
# are made available under the terms of the Apache V2.0 license
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# *****************************************************************************
import datetime as dt
import logging
logger = logging.getLogger(__name__)
class BaseUIControl(object):
    """Base class for UI control metadata builders.

    Provides the python-type-to-catalog-datatype conversion helpers
    shared by all concrete controls.
    """

    # Marker attribute used to recognise UI control objects
    is_ui_control = True

    # Python type -> catalog datatype name. Hoisted to class level so the
    # mapping is built once instead of on every conversion call.
    _UI_DATATYPES = {bool: 'BOOLEAN', str: 'LITERAL', float: 'NUMBER', int: 'NUMBER',
                     dict: 'JSON', dt.datetime: 'TIMESTAMP', None: None}

    # Python type -> JSON-schema type name
    _SCHEMA_DATATYPES = {bool: 'boolean', str: 'string', float: 'number', int: 'number',
                         dt.datetime: 'number', None: None}

    def convert_datatype(self, from_datatype):
        """Return the catalog datatype string for a python type.

        Raises TypeError when the type has no known conversion.
        """
        try:
            return self._UI_DATATYPES[from_datatype]
        except KeyError:
            msg = 'couldnt convert type %s ' % from_datatype
            raise TypeError(msg)

    def convert_schema_datatype(self, from_datatype):
        """Return the JSON-schema type string for a python type.

        Raises TypeError when the type has no known conversion.
        """
        try:
            return self._SCHEMA_DATATYPES[from_datatype]
        except KeyError:
            msg = 'couldnt convert type %s ' % from_datatype
            raise TypeError(msg)
class UIFunctionOutSingle(BaseUIControl):
    """
    Single output item.

    Parameters
    ----------
    name : str
        Name of function argument
    datatype: python datatype object
        Used to validate UI input, e.g. str, float, dt.datetime, bool
    description: str
        Help text to display in UI
    tags: list of strs
        Optional tags, e.g. ['DIMENSION', 'EVENT', 'ALERT']
    """
    type_ = 'OUTPUT_DATA_ITEM'

    def __init__(self, name, datatype=None, description=None, tags=None):
        self.name = name
        self.datatype = datatype
        self.description = ('Choose an item name for the function output'
                            if description is None else description)
        self.tags = [] if tags is None else tags

    def to_metadata(self):
        """Build the catalog metadata dict describing this output."""
        return {'name': self.name,
                'dataType': self.convert_datatype(self.datatype),
                'description': self.description,
                'tags': self.tags}
class UIFunctionOutMulti(BaseUIControl):
    """
    Array of multiple output items.

    Parameters
    ----------
    name : str
        Name of function argument
    cardinality_from: str
        Name of the array input argument that determines how many items
        this output array produces
    is_datatype_derived: bool
        True when the output datatypes mirror the datatypes of the input
        array that drives this output array
    datatype: python datatype object
        Used to validate UI input, e.g. str, float, dt.datetime, bool
    description: str
        Help text to display in UI
    tags: list of strs
        Optional tags, e.g. ['DIMENSION', 'EVENT', 'ALERT']
    """
    type_ = 'OUTPUT_DATA_ITEM'

    def __init__(self, name, cardinality_from, is_datatype_derived=False, datatype=None, description=None, tags=None,
                 output_item=None):
        self.name = name
        self.cardinality_from = cardinality_from
        self.is_datatype_derived = is_datatype_derived
        if description is None:
            description = 'Provide names and datatypes for output items'
        self.description = description
        # NOTE(review): output_item is accepted but never stored — confirm
        # whether that is intentional (UIMultiItem does store it)
        self.datatype = None if datatype is None else self.convert_datatype(datatype)
        self.tags = [] if tags is None else tags

    def to_metadata(self):
        """Build the catalog metadata dict describing this output array."""
        dtype_list = [self.datatype] if self.datatype is not None else None
        meta = {'name': self.name, 'cardinalityFrom': self.cardinality_from,
                'dataTypeForArray': dtype_list,
                'description': self.description, 'tags': self.tags,
                'jsonSchema': {"$schema": "http://json-schema.org/draft-07/schema#", "type": "array",
                               "items": {"type": "string"}}}
        if self.is_datatype_derived:
            meta['dataTypeFrom'] = self.cardinality_from
        return meta
class UISingleItem(BaseUIControl):
    """
    Choose a single data item as a function argument.

    Parameters
    ----------
    name : str
        Name of function argument
    datatype: python datatype object
        Used to validate UI input, e.g. str, float, dt.datetime, bool
    required: bool
        Specify True when this argument is mandatory
    description: str
        Help text to display in UI
    tags: list of strs
        Optional tags, e.g. ['DIMENSION', 'EVENT', 'ALERT']
    """
    type_ = 'DATA_ITEM'

    def __init__(self, name, datatype=None, description=None, required=True, tags=None):
        self.name = name
        self.datatype = datatype
        self.required = required
        if description is None:
            # NOTE(review): default help text says 'one or more' although this
            # control selects a single item — confirm the wording is intended
            description = 'Choose one or more data item to use as a function input'
        self.description = description
        self.tags = [] if tags is None else tags

    def to_metadata(self):
        """Build the catalog metadata dict describing this input control."""
        dtype = None if self.datatype is None else self.convert_datatype(self.datatype)
        return {'name': self.name, 'type': self.type_, 'dataType': dtype,
                'required': self.required, 'description': self.description,
                'tags': self.tags}
class UIMultiItem(BaseUIControl):
    """
    Multi-select list of data items.

    Parameters
    ----------
    name : str
        Name of function argument
    datatype: python datatype object
        Used to validate UI input, e.g. str, float, dt.datetime, bool
    required: bool
        Specify True when this argument is mandatory
    min_items: int
        Minimum number of items that must be selected
    max_items: int
        Maximum number of items that can be selected
    description: str
        Help text to display in UI
    tags: list of strs
        Optional tags, e.g. ['DIMENSION', 'EVENT', 'ALERT']
    """
    type_ = 'DATA_ITEM'

    def __init__(self, name, datatype=None, description=None, required=True, min_items=None, max_items=None, tags=None,
                 output_item=None, is_output_datatype_derived=False, output_datatype=None):
        self.name = name
        self.datatype = datatype
        self.required = required
        if description is None:
            description = 'Choose one or more data item to use as a function input'
        self.description = description
        if min_items is None:
            # mandatory inputs need at least one selection
            min_items = 1 if self.required else 0
        self.min_items = min_items
        self.max_items = max_items
        self.tags = [] if tags is None else tags
        # optional metadata used to derive an array output from this input
        self.output_item = output_item
        self.is_output_datatype_derived = is_output_datatype_derived
        self.output_datatype = output_datatype

    def to_metadata(self):
        """Build the catalog metadata dict describing this input control."""
        dtype_list = None if self.datatype is None else [self.convert_datatype(self.datatype)]
        return {'name': self.name, 'type': self.type_, 'dataType': 'ARRAY',
                'dataTypeForArray': dtype_list, 'required': self.required,
                'description': self.description, 'tags': self.tags,
                'jsonSchema': {"$schema": "http://json-schema.org/draft-07/schema#", "type": "array",
                               "minItems": self.min_items, "maxItems": self.max_items,
                               "items": {"type": "string"}}}

    def to_output_metadata(self):
        """Build metadata for the derived array output, or None when absent."""
        if self.output_item is None:
            return None
        dtype_list = None if self.output_datatype is None else [self.convert_datatype(self.output_datatype)]
        meta = {'name': self.output_item, 'cardinalityFrom': self.name,
                'dataTypeForArray': dtype_list,
                'description': self.description, 'tags': self.tags,
                'jsonSchema': {"$schema": "http://json-schema.org/draft-07/schema#", "type": "array",
                               "items": {"type": "string"}}}
        if self.is_output_datatype_derived:
            meta['dataTypeFrom'] = self.name
        return meta
class UIMulti(BaseUIControl):
    """
    Multi-select list of constants.

    Parameters
    ----------
    name : str
        Name of function argument
    datatype: python datatype object
        Used to validate UI input, e.g. str, float, dt.datetime, bool
    required: bool
        Specify True when this argument is mandatory
    min_items: int
        Minimum number of values that must be entered/selected
    max_items: int
        Maximum number of values that can be entered/selected
    description: str
        Help text to display in UI
    tags: list of strs
        Optional tags, e.g. ['DIMENSION', 'EVENT', 'ALERT']
    values: list
        Values to display in UI picklist
    """
    type_ = 'CONSTANT'

    def __init__(self, name, datatype, description=None, required=True, min_items=None, max_items=None, tags=None,
                 values=None, output_item=None, is_output_datatype_derived=False, output_datatype=None):
        self.name = name
        self.datatype = datatype
        self.required = required
        if description is None:
            description = 'Enter a list of comma separated values'
        self.description = description
        if min_items is None:
            # mandatory inputs need at least one value
            min_items = 1 if self.required else 0
        self.min_items = min_items
        self.max_items = max_items
        self.tags = [] if tags is None else tags
        self.values = values
        # optional metadata used to derive an array output from this input
        self.output_item = output_item
        self.is_output_datatype_derived = is_output_datatype_derived
        self.output_datatype = output_datatype

    def to_metadata(self):
        """Build the catalog metadata dict; requires a datatype."""
        if self.datatype is None:
            msg = 'Datatype is required for multi constant array input %s' % self.name
            raise ValueError(msg)
        dtype_list = [self.convert_datatype(self.datatype)]
        schema_datatype = self.convert_schema_datatype(self.datatype)
        return {'name': self.name, 'type': self.type_, 'dataType': 'ARRAY',
                'dataTypeForArray': dtype_list, 'required': self.required,
                'description': self.description, 'tags': self.tags, 'values': self.values,
                'jsonSchema': {"$schema": "http://json-schema.org/draft-07/schema#", "type": "array",
                               "minItems": self.min_items, "maxItems": self.max_items,
                               "items": {"type": schema_datatype}}}

    def to_output_metadata(self):
        """Build metadata for the derived array output, or None when absent."""
        if self.output_item is None:
            return None
        if self.output_datatype is None:
            dtype_list = None
            schema_type = None
        else:
            dtype_list = [self.convert_datatype(self.output_datatype)]
            schema_type = self.convert_schema_datatype(self.output_datatype)
        meta = {'name': self.output_item, 'cardinalityFrom': self.name,
                'dataTypeForArray': dtype_list,
                'description': self.description, 'tags': self.tags,
                'jsonSchema': {"$schema": "http://json-schema.org/draft-07/schema#", "type": "array",
                               "items": {"type": schema_type}}}
        if self.is_output_datatype_derived:
            meta['dataTypeFrom'] = self.name
        return meta
class UISingle(BaseUIControl):
"""
Single valued constant
Parameters
-----------
name : str
Name of function argument
datatype: python datatype object
Used to validate UI input. e.g. str, float, dt.datetime, bool
required: bool
Specify True when this argument is mandatory
description: str
Help text to display in UI
tags: list of strs
Optional tags, e.g. ['DIMENSION', 'EVENT', 'ALERT']
values: list
Values to display in UI picklist
"""
type_ = 'CONSTANT'
def __init__(self, name, datatype=None, description=None, tags=None, required=True, values=None, default=None):
self.name = name
self.datatype = datatype
if description is None:
description = 'Enter a constant value'
self.description = description
if tags is None:
tags = []
self.tags = tags
self.required = required
self.values = values
self.default = default
def to_metadata(self):
meta = {'name': self.name, 'type': self.type_, 'dataType': self.convert_datatype(self.datatype),
'description': self.description, 'tags': self.tags, 'required': self.required, 'values': self.values}
if self.default is not None:
if isinstance(self.default, dict):
meta['value'] = self.default
else:
meta['value'] | |
return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb2ord, cheb2ap
Notes
-----
The Chebyshev type II filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the stopband and increased ringing in the step response.
Type II filters do not roll off as fast as Type I (`cheby1`).
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby2(4, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type II frequency response (rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog,
output=output, ftype='cheby2')
def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba'):
    """
    Elliptic (Cauer) digital and analog filter design.
    Design an Nth-order digital or analog elliptic filter and return
    the filter coefficients.
    Parameters
    ----------
    N : int
        The order of the filter.
    rp : float
        The maximum ripple allowed below unity gain in the passband.
        Specified in decibels, as a positive number.
    rs : float
        The minimum attenuation required in the stop band.
        Specified in decibels, as a positive number.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies.
        For elliptic filters, this is the point in the transition band at
        which the gain first drops below -`rp`.
        For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
        Nyquist frequency, pi radians/sample. (`Wn` is thus in
        half-cycles / sample.)
        For analog filters, `Wn` is an angular frequency (e.g. rad/s).
    btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
        The type of filter. Default is 'lowpass'.
    analog : bool, optional
        When True, return an analog filter, otherwise a digital filter is
        returned.
    output : {'ba', 'zpk', 'sos'}, optional
        Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
        second-order sections ('sos'). Default is 'ba'.
    Returns
    -------
    b, a : ndarray, ndarray
        Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
        Only returned if ``output='ba'``.
    z, p, k : ndarray, ndarray, float
        Zeros, poles, and system gain of the IIR filter transfer
        function. Only returned if ``output='zpk'``.
    sos : ndarray
        Second-order sections representation of the IIR filter.
        Only returned if ``output='sos'``.
    See Also
    --------
    ellipord, ellipap
    Notes
    -----
    Also known as Cauer or Zolotarev filters, the elliptical filter maximizes
    the rate of transition between the frequency response's passband and
    stopband, at the expense of ripple in both, and increased ringing in the
    step response.
    As `rp` approaches 0, the elliptical filter becomes a Chebyshev
    type II filter (`cheby2`). As `rs` approaches 0, it becomes a Chebyshev
    type I filter (`cheby1`). As both approach 0, it becomes a Butterworth
    filter (`butter`).
    The equiripple passband has N maxima or minima (for example, a
    5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
    unity for odd-order filters, or -rp dB for even-order filters.
    The ``'sos'`` output parameter was added in 0.16.0.
    Examples
    --------
    Plot the filter's frequency response, showing the critical points:
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> b, a = signal.ellip(4, 5, 40, 100, 'low', analog=True)
    >>> w, h = signal.freqs(b, a)
    >>> plt.semilogx(w, 20 * np.log10(abs(h)))
    >>> plt.title('Elliptic filter frequency response (rp=5, rs=40)')
    >>> plt.xlabel('Frequency [radians / second]')
    >>> plt.ylabel('Amplitude [dB]')
    >>> plt.margins(0, 0.1)
    >>> plt.grid(which='both', axis='both')
    >>> plt.axvline(100, color='green') # cutoff frequency
    >>> plt.axhline(-40, color='green') # rs
    >>> plt.axhline(-5, color='green') # rp
    >>> plt.show()
    """
    # Thin wrapper: delegate to the generic IIR design routine with the
    # elliptic approximation selected.
    return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog,
                     output=output, ftype='elliptic')
def bessel(N, Wn, btype='low', analog=False, output='ba', norm='phase'):
"""Bessel/Thomson digital and analog filter design.
Design an Nth-order digital or analog Bessel filter and return the
filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies (defined
by the `norm` parameter).
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned. (See Notes.)
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
norm : {'phase', 'delay', 'mag'}, optional
Critical frequency normalization:
``phase``
The filter is normalized such that the phase response reaches its
midpoint at angular (e.g. rad/s) frequency `Wn`. This happens for
both low-pass and high-pass filters, so this is the
"phase-matched" case.
The magnitude response asymptotes are the same as a Butterworth
filter of the same order with a cutoff of `Wn`.
This is the default, and matches MATLAB's implementation.
``delay``
The filter is normalized such that the group delay in the passband
is 1/`Wn` (e.g. seconds). This is the "natural" type obtained by
solving Bessel polynomials.
``mag``
The filter is normalized such that the gain magnitude is -3 dB at
angular frequency `Wn`.
.. versionadded:: 0.18.0
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
Notes
-----
Also known as a Thomson filter, the analog Bessel filter has maximally
flat group delay and maximally linear phase response, with very little
ringing in the step response. [1]_
The Bessel is inherently an analog filter. This function generates digital
Bessel filters using the bilinear transform, which does not preserve the
phase response of the analog filter. As such, it is only approximately
correct at frequencies below about fs/4. To get maximally-flat group
delay at higher frequencies, the analog Bessel filter must be transformed
using phase-preserving techniques.
See `besselap` for implementation details and references.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the phase-normalized frequency response, showing the relationship
to the Butterworth's cutoff frequency (green):
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)), color='silver', ls='dashed')
>>> b, a = signal.bessel(4, 100, 'low', analog=True, norm='phase')
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
>>> plt.title('Bessel filter magnitude response (with Butterworth)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
and the phase midpoint:
>>> plt.figure()
>>> plt.semilogx(w, np.unwrap(np.angle(h)))
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-np.pi, color='red') # phase midpoint
>>> plt.title('Bessel filter phase response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Phase [radians]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
Plot the magnitude-normalized frequency response, showing the -3 dB cutoff:
>>> b, a = signal.bessel(3, 10, 'low', analog=True, norm='mag')
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
>>> plt.axhline(-3, color='red') | |
'-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
time_parts = input_data.split('.')
if len(time_parts) > 1:
micro_seconds = int(float('0.' + time_parts[1]) * 1000000)
input_data = '%s.%s' % (
time_parts[0], "{}".format(micro_seconds).rjust(6, "0"), )
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt
    def gds_validate_date(self, input_data, node=None, input_name=''):
        """Validation hook for xs:date values; returns the value unchanged."""
        return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = '%04d-%02d-%02d' % (
input_data.year,
input_data.month,
input_data.day,
)
try:
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(
hours, minutes)
except AttributeError:
pass
return _svalue
    @classmethod
    def gds_parse_date(cls, input_data):
        """Parse an xs:date string (``YYYY-MM-DD`` with an optional ``Z``
        or ``+HH:MM``/``-HH:MM`` suffix) into a ``datetime.date``.

        The timezone is attached before ``.date()`` is taken, so the
        offset itself is discarded in the returned value.
        """
        tz = None
        if input_data[-1] == 'Z':
            # trailing 'Z' means UTC
            tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
            input_data = input_data[:-1]
        else:
            results = GeneratedsSuper.tzoff_pattern.search(input_data)
            if results is not None:
                # convert '+HH:MM' / '-HH:MM' into signed minutes
                tzoff_parts = results.group(2).split(':')
                tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
                if results.group(1) == '-':
                    tzoff *= -1
                tz = GeneratedsSuper._FixedOffsetTZ(
                    tzoff, results.group(0))
                # drop the six-character offset suffix
                input_data = input_data[:-6]
        dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
        dt = dt.replace(tzinfo=tz)
        return dt.date()
    def gds_validate_time(self, input_data, node=None, input_name=''):
        """Validation hook for xs:time values; returns the value unchanged."""
        return input_data
def gds_format_time(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%02d:%02d:%02d' % (
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%02d:%02d:%02d.%s' % (
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_validate_simple_patterns(self, patterns, target):
# pat is a list of lists of strings/patterns.
# The target value must match at least one of the patterns
# in order for the test to succeed.
found1 = True
for patterns1 in patterns:
found2 = False
for patterns2 in patterns1:
mo = re_.search(patterns2, target)
if mo is not None and len(mo.group(0)) == len(target):
found2 = True
break
if not found2:
found1 = False
break
return found1
    @classmethod
    def gds_parse_time(cls, input_data):
        """Parse an xs:time string (``HH:MM:SS[.ffffff]`` with an optional
        ``Z`` or ``+HH:MM``/``-HH:MM`` suffix) into a ``datetime.time``."""
        tz = None
        if input_data[-1] == 'Z':
            # trailing 'Z' means UTC
            tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
            input_data = input_data[:-1]
        else:
            results = GeneratedsSuper.tzoff_pattern.search(input_data)
            if results is not None:
                # convert '+HH:MM' / '-HH:MM' into signed minutes
                tzoff_parts = results.group(2).split(':')
                tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
                if results.group(1) == '-':
                    tzoff *= -1
                tz = GeneratedsSuper._FixedOffsetTZ(
                    tzoff, results.group(0))
                # drop the six-character offset suffix
                input_data = input_data[:-6]
        if len(input_data.split('.')) > 1:
            # fractional seconds present
            dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
        else:
            dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
        dt = dt.replace(tzinfo=tz)
        return dt.time()
def gds_check_cardinality_(
self, value, input_name,
min_occurs=0, max_occurs=1, required=None):
if value is None:
length = 0
elif isinstance(value, list):
length = len(value)
else:
length = 1
if required is not None :
if required and length < 1:
self.gds_collector_.add_message(
"Required value {}{} is missing".format(
input_name, self.gds_get_node_lineno_()))
if length < min_occurs:
self.gds_collector_.add_message(
"Number of values for {}{} is below "
"the minimum allowed, "
"expected at least {}, found {}".format(
input_name, self.gds_get_node_lineno_(),
min_occurs, length))
elif length > max_occurs:
self.gds_collector_.add_message(
"Number of values for {}{} is above "
"the maximum allowed, "
"expected at most {}, found {}".format(
input_name, self.gds_get_node_lineno_(),
max_occurs, length))
def gds_validate_builtin_ST_(
self, validator, value, input_name,
min_occurs=None, max_occurs=None, required=None):
if value is not None:
try:
validator(value, input_name=input_name)
except GDSParseError as parse_error:
self.gds_collector_.add_message(str(parse_error))
def gds_validate_defined_ST_(
self, validator, value, input_name,
min_occurs=None, max_occurs=None, required=None):
if value is not None:
try:
validator(value)
except GDSParseError as parse_error:
self.gds_collector_.add_message(str(parse_error))
    def gds_str_lower(self, instring):
        """Return *instring* lower-cased (hook used by generated code)."""
        return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
    def gds_build_any(self, node, type_name=''):
        """Serialize *node* (content of an xs:any particle) back to an XML
        string; *type_name* is unused here."""
        # provide default value in case option --disable-xml is used.
        content = ""
        content = etree_.tostring(node, encoding="unicode")
        return content
@classmethod
def gds_reverse_node_mapping(cls, mapping):
return dict(((v, k) for k, v in mapping.items()))
@staticmethod
def gds_encode(instring):
if sys.version_info.major == 2:
if ExternalEncoding:
encoding = ExternalEncoding
else:
encoding = 'utf-8'
return instring.encode(encoding)
else:
return instring
@staticmethod
def convert_unicode(instring):
if isinstance(instring, str):
result = quote_xml(instring)
elif sys.version_info.major == 2 and isinstance(instring, unicode):
result = quote_xml(instring).encode('utf8')
else:
result = GeneratedsSuper.gds_encode(str(instring))
return result
    def __eq__(self, other):
        """Structural equality: identical concrete class and pairwise-equal
        ``__dict__`` items, ignoring 'parent_object_' and 'gds_collector_'.

        NOTE(review): relies on both instances' ``__dict__`` having the
        same insertion order; ``zip_longest`` pads the shorter side with
        None so differing attribute counts compare unequal.
        """
        def excl_select_objs_(obj):
            # obj is a (name, value) pair from __dict__.items()
            return (obj[0] != 'parent_object_' and
                    obj[0] != 'gds_collector_')
        if type(self) != type(other):
            return False
        return all(x == y for x, y in zip_longest(
            filter(excl_select_objs_, self.__dict__.items()),
            filter(excl_select_objs_, other.__dict__.items())))
    def __ne__(self, other):
        """Inverse of __eq__ (explicit for Python 2 compatibility)."""
        return not self.__eq__(other)
    # Django ETL transform hooks.
    def gds_djo_etl_transform(self):
        """Hook for Django ETL post-parse transformation; default no-op."""
        pass
    def gds_djo_etl_transform_db_obj(self, dbobj):
        """Hook to adjust the Django DB object *dbobj*; default no-op."""
        pass
    # SQLAlchemy ETL transform hooks.
    def gds_sqa_etl_transform(self):
        """Hook for SQLAlchemy ETL transformation; returns (0, None)."""
        return 0, None
    def gds_sqa_etl_transform_db_obj(self, dbobj):
        """Hook to adjust the SQLAlchemy DB object *dbobj*; default no-op."""
        pass
def gds_get_node_lineno_(self):
if (hasattr(self, "gds_elementtree_node_") and
self.gds_elementtree_node_ is not None):
return ' near line {}'.format(
self.gds_elementtree_node_.sourceline)
else:
return ""
def getSubclassFromModule_(module, class_):
    '''Get the subclass of a class from a specific module.

    Looks up an attribute named "<ClassName>Sub" on *module* and returns
    it, or None when the module defines no such subclass.
    '''
    subclass_name = class_.__name__ + 'Sub'
    return getattr(module, subclass_name, None)
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
# Encoding used by gds_encode() under Python 2 ('' falls back to 'utf-8').
ExternalEncoding = ''
# Set this to false in order to deactivate during export, the use of
# name space prefixes captured from the input document.
UseCapturedNS_ = True
CapturedNsmap_ = {}
# Optional '{namespace}' qualifier plus local part of an element tag.
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
# Runs of whitespace/newlines, for string cleanup.
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
# CDATA sections, matched non-greedily so quote_xml() can skip them.
CDATA_pattern_ = re_.compile(r"<!\[CDATA\[.*?\]\]>", re_.DOTALL)
# Change this to redirect the generated superclass module to use a
# specific subclass module.
CurrentSubclassModule_ = None
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
if pretty_print:
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
    "Escape markup chars, but do not modify CDATA sections."
    if not inStr:
        return ''
    text = inStr if isinstance(inStr, BaseStrType_) else '%s' % inStr
    pieces = []
    cursor = 0
    for mo in CDATA_pattern_.finditer(text):
        # escape everything before the CDATA section, keep the section raw
        pieces.append(quote_xml_aux(text[cursor:mo.start()]))
        pieces.append(text[mo.start():mo.end()])
        cursor = mo.end()
    # escape whatever follows the last CDATA section (or the whole string)
    pieces.append(quote_xml_aux(text[cursor:]))
    return ''.join(pieces)
def quote_xml_aux(inStr):
    """Escape the XML markup characters in *inStr*.

    The visible replacements were no-ops ('&' -> '&'); XML requires the
    predefined entities.  '&' must be escaped first so the entities
    introduced for '<' and '>' are not themselves re-escaped.
    """
    s1 = inStr.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    return s1
def quote_attrib(inStr):
    """Escape *inStr* for use as an XML attribute value and wrap it in
    quotes.

    The visible replacements were no-ops ('&' -> '&') and the
    double-quote replacement was garbled; restored to the XML predefined
    entities.  Single quotes are used as the delimiter when the value
    contains double quotes but no single quotes; otherwise double quotes
    are used and any embedded double quotes become &quot;.
    """
    s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            s1 = '"%s"' % s1.replace('"', '&quot;')
        else:
            s1 = "'%s'" % s1
    else:
        s1 = '"%s"' % s1
    return s1
def quote_python(inStr):
    """Return *inStr* rendered as a quoted Python string literal.

    Single quotes (tripled for multi-line text) are preferred; when the
    text itself contains single quotes, double quotes are used instead
    and any embedded double quotes are backslash-escaped.
    """
    text = inStr
    if "'" not in text:
        quote = "'''" if '\n' in text else "'"
        return '%s%s%s' % (quote, text, quote)
    if '"' in text:
        text = text.replace('"', '\\"')
    quote = '"""' if '\n' in text else '"'
    return '%s%s%s' % (quote, text, quote)
def get_all_text_(node):
    """Concatenate *node*'s leading text with the tail text of each of its
    children (i.e. all character data directly inside the element)."""
    parts = [node.text] if node.text is not None else []
    parts.extend(child.tail for child in node if child.tail is not None)
    return ''.join(parts)
def find_attr_value_(attr_name, node):
    """Look up attribute *attr_name* on *node*, resolving an optional
    'prefix:name' form via the node's nsmap ('xml' is hard-wired to the
    W3C XML namespace).  Returns None when absent or unresolvable."""
    attrs = node.attrib
    parts = attr_name.split(':')
    if len(parts) == 1:
        return attrs.get(attr_name)
    if len(parts) != 2:
        return None
    prefix, local = parts
    if prefix == 'xml':
        uri = 'http://www.w3.org/XML/1998/namespace'
    else:
        uri = node.nsmap.get(prefix)
    if uri is None:
        return None
    return attrs.get('{%s}%s' % (uri, local, ))
def encode_str_2_3(instr):
    """Identity function; retained for Python 2/3-compatible call sites."""
    return instr
class GDSParseError(Exception):
    """Raised by raise_parse_error() and validators on bad XML data."""
    pass
def raise_parse_error(node, msg):
    """Raise GDSParseError with element tag and line context appended
    when a source *node* is available."""
    if node is None:
        raise GDSParseError(msg)
    raise GDSParseError(
        '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, ))
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants | |
dératiser déréaliser
dérégionaliser déréglementer déréguler dérembourser déréprimer
déresponsabiliser dérestaurer dérider dérigidifier dériver dérober dérocher
dérocter déroder déroquer dérouiller dérouler dérouter déroyaliser dérueller
déruraliser dérussiser désabonner désabouter désabriter désabuser désaccentuer
désacclimater désaccorder désaccorer désaccoupler désaccoutumer désachalander
désacraliser désactiver désadapter désadopter désaffecter désaffectionner
désafférenter désaffleurer désaffourcher désaffubler désagater désagrafer
désailer désaimanter désaimer désaisonnaliser désaisonner désajuster
désalcoyler désaligner désaliniser désallouer désalper désalphabétiser
désaluminiser désamarrer désambiguer désambiguïser désaméricaniser désamianter
désamidonner désaminer désancrer désanctuariser désangler désangliciser
désangoisser désankyloser désannexer désapeurer désappareiller désapparenter
désappointer désapprouver désapprovisionner désarabiser désarchiver
désarçonner désargenter désaristocratiser désarmer désarmorcer désaromatiser
désarrimer désarticuler désasiatiser désasphalter désaspirer désassembler
désassibiler désassimiler désassurer désatelliser désatomiser désattrister
désaturer désauber désautoriser désaveugler désavouer désaxer désazoter
desceller déschister déschlammer déscolariser désécailler déséchafauder
déséchouer déséclairer désécologiser déséconomiser désectoriser
déségrégationner désélectriser désémantiser désemballer désembarquer
désembaucher désembobiner désembourber désembourgeoiser désembouteiller
désembringuer désembrocher désembrouiller désembroussailler désembuer
désemmancher désemmêler désemmieller désemmitoufler désemmurer désempailler
désemparer désempêtrer désemphatiser désempierrer désempiler désemplumer
désempoisonner désempoissonner désemprisonner désemprunter désémulsionner
désenamourer désénamourer désencadrer désencanailler désencapsuler
désencapuchonner désencarter désencartonner désencastrer désencaustiquer
désenchaîner désenchanter désenchevêtrer désenclaver désenclencher désenclouer
désencoller désencombrer désencorder désencroûter désencuivrer désendetter
désendimancher désenfiler désenflammer désenfler désenfourner désenfumer
désengazonner désenglober désengluer désengommer désenivrer désenliser
désenrhumer désenrober désenrôler désenrouer désenrubanner désensabler
désensacher désenseigner désenserrer désensibiliser désensommeiller
désensoufrer désentartrer désenterrer désentêter désenthousiasmer désentoiler
désentortiller désentraver désenturbanner désenvaser désenvenimer désenverguer
désenvoûter désépargner désépauler désépingler déséquetter déséquilibrer
déséquiper désergoter désérotiser déserter désertiser désétamer désétatiser
déséthaniser désétoffer déseuropéaniser désexciter désexualiser déshabiliter
déshabiller déshabiter déshabituer désharmoniser désharnacher désharponner
déshémoglobiniser désherber déshériter désheurer déshistoriciser
déshomogénéiser déshonorer déshospitaliser déshuiler déshumaniser déshydrater
désiconiser désidéaliser désidentifier désidéologiser designer désigner
désiler désilicater désillusionner désillustrer désimbriquer désimmuniser
désimperméabiliser désincarner désincorporer désincruster désinculper
désindemniser désindexer désindividualiser désindustrialiser désinfantiliser
désinfatuer désinfecter désinféoder désinformatiser désinformer désinhiber
désinitialiser désinsectiser désinstaller désintellectualiser désintéresser
désinternationaliser désintoxiquer désintriquer désinvaginer désinventer
désinviter désioniser désirer désislamiser désisoler désister désitalianiser
désobstruer désobuser désoccidentaliser désocculter désoccuper désocialiser
désodoriser désoeuvrer désofficialiser désoler désolidariser désolvater
désongler désoperculer désophistiquer désopiler désorber désorbiter
désordonner désorganiser désorientaliser désorienter désosser désoufrer
désoutiller désoviétiser désoxyder déspécialiser déspiraliser déspiritualiser
désponsoriser despotiser desquamer dessabler dessaigner dessaisonaliser
dessaisonner dessaler dessaliniser dessangler dessaouler desseller desserrer
dessiller dessiner dessoler dessoucher dessouder dessouler dessoûler
dessuinter déstabiliser déstaliniser déstandardiser déstariser déstériliser
destiner destituer déstocker déstresser destructurer déstructurer
désubjectiviser désubstantialiser désubventionner désucrer désulfater
désulfiter désulfurer désurbaniser désurchauffer désurtaxer désymboliser
désynchroniser désyndicaliser détabler détabouiser détacher détailler détaler
détalinguer détaller détalonner détalquer détamiser détanner détanniser
détaper détapisser détarifer détartrer détatouer détaxer détayloriser
détechnocratiser détecter détériorer déterminer déterminiser déterrer
déterritorialiser détester déteutonner déthéâtraliser déthéiner déthésauriser
détimbrer détiquer détirefonner détirer détisser détitiser détitrer détoner
détonner détortiller détotaliser détourer détourner détoxiquer détracter
détrancaner détrancher détrapper détraquer détremper détresser détribaliser
détricoter détripler détromper détroncher détrôner détronquer détroquer
détrousser détuber dévaginer dévaler dévaliser dévaloriser dévaluer dévaser
dévaster développer déventer dévergonder déverguer déverrouiller déverser
dévider deviner dévirer dévirginiser déviriliser déviroler deviser dévisser
dévitaliser dévitaminer dévitaminiser dévocaliser dévoiler dévoiser dévoler
dévolter dévorer dévouer dévriller dextriniser dézinguer diaboliser diaconiser
diagnostiquer diagonaliser dialectaliser dialectiser dialoguer dialyser
diamanter diapasonner diaphanéiser diaphaniser diaphragmer diaprer diastaser
diazoter dichotomiser dicter diésélifier diéséliser diffamer diffluer
difformer diffracter diffuser difluorer digitaliser digresser diguer
dihydroxyler diioder dilapider dilater diligenter diluer dimensionner dîmer
dimériser diminuer dindonner dîner dinguer dinitrer diogéniser diphtonguer
diplexer diplomatiser diplômer dirimer discerner discipliner disconnecter
discontinuer discorder discréditer discrétiser discriminer disculper
discutailler discuter disjoncter disloquer dismuter disneyiser dispatcher
dispenser disperser disponibiliser disposer disproportionner disputailler
disputer disquer dissembler disséminer disserter dissimiler dissimuler
dissiper dissoner dissuader distiller distinguer distribuer disubstituer
divaguer diverticuler diviniser diviser divulguer dociliser documenter
dodeliner dodiner dogmatiser doguer doigter dolenter doler dollariser
dolomitiser domanialiser domestiquer dominer domotiser dompter donjuaniser
donner doper dorer dorloter dormailler dormichonner dorsaliser doser dosser
doter douaner doublecliquer double-cliquer doubler <NAME>
douiller <NAME> drageonner <NAME> dr<NAME>
dramatiser <NAME> dresdeniser dresser dribbler dribler driller driver
droguer droitiser droper dropper drosser dualiser dudgeonner duiter dumper
duper duplexer duplicater dupliquer duraminiser durer dynamiser dynamiter
dysfonctionner ébarber ébaucher éberluer éberner éboguer éborgner ébosser
ébotter ébouer ébouillanter ébouler ébourgeonner ébouriffer ébourrer ébouser
ébousiner ébouter ébouturer ébraiser ébrancher ébranler ébraser ébrauder
ébroder ébrouder ébrouer ébrousser ébruiter ébruter éburnifier écacher écaffer
écailler écaler écanguer écapsuler écarbouiller écarder écarquiller écarter
écarver ecchymoser écepper échafauder échaloter échancrer échantillonner
échanvrer échapper échardonner écharner écharper écharpiller échauder
échauffer échauler échaumer échelonner écheniller échevetter échigner échiner
écholocaliser échopper échosonder échouer écimer éclabousser éclairer éclater
éclipser éclisser écloper écluser écobuer écoeurer écoiner écointer écologiser
économiser écoper écorcher écorer écorner écornifler écosser écôter écouler
écourter écouter écrabouiller écraminer écraser écrêter écrivailler écroter
écrouer écrouler écroûter ectomiser écuisser éculer écumer écurer écussonner
eczématiser édéniser édenter édicter éditer éditorialiser édulcorer éduquer
éfaufiler effaner effarer effaroucher effectuer efféminer effeuiller effiler
effilocher effiloquer efflanquer effleurer efflorer effluer effluver effondrer
effriter effruiter effumer effuser égailler égaler égaliser égarer égauler
églomiser égobler égorgiller égosiller égousser égoutter égrainer égraminer
égrapper égratigner égravillonner égriser égueuler égyptianiser éherber
éhouper éjaculer éjarrer éjecter éjointer élaborer élaguer élaïdiser élaiter
élaver électriser électrocuter électrodéposer électrolocaliser électrolyser
électroner électroniser électropolymériser électropuncturer électrozinguer
élégantiser éliciter élider élimer éliminer élinguer ellipser éloigner
élucider élucubrer éluder éluer émailler émanciper émaner émasculer
embabouiner emballer emballotter embaluchonner embalustrer embander
embarbouiller embarder embarquer embarrer embastiller embastionner embâtonner
embaucher embaumer embecquer embéguiner emberlicoquer emberlificoter
emberloquer emberlucoquer embesogner embêter embidonner embieller emblaver
emblématiser embler embobeliner embobiner emboiser emboîter emboliser embosser
emboucaner emboucauter emboucher emboucler embouer embouquer embourber
embourgeoiser embourrer embourser embouser embouteiller embouter embrancher
embraquer embraser embrelicoquer embreuver embrigader embringuer embrocher
embrouiller embroussailler embruiner embrumer embûcher embuer embusquer
émender émerillonner émeriser émerveiller émétiser émeuler émietter émigrer
emmagasiner emmailler emmailloter emmancher emmarquiser emmêler emmenotter
emmerder emmeuler emmiasmer emmieller emmitonner emmitouffler emmitoufler
emmotter emmoufler emmouscailler emmurailler emmurer émonder émorfiler
émotionner émotter émoucher émousser émoustiller empaffer empailler empaler
empalmer empanacher empanner empapaouter empapillonner empapilloter
emparadiser emparer emparquer empatter empaumer empêcher empeigner empeloter
empelotonner empenner emperler emperruquer empester empêtrer emphatiser
empierrer empiffrer empiler empirer emplanter emplastrer emplâtrer emplomber
emplumer empocher empoicrer empoigner empointer empoisonner empoisser
empoissonner empommer emporter empoter empourprer empouter empresser
emprésurer emprisonner emprunter émuler émulsionner enamourer énamourer
enarbrer énaser encabaner encadrer encagouler encaisser encalminer encanailler
encaper encapsuler encapuchonner encaquer encarter encartonner encaserner
encaster encastrer encaustiquer encaver enceinter enceintrer encenser
encéphaliser encercler enchaîner enchanter enchaper enchaperonner encharbonner
encharner enchasser enchâsser enchatonner enchausser enchaussumer enchemiser
enchevaler enchevaucher enchevêtrer encirer enclaver enclencher encloîtrer
enclouer encocher encoder encoffrer encoigner encoller encombrer encorbeller
encorder encorner encoubler encourtiner encrer encrister encroiser encrotter
encrouer encroûter encrypter encuivrer enculer encuver endauber endenter
endetter endeuiller endêver endiabler endiamanter endiguer endimancher
endisquer endivisionner endoctriner endogénéiser endosmoser endosser
endothélialiser endouzainer endrailler endurer énergétiser énergiser énerver
éneyer enfaçonner enfaîter enfanter enfariner enfermer enferrailler enferrer
enficher enfieller enfiler enflammer enflaquer enfler enfleurer enformer
enfosser enfourcher enfourner enfricher enfumer enfutailler enfûter engainer
engaller engamer enganter engargousser engaver engazonner engeigner engendrer
engerber englaçonner englober engluer engober engommer engouer engouffrer
engouler engraisser engraver engrêler engrisailler engrosser engueuler
engueuser enguicher enguirlander enharnacher enherber énieller enivrer enjaler
enjamber enjanter enjôler enjoliver enjouer enjouguer enjuguer enjuiver
enkikiner enkyster enlarmer enligner enlinceuler enliser enluminer énoliser
énoper énouer enquêter enquiller enquinauder enquiquiner enraciner enrailler
enrégimenter enregistrer enrêner enrésiner enrhumer enrober enrocher enrôler
enrouer enrouiller enrouler enrubanner ensabler ensaboter ensacher ensafraner
ensaisiner ensanglanter ensauver enseigner enseller enserrer enseuiller
ensiler ensiloter ensimer ensoleiller ensommeiller ensoufrer ensouiller
ensoupler ensoutaner ensucrer ensuifer ensuquer entabler entacher entâcher
entailler entamer entaquer entartrer enter entériner enterrer entêter
enthousiasmer enticher entoiler entôler entomber entonner entortiller entourer
entourlouper entraccorder entraccuser entradmirer entraider entraîner entraver
entrebailler entrebâiller entrechoquer entreciter entrecouper entrecroiser
entredéchirer entre-déchirer entredévorer entre-dévorer entredonner
entrefermer entregloser entregreffer entrelarder entrelouer entremêler
entrepardonner entrepointiller entreposer entrequereller entrer entreregarder
entreserrer entretailler entreteiller entretoiser entretuer entrevoûter
entrexaminer entruster entuber enturbanner énupler énuquer envacuoler envaler
envaper envaser envelopper envenimer enverguer enverrer envider envirer
environner envoiler envoisiner envoler envoûter enwagonner éoliser épailler
épaler épamprer épancher épanner épanouiller épargner éparpiller épater
épaufrer épauler épépiner éperonner épeuler épeurer épierrer épigéniser
épilamer épiler épiloguer épimériser épiner épingler épisser épithélialiser
épithétiser éplorer éplucher époiler époinçonner épointer épointiller
épontiller épouffer épouiller époumoner époumonner épouser époustoufler
épouvanter éprouver épuiser épurer équerrer équeuter équilibrer équiper
équipoller équivoquer éradiquer érafler | |
found')
return value
#
def load_resources(self, resfile:str, *, delimiter:str=',', strict:bool=True, escaped:bool=False) -> dict:#, keyname:Union[str,int]='Key', valname:Union[str,int]='Value') -> dict:
resources:dict = {}
respath:str = os.path.join(os.path.dirname(self.filename), resfile)
# keycol = keyname if isinstance(keyname, int) else None
# valcol = valname if isinstance(valname, int) else None
with open(respath, 'rt', encoding='utf-8') as f:
print('Resources:')
reader = csv.reader(f, delimiter=delimiter or ',')
first = True
for i,row in enumerate(reader):
if not row: #TODO: is this possible?
continue
if first:
keyname_, valname_ = row
print(keyname_, valname_)
first = False
if strict and tuple(r.lower() for r in row) != ('key', 'value'):
raise Exception(f'Invalid resource file {resfile!r}. Expected first row to have column names [\'Key\',\'Value\'], not {row!r}')
continue
if not strict and len(row) == 1:
key, value = row[0], ''
else:
key, value = row
if escaped:
value = unescape(value, not strict)
if strict and key in resources:
raise Exception(f'Resource file {resfile!r} defines duplicate key {key!r} on line {(i+1)}')
print(f'[{key!r}] {escape_ignorequotes(value)}')
resources[key] = value
return resources
#
#endregion
#
# def skipws(self, *, pos:int=..., peek:bool=False):
# if pos is Ellipsis: pos = self.pos
# token = self.parse_token(pos=pos, peek=True)
# while token.type is TokenType.WHITESPACE:
# pos = token.end
# if not peek:
# self.pos = pos
# token = self.parse_token(pos=pos, peek=True)
# return token.end
#
    def next_line(self) -> bool:
        """Advance to the next line of the input file.

        Returns False at EOF, True otherwise.  Updates line, line_num and
        pos, and maintains the is_eol/is_eof flags; when a block comment
        spans lines, the leading comment text of the new line is consumed
        immediately so parsing resumes after it.
        """
        if self.is_eof:
            return False # EOF
        self.is_eol = False
        self.line = self.file.readline()
        self.line_num += 1
        # print(f'{self.line_num} : ', end='')
        self.pos = 0
        if not self.line:
            # readline() returns '' only at true EOF (blank lines are '\n')
            self.is_eol = True
            self.is_eof = True
            return False # EOF
        self.line = self.line.rstrip('\r\n')
        if not self.line:
            # an empty (after strip) line is immediately at end-of-line
            self.is_eol = True
        elif self.is_block_comment:
            # skip beginning whitespace
            self._next_token_handle_ws(self.pos)
        return True
#
    def _next_token_handle_ws(self, pos:int=...) -> Optional[ParseToken]:
        """If the next token is whitespace, any directly-connected comments
        are merged into a single token.

        Returns a WHITESPACE or EOL ParseToken (advancing self.pos), or
        None when the text at *pos* is not whitespace/comment/EOL.
        """
        if pos is Ellipsis: pos = self.pos
        end = pos
        token_type:TokenType = None
        if self.is_block_comment:
            # currently inside a multi-line /* ... */ : look for its end
            RE_COMMENT_END = re.compile(r"^.*(\*\/)")
            m:Match = RE_COMMENT_END.search(self.line[pos:])
            if m:
                # comment closes on this line; treat it as whitespace
                end = m.end() + pos
                self.is_block_comment = False
                self.block_comment_line = None
                self.block_comment_pos = None
                token_type = TokenType.WHITESPACE
            else:
                # comment continues past this line: the rest is EOL
                end = pos
                self.is_eol = True
                token_type = TokenType.EOL
        if not self.is_block_comment:
            # specially handle whitespace and normalize comments
            RE_ALL_WHITESPACE = re.compile(r"^(?:(\s+)|(\/\*[^\n]*?\*\/)|((?:\/\/|;).*$)|(\/\*)|($))")#|(.*\*\/)")
            # if not isinstance(pos, int): print(f'pos type is {pos.__class__.__name__}')
            m:Match = RE_ALL_WHITESPACE.search(self.line[pos:])
            while m:
                if m[1]: # whitespace
                    end = m.end() + pos
                    token_type = TokenType.WHITESPACE
                elif m[2]: # inline comment
                    # pad with whitespace to preserve character position
                    #space = re.sub(r"[^\r\n]", r" ", self.line[m.start()+pos:m.end()+pos])
                    #self.line = space.join( (self.line[:m.start()+pos], self.line[m.end()+pos:]) )
                    end = m.end() + pos
                    token_type = TokenType.WHITESPACE # inline-comments constitute whitespace (even if VSCode doesn't show it)
                elif m[3]: # line comment (EOL)
                    # self.line = self.line[:m.start()+pos]
                    end = m.start() + pos
                    self.is_eol = True
                    token_type = TokenType.EOL
                    break # break, we're done
                elif m[4]: # open block comment (EOL+)
                    # self.line = self.line[:m.start()+pos]
                    end = m.start() + pos
                    self.is_eol = True
                    self.is_block_comment = True
                    self.block_comment_line = self.line_num
                    self.block_comment_pos = m.start() + pos
                    token_type = TokenType.EOL
                    break # break, we're done
                elif m[5] is not None: # EOL
                    end = m.start() + pos
                    self.is_eol = True
                    token_type = TokenType.EOL
                    break # break, we're done
                # next match
                # NOTE(review): this search slices from m.end()+pos, but the
                # loop body keeps adding the original 'pos' to the new match's
                # offsets, so positions computed on the second and later
                # iterations look shifted toward the line start — confirm
                # against the emitted token stream before relying on merged
                # whitespace+comment runs.
                m = RE_ALL_WHITESPACE.search(self.line[m.end()+pos:])
        if token_type is TokenType.WHITESPACE:
            # only when there's non-whitespace left on the line
            token = ParseToken(token_type, self.line[pos:end], self.line, self.line_num, pos, end)
            parse_whitespace(token)
            self.pos = token.end
            return token
        elif token_type is TokenType.EOL:
            # fudge the numbers for poorly written token parsing :)
            token = ParseToken(token_type, '', self.line, self.line_num, pos, pos)
            parse_eol(token)
            self.pos = token.end
            return token
        return None # not whitespace, carry on~
    def next_token(self, pos:int=...) -> ParseToken:
        """Parse and return the next token starting at ``pos``
        (default: the current parser position).

        Whitespace/comments are merged by ``_next_token_handle_ws``; at end
        of line (or file) an EOL/EOF token is synthesized. Raises if no
        known token pattern matches at the current position.
        """
        if pos is Ellipsis: pos = self.pos
        if self.is_eol or self.is_eof: #TODO: remove is_eof check?
            # synthesize a zero-width EOL (or EOF) token at the current spot
            token = ParseToken(TokenType.EOL, '', self.line, self.line_num, pos, pos)
            parse_eol(token)
            if self.is_eof:
                token.kind = TokenKind.EOF
            else:
                token.kind = TokenKind.EOL
            return token
        # specially handle whitespace and normalize comments
        token = self._next_token_handle_ws(pos)
        if token is not None:
            self.pos = token.end
            return token
        # try to match normal tokens; MATCHING maps TokenType -> (regex, parser)
        for token_type,(pattern,parser) in MATCHING.items():
            m:Match = pattern.search(self.line[pos:])
            if m:
                # if 'func' in m[0] or 'void' in m[0]:
                #     input('found it!')
                token = ParseToken(token_type, m, self.line, self.line_num, m.start()+pos, m.end()+pos)
                self.pos = token.end
                parser(token) # extracts token value
                return token
        # nothing matched: print a caret diagnostic, then fail hard
        print(f'failed to find token type at line {self.line_num}, pos {pos+1}\n{self.line!s}')
        print(' ' * pos + '^')
        raise Exception(f'failed to find token type at line {self.line_num}, pos {pos+1}\n{self.line!r}')
    def next_token_skipws(self, nextline:bool=False) -> ParseToken:
        """Return the next non-whitespace token.

        When ``nextline`` is True, EOL tokens are also skipped by advancing
        to the following line, so the scan may continue across lines.
        """
        token:ParseToken = self.next_token()  # annotation fixed: was TokenType
        while token.type is TokenType.WHITESPACE or (token.kind is TokenKind.EOL and nextline):
            if token.kind is TokenKind.EOL:
                self.next_line()
            token = self.next_token()
        return token
    def begin_function(self, func_token:ParseToken):
        """Parse a function declaration header and open a new function scope.

        Expected token sequence after the ``func_token`` keyword:
        whitespace, a hash name, ``(``, a comma-separated list of TYPE
        tokens, ``)``, an optional ``entrypoint`` directive, then ``{``
        followed by end of line. On success, sets ``current_function`` and
        appends it to ``functions``. Raises on any protocol violation or if
        a previous function is still open.
        """
        if self.current_function is not None:
            raise Exception('attempted to declare new function while previous has not been closed')
        self.require_ws(func_token)
        # token = self.next_token()
        # if token.type is not TokenType.WHITESPACE:
        #     raise Exception(f'no whitespace after function {func_token.text!r} keyword')
        hash_token = token = self.next_token()
        if token.type is not TokenType.HASH:
            raise Exception(f'expected hash name after function {func_token.text!r} keyword')
        if token.kind is TokenKind.INLINE_HASH:
            # resolve inline hash text to its literal hash value
            token.value = self.inline_hash(token.value)
            token.kind = TokenKind.LITERAL_HASH
        function = FunctionEntry(token.value, self.bytecode_pos)
        token = self.next_token_skipws(nextline=True)
        if token.type is not TokenType.PUNCTUATION or token.value != '(':
            raise Exception(f'expected function argument declarations for {func_token.text!r} {hash_token.value!r}')
        token = self.next_token_skipws(nextline=True)
        type_list:list = [] # not actually used
        # last_type tracks comma/type alternation: None means a comma (or the
        # opening paren) was just seen and a type is expected next
        last_type = None
        while token.type is not TokenType.PUNCTUATION or token.value != ')':
            if token.type is TokenType.PUNCTUATION:
                if token.value != ',':
                    raise Exception(f'unexpected punctuation {token.value!r} during function parameter declaration during {func_token.text!r} {hash_token.value!r}')
                elif last_type is None:
                    raise Exception(f'unexpected punctuation {token.value!r}, no type since last punctuation during {func_token.text!r} {hash_token.value!r}')
                else:
                    last_type = None
            elif token.kind is TokenKind.TYPE:
                if last_type is not None:
                    raise Exception(f'unexpected type {token.value!r} during function parameter declaration without comma separator during {func_token.text!r} {hash_token.value!r}')
                else:
                    last_type = token.value
                    type_list.append(token.value)
            else:
                raise Exception(f'unexpected token during function declaration parsing {token!r} during {func_token.text!r} {hash_token.value!r}')
            token = self.next_token_skipws(nextline=True)
        # a trailing comma leaves last_type None with a non-empty type_list
        if type_list and last_type is None:
            raise Exception(f'unexpected extra punctuation \',\' before closing type list during {func_token.text!r} {hash_token.value!r}')
        # check for potential entrypoint directive
        token = self.next_token_skipws(nextline=True)
        if token.kind is TokenKind.DIRECTIVE and token.value == 'entrypoint': #TODO: don't use hardcoded names here
            if self.main_offset is not None:
                raise Exception(f'more than one entrypoint defined during {func_token.text!r} {hash_token.value!r}')
            self.main_offset = self.bytecode_pos
            token = self.next_token_skipws(nextline=True)
        if token.type is not TokenType.PUNCTUATION or token.value != '{':
            raise Exception(f'expected function opening brace after parameters declarations for {func_token.text!r} {hash_token.value!r}')
        self.require_eol(token)
        self.current_function = function
        self.functions.append(function)
        #TODO: clear label caches, and start anew
    def end_function(self, end_token:ParseToken):
        """Close the currently-open function scope.

        Validates that no labels are dangling and that every jump/switch
        target recorded in ``unresolved_targets`` was resolved to a label;
        then clears the per-function label state. Raises if no function is
        open or any of the checks fail.
        """
        if self.current_function is None:
            raise Exception(f'closing {end_token.value!r} found without starting function')
        self.require_eol(end_token)
        self.current_function = None
        if self.current_labels:
            raise Exception(f'{len(self.current_labels)} labels defined with no next instruction at end of function')
        if self.unresolved_targets:
            # helper: render one unresolved instruction for the diagnostic dump
            def fmt_instr(i:Instruction):
                if i.opcode.mnemonic == "switch":
                    return f'{i.offset:05x}: {i.opcode.mnemonic} {i.switch_targets!r}'
                else:
                    return f'{i.offset:05x}: {i.opcode.mnemonic} {i.jump_target!r}'
            for s in self.unresolved_targets.values():
                print(', '.join(repr(fmt_instr(i)) for i in s))
            #print(list(f'{i.offset:05x}: {i.opcode.mnemonic} self.unresolved_targets.values()))
            raise Exception(f'{len(self.unresolved_targets)} unresolved targets defined with no label found by end of function')
        self.unresolved_targets.clear()
        self.current_labels.clear()
        self.labels.clear()
        #TODO: clear label caches and enforce that all referenced labels are identified
def require_eol(self, last_token:ParseToken):
token = self.next_token() #_skipws()
if token.type is not TokenType.EOL:
raise Exception(f'expected end of line after last token {last_token!r}, not token {token!r}')
def require_ws(self, last_token:ParseToken):
token = self.next_token() #_skipws()
if token.type is not TokenType.WHITESPACE:
raise Exception(f'expected whitespace after previous token {last_token!r}, not token {token!r}')
def parse_line(self):
if not self.next_line():
return False # EOF
token = self.next_token_skipws()
if token.type is TokenType.EOL:
return
if token.kind is TokenKind.DIRECTIVE:
if token.value in ('readmark', 'group', 'resfile'):
directive = token
self.require_ws(directive)
token = self.next_token()
if token.kind is TokenKind.DIRECTIVE_ARG:
if directive.value == 'group':
if token.value is not None:
raise Exception(f'invalid argument {token.text!r} for {directive.value} directive')
self.group_directive = token.value
elif directive.value == 'resfile':
if token.value is not None:
raise Exception(f'invalid argument {token.text!r} for {directive.value} directive')
self.resfile_directive = token.value
self.resource_dict = None #TODO: unload this?
else:
if token.value is None:
raise Exception(f'invalid argument {token.text!r} for {directive.value} directive')
if self.readmark_directive is not None:
raise Exception(f'readmark directive already previously defined as {self.readmark_directive!r}')
self.readmark_directive = token.value
elif token.kind is TokenKind.LITERAL_STRING and | |
# python standard imports
import re
from copy import copy
from pathlib import Path
# third-party imports
from yaml import safe_load, safe_dump
# internal imports
from .tokens import TokenType, StringBuilder
from .citation import Citation
from .regex_mods import process_pattern, match_regexes
_DEFAULT_CITATOR = None
class Template:
    """
    A pattern to recognize a single kind of citation and extract
    information from it.
    """
    def __init__(
        self,
        name: str,
        tokens: dict[str, TokenType] = {},
        meta: dict[str, str] = {},
        patterns: list[str] = [],
        broad_patterns: list[str] = [],
        shortform_patterns: list[str] = [],
        idform_patterns: list[str] = [],
        name_builder: StringBuilder = None,
        URL_builder: StringBuilder = None,
        inherit_template = None,
    ):
        """
        Arguments:
            name: the name of this template
            tokens: The full dictionary of TokenTypes that citations from
                this template can contain. These must be listed in order
                from least-specific to most. For instance, the U.S.
                Constitution's template puts 'article' before 'section'
                before 'clause', because articles contain sections, and
                sections contain clauses.
            patterns: Patterns are essentially regexes to recognize
                long-form citations to this template. However, wherever a
                token would appear in the regex, it should be replaced by
                the name of the token, enclosed in curly braces.
                Patterns are matched in the order that they are listed,
                so if there is a pattern that can only find a subset of
                tokens, it should be listed after the more-complete
                pattern so that the better match won't be precluded.
            broad_patterns: Same as `patterns`, except that they will
                only be used in contexts like search engines, where
                convenience is more important than avoiding false
                positive matches. When used, they will be used in
                addition to the normal patterns.
            shortform_patterns: Same as `patterns`, but these will only
                go into effect after a longform citation has been
                recognized. If a shortform pattern includes "same
                TOKEN_NAME" in curly braces, e.g. "{same volume}", the
                bracketed portion will be replaced with the exact text
                of the corresponding `raw_token` from the long-form
                citation.
            idform_patterns: Same as `shortform_patterns`, except that
                they will only be used to scan text until the next
                different citation occurs.
            URL_builder: `StringBuilder` to construct URLs for found
                citations
            name_builder: `StringBuilder` to construct canonical names
                of found citations
            meta: Optional metadata relating to this template. Patterns
                and StringBuilders can access metadata fields as if they
                were tokens, though fields can be overridden by tokens
                with the same name.
            inherit_template: another `Template` whose values this one
                should copy unless expressly overwritten.
        """
        kwargs = locals()
        for attr, default in {
            'name': None,
            'tokens': {},
            'patterns': [],
            'broad_patterns': [],
            'shortform_patterns': [],
            'idform_patterns': [],
            'URL_builder': None,
            'name_builder': None,
            'meta': {},
        }.items():
            if inherit_template and kwargs[attr] == default:
                # argument left at its default: inherit the parent's value
                value = inherit_template.__dict__.get(attr)
            elif attr.endswith('patterns') and not kwargs[attr]:
                value = []
            elif isinstance(kwargs[attr], (dict, list)):
                # shallow-copy containers so instances never alias the
                # shared mutable default arguments (or a caller's object)
                value = copy(kwargs[attr])
            else:
                value = kwargs[attr]
            self.__dict__[attr] = value
        # update inherited StringBuilders with the correct metadata
        if inherit_template and self.meta:
            if self.URL_builder:
                self.URL_builder = copy(self.URL_builder)
                self.URL_builder.defaults = self.meta
            if self.name_builder:
                self.name_builder = copy(self.name_builder)
                self.name_builder.defaults = self.meta
        # use the template's metadata and tokens to make a dictionary
        # of replacements to insert into the regexes before compilation;
        # tokens take precedence over metadata fields of the same name
        replacements = {k: str(v) for (k, v) in self.meta.items()}
        replacements.update({
            k: fr'(?P<{k}>{v.regex})(?!\w)'
            for (k, v) in self.tokens.items()
        })
        # compile the template's regexes and broad_regexes
        self.regexes = []
        self.broad_regexes = []
        for kind in ['regexes', 'broad_regexes']:
            if kind == 'broad_regexes':
                # broad matching adds the extra patterns and ignores case
                pattern_list = self.patterns + self.broad_patterns
                flags = re.I
            else:
                pattern_list = self.patterns
                flags = 0
            for p in pattern_list:
                pattern = process_pattern(
                    p,
                    replacements,
                    add_word_breaks=True
                )
                try:
                    regex = re.compile(pattern, flags)
                    self.__dict__[kind].append(regex)
                except re.error as e:
                    i = 'broad ' if kind == 'broad_regexes' else ''
                    raise re.error(
                        f'{self} template\'s {i}pattern "{pattern}" has '
                        f'an error: {e}'
                    ) from e
        # shortform/idform patterns are pre-processed but not compiled here
        self._processed_shortforms = [
            process_pattern(p, replacements, add_word_breaks=True)
            for p in self.shortform_patterns
        ]
        self._processed_idforms = [
            process_pattern(p, replacements, add_word_breaks=True)
            for p in self.idform_patterns
        ]
    @classmethod
    def from_dict(cls, name: str, values: dict, inheritables: dict={}):
        """
        Return a template from a dictionary of values, like a dictionary
        created by parsing a template from YAML format.
        """
        # YAML keys use spaces; constructor kwargs use underscores
        values = {
            k.replace(' ', '_'):v
            for k,v in values.items()
        }
        # when pattern is listed in singular form,
        # replace it with a one-item list
        singular_items = values.items()
        values = {}
        for key, value in singular_items:
            if key.endswith('pattern'):
                values[key + 's'] = [value]
            else:
                values[key] = value
        # unrelated: when a single pattern is split
        # into a list (likely to take advantage of
        # YAML anchors), join it into one string
        for k, v in values.items():
            if not k.endswith('patterns'):
                continue
            elif v is None:
                values[k] = None
                continue
            for i, pattern in enumerate(v):
                if type(pattern) is list:
                    values[k][i] = ''.join(pattern)
        inherit = values.pop('inherit', None)
        if inherit:
            try:
                # bugfix: dict.get() never raises KeyError, which made the
                # error below unreachable and silently ignored unknown
                # parent templates; use indexing instead
                values['inherit_template'] = inheritables[inherit]
            except KeyError:
                raise KeyError(
                    f'Template "{name}" tried to inherit unknown '
                    + f'template "{inherit}"'
                )
        # build StringBuilders, giving them the template metadata as defaults
        for key in ['name_builder', 'URL_builder']:
            data = values.get(key)
            if data:
                data['defaults'] = values.get('meta') or {}
                values[key] = StringBuilder.from_dict(data)
        values['tokens'] = {
            k: TokenType.from_dict(k, v)
            for k, v in values.get('tokens', {}).items()
        }
        return cls(name=name, **values)
    def to_dict(self) -> dict:
        "save this Template to a dictionary of values"
        output = {}
        if self.meta:
            output['meta'] = self.meta
        output['tokens'] = {
            k: v.to_dict() for k, v in self.tokens.items()
        }
        for key in ['patterns', 'shortform_patterns', 'idform_patterns']:
            value = self.__dict__.get(key)
            if not value:
                continue
            elif len(value) > 1:
                output[key] = value
            else: # de-pluralize lists that contain only one pattern
                output[key[:-1]] = value[0]
        for key in ['name_builder', 'URL_builder']:
            if self.__dict__.get(key):
                output[key] = self.__dict__[key].to_dict()
        # YAML-facing keys use spaces rather than underscores
        spaced_output = {k.replace('_', ' '):v for k, v in output.items()}
        return spaced_output
    def to_yaml(self) -> str:
        "save this Template to a YAML string"
        return safe_dump(
            {self.name: self.to_dict()},
            sort_keys = False,
            allow_unicode = True,
        )
    def cite(self, text, broad: bool=True, span: tuple=(0,)) -> Citation:
        """
        Return the first citation that matches this template. If 'broad'
        is True, case-insensitive matching and broad regex patterns will
        be used. If no matches are found, return None.
        """
        regexes = self.broad_regexes if broad else self.regexes
        matches = match_regexes(text, regexes, span=span)
        for match in matches:
            try:
                return Citation(match, self)
            except SyntaxError: # invalid citation
                continue
        else:
            return None
    def list_longform_cites(self, text, broad: bool=False, span: tuple=(0,)):
        """
        Get a list of all long-form citations to this template found in
        the given text.
        """
        cites = []
        regexes = self.broad_regexes if broad else self.regexes
        for match in match_regexes(text, regexes, span=span):
            try:
                cites.append(Citation(match, self))
            except SyntaxError: # invalid citation: skip it
                continue
        return cites
    def __str__(self):
        return self.name
    def __repr__(self):
        return (
            f'Template(name="{self.name}"'
            + (f', tokens={self.tokens}' if self.tokens else '')
            + (f', meta={self.meta}' if self.meta else '')
            + (f', patterns={self.patterns}' if self.patterns else '')
            + (
                f', broad_patterns={self.broad_patterns}'
                if self.broad_patterns else ''
            )
            + (
                f', shortform_patterns={self.shortform_patterns}'
                if self.shortform_patterns else ''
            )
            + (
                f', idform_patterns={self.idform_patterns}'
                if self.idform_patterns else ''
            )
            + (
                f', name_builder={self.name_builder}'
                if self.name_builder else ''
            )
            + (
                f', URL_builder={self.URL_builder}'
                if self.URL_builder else ''
            )
            + ')'
        )
    def __contains__(self, citation: Citation):
        # a citation "belongs to" the template with the same name
        return citation.template.name == self.name
    def __eq__(self, other_template):
        return repr(self) == repr(other_template)
class Citator:
"""
A collection of citation templates, and the tools to match text
against them en masse.
Attributes:
templates: a dictionary of citation templates that this citator
will try to match against
"""
def __init__(
self,
defaults = [
'caselaw',
'general federal law',
'specific federal laws',
'state law',
'secondary sources',
],
yaml_paths: list[str] = [],
templates: dict[str, Template] = {},
):
"""
Create a citator from any combination of CiteURL's default
template sets (by default, all of them), plus any custom
templates you want, either by pointing to custom YAML files or
making Template objects at runtime.
Arguments:
defaults: names of files to load from the citeurl/templates
folder. Each file contains one or more of CiteURL's
built-in templates relevant to the given topic.
yaml_paths: paths to custom YAML files to load templates
from. These are loaded after the | |
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(32)
new_vm._push(48)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_63(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {6089590155545428825848686802984512581899718912: 32}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(32)
new_vm._push(6089590155545428825848686802984512581899718912)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_64(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {115792089237316195423570985008687907853269984665640564039457584007913129639935: 48}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_65(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {0: 48}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_66(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {1: 48}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_67(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {57896044618658097711785492504343953926634992332820282019728792003956564819952: 48}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_68(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {3618502788666131106986593281521497120414687020801267626233049500247285301263: 48}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_69(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {16: 48}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(16)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_70(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {32: 48}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(32)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_71(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {48: 48}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(48)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_72(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {6089590155545428825848686802984512581899718912: 48}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(6089590155545428825848686802984512581899718912)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_73(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {115792089237316195423570985008687907853269984665640564039457584007913129639935: 6089590155545428825848686802984512581899718912}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_74(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {0: 6089590155545428825848686802984512581899718912}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_75(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {1: 6089590155545428825848686802984512581899718912}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_76(self):
#Make the constraint store
constraints = ConstraintSet()
#make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
balance = None
code = 'U'
storage = {57896044618658097711785492504343953926634992332820282019728792003956564819952: 6089590155545428825848686802984512581899718912}
world.create_account( address=address, balance=balance, code=code, storage=storage)
address=0x222222222222222222222222222222222222200
caller=origin=0x111111111111111111111111111111111111100
price=0
value=10000
bytecode='U'
data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
header = { 'coinbase': 0,
'timestamp': 0,
'number': 0,
'difficulty': 0,
'gaslimit': 0,
}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [])
def test_SSTORE_77(self):
    """SSTORE with a mid-range storage key: writing the value the slot
    already holds must complete cleanly and leave an empty stack.

    NOTE(review): generated EVM conformance test; `price` and `header` are
    prepared but never passed to the VM constructor — presumably kept for
    symmetry with sibling tests. Confirm before removing.
    """
    #Make the constraint store
    constraints = ConstraintSet()
    #make the ethereum world state
    world = evm.EVMWorld(constraints)
    # Pre-create the contract account whose storage slot the SSTORE targets;
    # bytecode 'U' is the single opcode 0x55 (SSTORE).
    address = 0x222222222222222222222222222222222222200
    balance = None
    code = 'U'
    storage = {3618502788666131106986593281521497120414687020801267626233049500247285301263: 6089590155545428825848686802984512581899718912}
    world.create_account( address=address, balance=balance, code=code, storage=storage)
    address=0x222222222222222222222222222222222222200
    caller=origin=0x111111111111111111111111111111111111100
    price=0
    value=10000
    bytecode='U'
    data = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
    header = { 'coinbase': 0,
        'timestamp': 0,
        'number': 0,
        'difficulty': 0,
        'gaslimit': 0,
        }
    gas = 1000000
    new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
    # SSTORE operand order: value pushed first, key ends up on top of stack.
    new_vm._push(6089590155545428825848686802984512581899718912)
    new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
    last_exception, last_returned = self._execute(new_vm)
    # The single opcode must execute without fault, advance pc past it,
    # and consume both operands.
    self.assertEqual(last_exception, None)
    self.assertEqual(new_vm.pc, 1)
    self.assertEqual(new_vm.stack, [])
def test_SSTORE_78(self):
#Make the constraint store
constraints | |
<gh_stars>0
#
#*******************************************************************************
# Copyright 2014-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#******************************************************************************/
import numpy as np
import numbers
from sklearn import decomposition
from sklearn.utils import check_array
from sklearn.decomposition.pca import PCA as PCA_original
from sklearn.decomposition.pca import (_infer_dimension_, svd_flip)
from sklearn.utils.validation import check_is_fitted
from sklearn.utils.extmath import stable_cumsum
from scipy.sparse import issparse
import daal4py
from .._utils import getFPType, method_uses_sklearn, method_uses_daal
import logging
def _daal4py_svd(X):
    """Thin SVD of *X* via the oneDAL kernel.

    Returns ``(U, s, V)`` with ``s`` flattened to 1-D, mirroring the shape
    contract of ``numpy.linalg.svd(X, full_matrices=False)``.
    """
    data = check_array(X, dtype=[np.float64, np.float32])
    algorithm = daal4py.svd(
        fptype=getFPType(data),
        method='defaultDense',
        leftSingularMatrix='requiredInPackedForm',
        rightSingularMatrix='requiredInPackedForm',
    )
    result = algorithm.compute(data)
    return (result.leftSingularMatrix,
            np.ravel(result.singularValues),
            result.rightSingularMatrix)
def _validate_n_components(n_components, n_samples, n_features):
if n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
elif not 0 <= n_components <= min(n_samples, n_features):
raise ValueError("n_components=%r must be between 0 and "
"min(n_samples, n_features)=%r with "
"svd_solver='full'"
% (n_components, min(n_samples, n_features)))
elif n_components >= 1:
if not isinstance(n_components, (numbers.Integral, np.integer)):
raise ValueError("n_components=%r must be of type int "
"when greater than or equal to 1, "
"was of type=%r"
% (n_components, type(n_components)))
def _process_n_components_None(self_n_components, self_svd_solver, X_shape):
# Handle n_components==None
if self_n_components is None:
if self_svd_solver != 'arpack':
n_components = min(X_shape)
else:
n_components = min(X_shape) - 1
else:
n_components = self_n_components
return n_components
def _fit_full(self, X, n_components):
    """Fit the model by computing full SVD on X.

    Module-level implementation used as a method body (``self`` is the
    estimator). Mutates *X* in place by centering it, sets the fitted
    attributes on *self*, and returns the full ``(U, S, V)`` decomposition.
    """
    n_samples, n_features = X.shape
    _validate_n_components(n_components, n_samples, n_features)
    # Center data (in place: callers pass a copy when self.copy is set)
    self.mean_ = np.mean(X, axis=0)
    X -= self.mean_
    # The DAAL kernel is used only for tall float32/float64 matrices;
    # everything else falls back to NumPy's LAPACK-backed SVD.
    if X.shape[0] > X.shape[1] and (X.dtype == np.float64 or X.dtype == np.float32):
        U, S, V = _daal4py_svd(X)
    else:
        U, S, V = np.linalg.svd(X, full_matrices=False)
    # flip eigenvectors' sign to enforce deterministic output
    U, V = svd_flip(U, V)
    components_ = V
    # Get variance explained by singular values
    explained_variance_ = (S ** 2) / (n_samples - 1)
    total_var = explained_variance_.sum()
    explained_variance_ratio_ = explained_variance_ / total_var
    # Postprocess the number of components required
    if n_components == 'mle':
        n_components = \
            _infer_dimension_(explained_variance_, n_samples, n_features)
    elif 0 < n_components < 1.0:
        # number of components for which the cumulated explained
        # variance percentage is superior to the desired threshold
        ratio_cumsum = explained_variance_ratio_.cumsum()
        n_components = np.searchsorted(ratio_cumsum, n_components) + 1
    # Compute noise covariance using Probabilistic PCA model
    # The sigma2 maximum likelihood (cf. eq. 12.46)
    if n_components < min(n_features, n_samples):
        self.noise_variance_ = explained_variance_[n_components:].mean()
    else:
        self.noise_variance_ = 0.
    self.n_samples_, self.n_features_ = n_samples, n_features
    # Truncate the fitted attributes to the resolved component count;
    # U, S, V are returned untruncated, matching sklearn's _fit_full.
    self.components_ = components_[:n_components]
    self.n_components_ = n_components
    self.explained_variance_ = explained_variance_[:n_components]
    self.explained_variance_ratio_ = \
        explained_variance_ratio_[:n_components]
    self.singular_values_ = S[:n_components]
    return U, S, V
# Alias the module-level implementation so PCA_prev._fit_full can delegate to
# it even though the name `_fit_full` is also used for methods below.
_fit_full_copy = _fit_full
class PCA_prev(PCA_original):
    # Compatibility subclass: identical to stock scikit-learn PCA except
    # that the full-SVD path is routed through the module-level
    # DAAL-accelerated _fit_full implementation.
    __doc__ = PCA_original.__doc__

    def __init__(self, n_components=None, copy=True, whiten=False,
                 svd_solver='auto', tol=0.0, iterated_power='auto',
                 random_state=None):
        # Mirror PCA_original.__init__: store hyper-parameters verbatim so
        # get_params()/clone() keep working.
        self.n_components = n_components
        self.copy = copy
        self.whiten = whiten
        self.svd_solver = svd_solver
        self.tol = tol
        self.iterated_power = iterated_power
        self.random_state = random_state

    def _fit_full(self, X, n_components):
        # Delegate to the module-level implementation captured in
        # _fit_full_copy.
        return _fit_full_copy(self, X, n_components)
class PCA(PCA_original):
def __init__(self, n_components=None, copy=True, whiten=False,
             svd_solver='auto', tol=0.0, iterated_power='auto',
             random_state=None):
    """Record the PCA hyper-parameters; no computation happens here."""
    hyper_params = dict(
        n_components=n_components,
        copy=copy,
        whiten=whiten,
        svd_solver=svd_solver,
        tol=tol,
        iterated_power=iterated_power,
        random_state=random_state,
    )
    # Plain attribute storage, as sklearn's get_params()/clone() expect.
    for attr_name, attr_value in hyper_params.items():
        setattr(self, attr_name, attr_value)
def _fit_daal4py(self, X, n_components):
    """Fit PCA attributes using the oneDAL svdDense PCA kernel.

    Computes (up to) ``daal_n_components`` principal components of *X*
    and sets the standard sklearn fitted attributes on *self*. Unlike
    the NumPy path, *X* is centered inside the DAAL pipeline rather than
    in place. Returns None.
    """
    n_samples, n_features = X.shape
    n_sf_min = min(n_samples, n_features)
    _validate_n_components(n_components, n_samples, n_features)
    # 'mle' and fractional requests need the spectrum to post-select from,
    # so ask DAAL for the widest sensible decomposition in those cases.
    if n_components == 'mle':
        daal_n_components = n_features
    elif n_components < 1:
        daal_n_components = n_sf_min
    else:
        daal_n_components = n_components
    fpType = getFPType(X)
    # Centering (mean removal, no scaling) is chained into the PCA kernel.
    centering_algo = daal4py.normalization_zscore(
        fptype=fpType, doScale=False)
    pca_alg = daal4py.pca(
        fptype=fpType,
        method='svdDense',
        normalization=centering_algo,
        resultsToCompute='mean|variance|eigenvalue',
        isDeterministic=True,
        nComponents=daal_n_components
    )
    pca_res = pca_alg.compute(X)
    self.mean_ = pca_res.means.ravel()
    variances_ = pca_res.variances.ravel()
    components_ = pca_res.eigenvectors
    explained_variance_ = pca_res.eigenvalues.ravel()
    tot_var = explained_variance_.sum()
    explained_variance_ratio_ = explained_variance_ / tot_var
    if n_components == 'mle':
        n_components = \
            _infer_dimension_(explained_variance_, n_samples, n_features)
    elif 0 < n_components < 1.0:
        # number of components for which the cumulated explained
        # variance percentage is superior to the desired threshold
        ratio_cumsum = stable_cumsum(explained_variance_ratio_)
        n_components = np.searchsorted(ratio_cumsum, n_components) + 1
    # Compute noise covariance using Probabilistic PCA model
    # The sigma2 maximum likelihood (cf. eq. 12.46)
    if n_components < n_sf_min:
        if explained_variance_.shape[0] == n_sf_min:
            self.noise_variance_ = explained_variance_[n_components:].mean()
        else:
            # DAAL returned a truncated spectrum: recover the residual
            # variance from the per-feature variances instead.
            resid_var_ = variances_.sum()
            resid_var_ -= explained_variance_[:n_components].sum()
            self.noise_variance_ = resid_var_ / (n_sf_min - n_components)
    else:
        self.noise_variance_ = 0.
    self.n_samples_, self.n_features_ = n_samples, n_features
    self.components_ = components_[:n_components]
    self.n_components_ = n_components
    self.explained_variance_ = explained_variance_[:n_components]
    self.explained_variance_ratio_ = \
        explained_variance_ratio_[:n_components]
    # Eigenvalues are variances; scale back to singular values.
    self.singular_values_ = np.sqrt((n_samples - 1) * self.explained_variance_)
def _transform_daal4py(self, X, whiten=False, scale_eigenvalues=True, check_X=True):
    """Project *X* onto the fitted components with the oneDAL transform.

    :param whiten: divide projections by the (optionally scaled) eigenvalues.
    :param scale_eigenvalues: use (n_samples_ - 1)-scaled eigenvalues.
    :param check_X: forward finiteness checking to check_array.
    Returns the transformed data as produced by daal4py.pca_transform.
    """
    check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)
    X = check_array(X, dtype=[np.float64, np.float32], force_all_finite=check_X)
    fpType = getFPType(X)
    tr_data = dict()
    if self.mean_ is not None:
        tr_data['mean'] = self.mean_.reshape((1, -1))
    # Supplying 'eigenvalue' makes the DAAL kernel divide by sqrt of it;
    # the combinations below reproduce sklearn's whitening conventions.
    if whiten:
        if scale_eigenvalues:
            tr_data['eigenvalue'] = (self.n_samples_ - 1) * self.explained_variance_.reshape((1, -1))
        else:
            tr_data['eigenvalue'] = self.explained_variance_.reshape((1, -1))
    elif scale_eigenvalues:
        # No whitening: a constant "eigenvalue" only rescales uniformly.
        tr_data['eigenvalue'] = np.full(
            (1, self.explained_variance_.shape[0]),
            self.n_samples_ - 1.0, dtype=X.dtype)
    if X.shape[1] != self.n_features_:
        raise ValueError("The number of features of the input data, {}, is not "
                         "equal to the number of features of the training data, {}".format(
                             X.shape[1], self.n_features_))
    tr_res = daal4py.pca_transform(
        fptype=fpType
    ).compute(X, self.components_, tr_data)
    return tr_res.transformedData
def _fit_full_daal4py(self, X, n_components):
    """Full-SVD fit via oneDAL, returning (U, S, V) like sklearn's _fit_full.

    Performs a complete decomposition first (sign flipping requires all
    components), then truncates the fitted attributes to the resolved
    *n_components*.
    """
    n_samples, n_features = X.shape
    # due to need to flip components, need to do full decomposition
    self._fit_daal4py(X, min(n_samples, n_features))
    # Whitened, eigenvalue-scaled projections reproduce the U factor.
    U = self._transform_daal4py(X, whiten=True, check_X=False, scale_eigenvalues=True)
    V = self.components_
    U, V = svd_flip(U, V)
    U = U.copy()
    V = V.copy()
    S = self.singular_values_.copy()
    if n_components == 'mle':
        n_components = \
            _infer_dimension_(self.explained_variance_, n_samples, n_features)
    elif 0 < n_components < 1.0:
        # number of components for which the cumulated explained
        # variance percentage is superior to the desired threshold
        ratio_cumsum = stable_cumsum(self.explained_variance_ratio_)
        n_components = np.searchsorted(ratio_cumsum, n_components) + 1
    # Compute noise covariance using Probabilistic PCA model
    # The sigma2 maximum likelihood (cf. eq. 12.46)
    if n_components < min(n_features, n_samples):
        self.noise_variance_ = self.explained_variance_[n_components:].mean()
    else:
        self.noise_variance_ = 0.
    self.n_samples_, self.n_features_ = n_samples, n_features
    # Truncate fitted attributes; the returned U, S, V stay full-width.
    self.components_ = self.components_[:n_components]
    self.n_components_ = n_components
    self.explained_variance_ = self.explained_variance_[:n_components]
    self.explained_variance_ratio_ = \
        self.explained_variance_ratio_[:n_components]
    self.singular_values_ = self.singular_values_[:n_components]
    return U, S, V
def _fit_full_vanilla(self, X, n_components):
    """Fit the model by computing full SVD on X.

    Pure NumPy fallback mirroring sklearn's PCA._fit_full; centers *X*
    in place, sets fitted attributes on *self*, returns full (U, S, V).
    Assumes n_components has already been validated by the caller.
    """
    n_samples, n_features = X.shape
    # Center data
    self.mean_ = np.mean(X, axis=0)
    X -= self.mean_
    U, S, V = np.linalg.svd(X, full_matrices=False)
    # flip eigenvectors' sign to enforce deterministic output
    U, V = svd_flip(U, V)
    components_ = V
    # Get variance explained by singular values
    explained_variance_ = (S ** 2) / (n_samples - 1)
    total_var = explained_variance_.sum()
    explained_variance_ratio_ = explained_variance_ / total_var
    # Postprocess the number of components required
    if n_components == 'mle':
        n_components = \
            _infer_dimension_(explained_variance_, n_samples, n_features)
    elif 0 < n_components < 1.0:
        # number of components for which the cumulated explained
        # variance percentage is superior to the desired threshold
        ratio_cumsum = stable_cumsum(explained_variance_ratio_)
        n_components = np.searchsorted(ratio_cumsum, n_components) + 1
    # Compute noise covariance using Probabilistic PCA model
    # The sigma2 maximum likelihood (cf. eq. 12.46)
    if n_components < min(n_features, n_samples):
        self.noise_variance_ = explained_variance_[n_components:].mean()
    else:
        self.noise_variance_ = 0.
    self.n_samples_, self.n_features_ = n_samples, n_features
    self.components_ = components_[:n_components]
    self.n_components_ = n_components
    self.explained_variance_ = explained_variance_[:n_components]
    self.explained_variance_ratio_ = \
        explained_variance_ratio_[:n_components]
    self.singular_values_ = S[:n_components]
    return U, S, V
def _fit_full(self, X, n_components):
    """Full-SVD dispatcher: oneDAL for tall float matrices, NumPy otherwise."""
    n_samples, n_features = X.shape
    _validate_n_components(n_components, n_samples, n_features)
    daal_ready = n_samples > n_features and X.dtype in (np.float64, np.float32)
    if daal_ready:
        logging.info("sklearn.decomposition.PCA.fit: " + method_uses_daal)
        return self._fit_full_daal4py(X, n_components)
    logging.info("sklearn.decomposition.PCA.fit: " + method_uses_sklearn)
    return self._fit_full_vanilla(X, n_components)
def _fit(self, X):
"""Dispatch to the right submethod depending on the chosen solver."""
# Raise an error for sparse input.
# This is more informative than the generic one raised by check_array.
if issparse(X):
raise TypeError('PCA does not support sparse input. See '
'TruncatedSVD for a possible alternative.')
X = check_array(X, dtype=[np.float64, np.float32], ensure_2d=True,
copy=self.copy)
# Handle n_components==None
n_components = _process_n_components_None(
self.n_components, self.svd_solver, X.shape)
# Handle svd_solver
self._fit_svd_solver = self.svd_solver
if self._fit_svd_solver == 'auto':
# Small problem or n_components == 'mle', just call full PCA
if max(X.shape) <= 500 or n_components == 'mle':
self._fit_svd_solver = 'full'
elif n_components >= 1 and n_components < .8 * min(X.shape):
self._fit_svd_solver = 'randomized'
# This | |
"""
Connection to TaHoma API.
Connection to Somfy TaHoma REST API
"""
import json
import logging
import pprint
import traceback
import urllib.parse
import requests
BASE_URL = "https://tahomalink.com/enduser-mobile-web/enduserAPI/" # /doc for API doc
BASE_HEADERS = {"User-Agent": "mine"}
_LOGGER = logging.getLogger(__name__)
class TahomaApi:
"""Connection to TaHoma API."""
def __init__(self, userName, userPassword, **kwargs):
    """Initialize the TaHoma protocol and log in immediately.

    :param userName: TaHoma username
    :param userPassword: Password
    :param kwargs: Ignore, only for unit test reasons
    """
    self.__devices = {}
    self.__gateway = {}
    self.__location = {}
    self.__cookie = ""
    self.__logged_in = False
    self.__events_registration = None
    self.__username = userName
    # Fix: the password was assigned from an undefined placeholder token;
    # keep the real credential so login() can build its payload.
    self.__password = userPassword
    self.__setup = None
    # Eagerly authenticate; raises on network/credential problems.
    self.login()
def is_authenticated(self):
    """Return True if the user is authenticated.

    Performs a GET against the /authenticated endpoint using the stored
    session cookie. Raises Exception on non-200 responses or when the
    200 body is not valid JSON.
    """
    request = requests.get(
        BASE_URL + "authenticated",
        headers={"User-Agent": "mine", "Cookie": self.__cookie},
        timeout=10,
    )
    if request.status_code == 200:
        try:
            result = request.json()
        except ValueError as error:
            # A 200 with a non-JSON body means the protocol changed or a
            # proxy interfered.
            raise Exception("Not a valid result, protocol error: " + str(error))
        return result["authenticated"]
    else:
        raise Exception(
            "Could not check authenticated: " + str(request.status_code)
        )
def logout(self):
"""Logout from TaHoma API."""
if not self.__logged_in:
return True
request = requests.post(
BASE_URL + "logout",
headers={"User-Agent": "mine", "Cookie": self.__cookie},
timeout=10,
)
try:
result = request.json()
except ValueError as error:
raise Exception(
"Not a valid result for logout, "
+ "protocol error: "
+ request.status_code
+ " - "
+ request.reason
+ "("
+ error
+ ")"
)
if "error" in result.keys():
raise Exception("Could not logout: " + result["error"])
if request.status_code != 200:
raise Exception(
"Could not login, HTTP code: "
+ str(request.status_code)
+ " - "
+ request.reason
)
self.__logged_in = False
return True
def login(self):
"""Login to TaHoma API."""
if self.__logged_in:
return
login = {"userId": self.__username, "userPassword": <PASSWORD>}
header = BASE_HEADERS.copy()
request = requests.post(
BASE_URL + "login", data=login, headers=header, timeout=10
)
try:
result = request.json()
except ValueError as error:
raise Exception(
"Not a valid result for login, "
+ "protocol error: "
+ request.status_code
+ " - "
+ request.reason
+ "("
+ error
+ ")"
)
if "error" in result.keys():
raise Exception("Could not login: " + result["error"])
if request.status_code != 200:
raise Exception(
"Could not login, HTTP code: "
+ str(request.status_code)
+ " - "
+ request.reason
)
if "success" not in result.keys() or not result["success"]:
raise Exception("Could not login, no success")
cookie = request.headers.get("set-cookie")
if cookie is None:
raise Exception("Could not login, no cookie set")
self.__cookie = cookie
self.__logged_in = self.is_authenticated()
return self.__logged_in
def send_request(
    self, method, url: str, headers, data=None, timeout: int = 10, retries: int = 3
):
    """Wrap the http requests and retries.

    :param method: The method to use for the request: post, get, delete.
    :param url: The url to send the POST to.
    :param headers: The headers of the request.
    :param data: The data of the request.
    :param timeout: The timeout of the request.
    :param retries: Maximum number of retries.
    :return: the decoded JSON body of the successful response
    """
    if not self.is_authenticated():
        if not self.login():
            raise Exception("Could not get authenticated")
        headers["Cookie"] = self.__cookie
        # Fix: return the re-authenticated call's result. The original fell
        # through after this recursion, issuing the request a second time
        # and discarding this response.
        return self.send_request(method, url, headers, data, timeout, retries)
    stack = pprint.pformat(traceback.extract_stack())
    if "asyncio" in stack:
        _LOGGER.warning("I/O stack trace:\n" + stack)
    request = method(url, headers=headers, data=data, timeout=timeout)
    if request.status_code == 200:
        try:
            result = request.json()
        except ValueError as error:
            raise Exception("Not a valid result, protocol error: " + str(error))
        return result
    elif retries == 0:
        raise Exception(
            "Maximum number of consecutive retries reached. Error is:\n"
            + request.text
        )
    else:
        # Fix: propagate the retried call's result; the original returned
        # None on every retry path.
        return self.send_request(method, url, headers, data, timeout, retries - 1)
def get_user(self):
    """Get the user information from the server.

    :return: a dict with all the information
    :rtype: dict
    raises ValueError in case of protocol issues
    :Example:
    "creationTime": <time>,
    "lastUpdateTime": <time>,
    "userId": "<email for login>",
    "title": 0,
    "firstName": "<First>",
    "lastName": "<Last>",
    "email": "<contact email>",
    "phoneNumber": "<phone>",
    "mobilePhone": "<mobile>",
    "locale": "<two char country code>"
    :Warning:
    The type and amount of values in the dictionary can change any time.
    """
    request_headers = BASE_HEADERS.copy()
    request_headers["Cookie"] = self.__cookie
    return self.send_request(
        requests.get, BASE_URL + "enduser/mainAccount", headers=request_headers
    )
def get_setup(self):
    """Load the setup from the server.

    Fetches the box configuration; nothing is returned. Afterwards the
    devices are available through get_device/get_devices, and location
    and gateway are populated as well.
    raises ValueError in case of protocol issues
    :Seealso:
    - get_device
    - get_devices
    - location
    - gateway
    """
    request_headers = BASE_HEADERS.copy()
    request_headers["Cookie"] = self.__cookie
    payload = self.send_request(requests.get, BASE_URL + "setup", headers=request_headers)
    self.__setup = payload
    self._get_setup(payload)
def _get_setup(self, result):
"""Process the results from the server."""
self.__devices = {}
if "devices" not in result.keys():
raise Exception("Did not find device definition.")
for device_data in result["devices"]:
device = Device(self, device_data)
self.__devices[device.url] = device
self.__location = result["location"]
self.__gateway = result["gateways"]
@property
def location(self):
    """Return the location information stored in your TaHoma box.
    When the configuration has been loaded via get_setup this
    method retrieves all the location details which have
    been saved for your TaHoma box.
    :return: a dict with all the information
    :rtype: dict
    :Example:
    "creationTime": <time>,
    "lastUpdateTime": <time>,
    "addressLine1": "<street>",
    "postalCode": "<zip>",
    "city": "<city>",
    "country": "<country>",
    "timezone": "Europe/<city>",
    "longitude": 2.343,
    "latitude": 48.857,
    "twilightMode": 2,
    "twilightCity": "<city>",
    "summerSolsticeDuskMinutes": 1290,
    "winterSolsticeDuskMinutes": 990,
    "twilightOffsetEnabled": False,
    "dawnOffset": 0,
    "duskOffset": 0
    :Warning:
    The type and amount of values in the dictionary can change any time.
    :Seealso:
    - get_setup
    """
    # Populated by _get_setup(); remains an empty dict until get_setup()
    # has been called.
    return self.__location
@property
def gateway(self):
    """Return information about your TaHoma box.
    When the configuration has been loaded via get_setup this
    method retrieves all details your TaHoma box.
    :return: a list of all gateways with a dict per gateway with
    all the information
    :rtype: list
    :Example:
    [{
    "gatewayId": "1234-1234-1234",
    "type": 15,
    "placeOID": "12345678-1234-1234-1234-12345678",
    "alive": True,
    "timeReliable": True,
    "connectivity": {
    "status": "OK",
    "protocolVersion": "8"
    },
    "up-to-date": True,
    "functions": "INTERNET_AUTHORIZATION,SCENARIO_DOWNLOAD,
    SCENARIO_AUTO_LAUNCHING,SCENARIO_TELECO_LAUNCHING,
    INTERNET_UPLOAD,INTERNET_UPDATE,TRIGGERS_SENSORS",
    "mode": "ACTIVE"
    }]
    :Warning:
    The type and amount of values in the dictionary can change any time.
    :Seealso:
    - get_setup
    """
    # Populated by _get_setup() from the "gateways" key of the setup
    # payload; empty dict until get_setup() has been called.
    return self.__gateway
def get_devices(self):
"""Return all devices.
Which have been found with last get_setup request.
With a previous get_setup call the devices which have
been found will be returned.
:return: Returns a dictionary {device_url -> Device }
:rtype: dict
:Seealso:
- get_setup
"""
return self.__devices
def get_device(self, url):
"""Return a particular device.
Which have been found with the last get_setup request.
:param url: The device URL of the device to be returned.
:return: Return the device identified by url or None
:rtype: Device
:Seealso:
- get_setup
"""
return self.__devices[url]
def apply_actions(self, name_of_action, actions):
    """Run a labelled group of actions on the TaHoma box.

    :param name_of_action: the label/name for the action
    :param actions: an array of Action objects
    :return: the execution identifier
    :rtype: string
    raises ValueError in case of protocol issues
    :Seealso:
    - get_events
    - get_current_executions
    """
    request_headers = BASE_HEADERS.copy()
    request_headers["Cookie"] = self.__cookie
    request_headers["Content-Type"] = "application/json"
    payload = json.dumps(
        {
            "label": name_of_action,
            "actions": [action.serialize() for action in actions],
        },
        indent=None,
        sort_keys=True,
    )
    result = self.send_request(
        requests.post, BASE_URL + "exec/apply", request_headers, payload
    )
    if "execId" not in result:
        raise Exception("Could not run actions, missing execId.")
    return result["execId"]
def get_events(self):
    """Return a set of events.
    Which have been occurred since the last call of this method.
    This method should be called regulary to get all occurring
    Events. There are three different Event types/classes
    which can be returned:
    - DeviceStateChangedEvent, if any device changed it's state
    due to an applied action or just because of other reasons
    - CommandExecutionStateChangedEvent, a executed command goes
    through several phases which can be followed
    - ExecutionStateChangedEvent, ******** todo
    :return: an array of Events or empty array
    :rtype: list
    raises ValueError in case of protocol issues
    :Seealso:
    - apply_actions
    - launch_action_group
    - get_history
    """
    header = BASE_HEADERS.copy()
    header["Cookie"] = self.__cookie
    # Lazily register an event listener on first use; the returned id is
    # cached and reused for every subsequent fetch.
    if self.__events_registration is None:
        register_response = self.send_request(
            requests.post, BASE_URL + "events/register", header
        )
        self.__events_registration = register_response["id"]
    result = self.send_request(
        requests.post,
        BASE_URL + "events/" + self.__events_registration + "/fetch",
        headers=header,
    )
    return self._get_events(result)
def _get_events(self, result):
"""Run unit tests."""
events = []
for event_data in result:
event = Event.factory(event_data)
if event is not None:
events.append(event)
| |
<gh_stars>1-10
# coding: utf-8
"""
Eclipse Kapua REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class EndpointInfosApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_endpoint_info(self, scope_id, body, **kwargs):  # noqa: E501
    """Create a EndpointInfo  # noqa: E501

    Creates a new EndpointInfo based on the information provided in EndpointInfoCreator parameter.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.create_endpoint_info(scope_id, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str scope_id: The ScopeId in which to create the EndpointInfo (required)
    :param EndpointInfoCreator body: Provides the information for the new EndpointInfo to be created (required)
    :return: EndpointInfo
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async branches both just forward the *_with_http_info
    # result, so a single return covers both cases.
    return self.create_endpoint_info_with_http_info(scope_id, body, **kwargs)  # noqa: E501
def create_endpoint_info_with_http_info(self, scope_id, body, **kwargs):  # noqa: E501
    """Create a EndpointInfo  # noqa: E501

    Creates a new EndpointInfo based on the information provided in EndpointInfoCreator parameter.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.create_endpoint_info_with_http_info(scope_id, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str scope_id: The ScopeId in which to create the EndpointInfo (required)
    :param EndpointInfoCreator body: Provides the information for the new EndpointInfo to be created (required)
    :return: EndpointInfo
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['scope_id', 'body']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Swagger-codegen idiom: snapshot the arguments via locals() and merge
    # the validated kwargs into the same dict.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_endpoint_info" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'scope_id' is set
    if ('scope_id' not in params or
            params['scope_id'] is None):
        raise ValueError("Missing the required parameter `scope_id` when calling `create_endpoint_info`")  # noqa: E501
    # verify the required parameter 'body' is set
    if ('body' not in params or
            params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `create_endpoint_info`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'scope_id' in params:
        path_params['scopeId'] = params['scope_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/xml', 'application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/xml', 'application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['kapuaAccessToken']  # noqa: E501

    # Delegate the actual HTTP POST (and optional async handling) to the
    # shared ApiClient.
    return self.api_client.call_api(
        '/{scopeId}/endpointInfos', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='EndpointInfo',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def endpoint_info_count(self, scope_id, body, **kwargs):  # noqa: E501
    """Counts the EndpointInfos  # noqa: E501

    Counts the EndpointInfos with the given EndpointInfoQuery parameter returning the number of matching EndpointInfos  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.endpoint_info_count(scope_id, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str scope_id: The ScopeId in which to count results (required)
    :param EndpointInfoQuery body: The EndpointInfoQuery to use to filter count results (required)
    :return: CountResult
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async branches both just forward the *_with_http_info
    # result, so a single return covers both cases.
    return self.endpoint_info_count_with_http_info(scope_id, body, **kwargs)  # noqa: E501
def endpoint_info_count_with_http_info(self, scope_id, body, **kwargs):  # noqa: E501
    """Counts the EndpointInfos  # noqa: E501

    Counts the EndpointInfos with the given EndpointInfoQuery parameter returning the number of matching EndpointInfos  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.endpoint_info_count_with_http_info(scope_id, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str scope_id: The ScopeId in which to count results (required)
    :param EndpointInfoQuery body: The EndpointInfoQuery to use to filter count results (required)
    :return: CountResult
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['scope_id', 'body']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Swagger-codegen idiom: snapshot the arguments via locals() and merge
    # the validated kwargs into the same dict.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method endpoint_info_count" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'scope_id' is set
    if ('scope_id' not in params or
            params['scope_id'] is None):
        raise ValueError("Missing the required parameter `scope_id` when calling `endpoint_info_count`")  # noqa: E501
    # verify the required parameter 'body' is set
    if ('body' not in params or
            params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `endpoint_info_count`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'scope_id' in params:
        path_params['scopeId'] = params['scope_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/xml', 'application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/xml', 'application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['kapuaAccessToken']  # noqa: E501

    # Delegate the actual HTTP POST (and optional async handling) to the
    # shared ApiClient.
    return self.api_client.call_api(
        '/{scopeId}/endpointInfos/_count', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='CountResult',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def endpoint_info_delete(self, scope_id, endpoint_info_id, **kwargs): # noqa: E501
"""Delete an EndpointInfo # noqa: E501
Deletes the EndpointInfo specified by the \"endpointInfoId\" path parameter. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.endpoint_info_delete(scope_id, endpoint_info_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the EndpointInfo to delete. (required)
:param str endpoint_info_id: The id of the EndpointInfo to be deleted (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.endpoint_info_delete_with_http_info(scope_id, endpoint_info_id, **kwargs) # noqa: E501
else:
(data) = self.endpoint_info_delete_with_http_info(scope_id, endpoint_info_id, **kwargs) # noqa: E501
return data
def endpoint_info_delete_with_http_info(self, scope_id, endpoint_info_id, **kwargs): # noqa: E501
"""Delete an EndpointInfo # noqa: E501
Deletes the EndpointInfo specified by the \"endpointInfoId\" path parameter. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.endpoint_info_delete_with_http_info(scope_id, endpoint_info_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the EndpointInfo to delete. (required)
:param str endpoint_info_id: The id of the EndpointInfo to be deleted (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'endpoint_info_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method endpoint_info_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `endpoint_info_delete`") # noqa: E501
# verify the required parameter 'endpoint_info_id' is set
if ('endpoint_info_id' not in params or
params['endpoint_info_id'] is None):
raise ValueError("Missing the required parameter `endpoint_info_id` when calling `endpoint_info_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
if 'endpoint_info_id' in params:
path_params['endpointInfoId'] = params['endpoint_info_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/endpointInfos/{endpointInfoId}', 'DELETE',
path_params,
query_params,
| |
Space for comments on flags (str)
'typ', # Type of data (str='xyzf')
'sectime' # Secondary time variable (date2num)
]
# Numeric data columns only: KEYLIST[1:16] (x .. df), i.e. no 'time' and no
# string/flag/comment keys.
NUMKEYLIST = KEYLIST[1:16]
# Empty key values at initiation of stream:
KEYINITDICT = {'time':0,'x':float('nan'),'y':float('nan'),'z':float('nan'),'f':float('nan'),
                't1':float('nan'),'t2':float('nan'),'var1':float('nan'),'var2':float('nan'),
                'var3':float('nan'),'var4':float('nan'),'var5':float('nan'),'dx':float('nan'),
                'dy':float('nan'),'dz':float('nan'),'df':float('nan'),'str1':'-','str2':'-',
                'str3':'-','str4':'-','flag':'0000000000000000-','comment':'-','typ':'xyzf',
                'sectime':float('nan')}
# Columns that may carry flag information: 'time' plus all numeric columns.
FLAGKEYLIST = KEYLIST[:16]
# KEYLIST[:8] # only primary values with time
# KEYLIST[1:8] # only primary values without time

# Formats supported by MagPy read function:
# Mapping: format identifier -> [access mode, description], where the mode is
# 'r' (read only), 'w' (write only), 'rw' (both) or '-' (neither).
PYMAG_SUPPORTED_FORMATS = {
                'IAGA':['rw','IAGA 2002 text format'],
                'WDC':['rw','World Data Centre format'],
                'IMF':['rw', 'Intermagnet Format'],
                'IAF':['rw', 'Intermagnet archive Format'],
                'BLV':['rw','Baseline format Intermagnet'],
                'IYFV':['rw','Yearly mean format Intermagnet'],
                'DKA':['rw', 'K value format Intermagnet'],
                'DIDD':['rw','Output format from MinGeo DIDD'],
                'GSM19':['r', 'Output format from GSM19 magnetometer'],
                'COVJSON':['rw', 'Coverage JSON'],
                'JSON':['rw', 'JavaScript Object Notation'],
                'LEMIHF':['r', 'LEMI text format data'],
                'LEMIBIN':['r','Current LEMI binary data format'],
                'LEMIBIN1':['r','Deprecated LEMI binary format at WIC'],
                'OPT':['r', 'Optical hourly data from WIK'],
                'PMAG1':['r','Deprecated ELSEC from WIK'],
                'PMAG2':['r', 'Current ELSEC from WIK'],
                'GDASA1':['r', 'GDAS binary format'],
                'GDASB1':['r', 'GDAS text format'],
                'RMRCS':['r', 'RCS data output from Richards perl scripts'],
                'RCS':['r', 'RCS raw output'],
                'METEO':['r', 'Winklbauer METEO files'],
                'NEIC':['r', 'WGET data from USGS - NEIC'],
                'LNM':['r', 'Thies Laser-Disdrometer'],
                'IWT':['r', 'IWT Tiltmeter data'],
                'LIPPGRAV':['r', 'Lippmann Tiltmeter data'],
                'GRAVSG':['r', 'GWR TSF data'],
                'CR800':['r', 'CR800 datalogger'],
                'IONO':['r', 'IM806 Ionometer'],
                'RADON':['r', 'single channel analyser gamma data'],
                'USBLOG':['r', 'USB temperature logger'],
                #'SERSIN':['r', '?'],
                #'SERMUL':['r', '?'],
                'PYSTR':['rw', 'MagPy full ascii'],
                'AUTODIF':['r', 'Deprecated - AutoDIF ouput data'],
                'AUTODIF_FREAD':['r', 'Deprecated - Special format for AutoDIF read-in'],
                'PYBIN':['r', 'MagPy own binary format'],
                'PYASCII':['rw', 'MagPy basic ASCII'],
                'POS1TXT':['r', 'POS-1 text format output data'],
                'POS1':['r', 'POS-1 binary output at WIC'],
                'PMB':['r', 'POS pmb file'],
                'QSPIN':['r', 'QSPIN ascii output'],
                #'PYNC':['r', 'MagPy NetCDF variant (too be developed)'],
                #'DTU1':['r', 'ASCII Data from the DTUs FGE systems'],
                #'BDV1':['r', 'Budkov GDAS data variant'],
                'GFZTMP':['r', 'GeoForschungsZentrum ascii format'],
                'GFZKP':['r', 'GeoForschungsZentrum KP-Index format'],
                'PHA':['r', 'Potentially Hazardous Asteroids (PHAs) from the International Astronomical Unions Minor Planet Center, (json, incomplete)'],
                'PREDSTORM':['r','PREDSTORM space weather prediction data format'],
                'CSV':['rw','comma-separated CSV data'],
                'IMAGCDF':['rw','Intermagnet CDF Format'],
                'PYCDF':['rw', 'MagPy CDF variant'],
                'NOAAACE':['r', 'NOAA ACE satellite data format'],
                'NETCDF':['r', 'NetCDF4 format, NOAA DSCOVR satellite data archive format'],
                'LATEX':['w','LateX data'],
                'CS':['r','Cesium G823'],
                #'SFDMI':['r', 'San Fernando variometer'],
                #'SFGSM':['r', 'San Fernando GSM90'],
                # NOTE(review): 'UNKOWN' is misspelled (should be 'UNKNOWN') but
                # it is an established dictionary key; renaming it would break
                # any lookup elsewhere that uses the current spelling — TODO
                # coordinate a rename across the codebase before fixing.
                'UNKOWN':['-','Unknown']
                }
"""
PYMAG_SUPPORTED_FORMATS = {
'IAGA':'rw', # IAGA 2002 text format
'WDC':'rw', # World Data Centre format
'IMF':'rw', # Intermagnet Format
'IAF':'rw', # Intermagnet archive Format
'IMAGCDF', # Intermagnet CDF Format
'BLV', # Baseline format Intermagnet
'IYFV', # Yearly mean format Intermagnet
'DKA', # K value format Intermagnet
'DIDD', # Output format from DIDD
'GSM19', # Output format from GSM19 magnetometer
'COVJSON', # Coverage JavaScript Object Notation
'JSON', # JavaScript Object Notation
'LEMIHF', # LEMI text format data
'LEMIBIN', # Current LEMI binary data format at WIC
'LEMIBIN1', # Deprecated LEMI binary format at WIC
'OPT', # Optical hourly data from WIK
'PMAG1', # Deprecated ELSEC from WIK
'PMAG2', # Current ELSEC from WIK
'GDASA1', # ?
'GDASB1', # ?
'RMRCS', # RCS data output from Richards perl scripts
'RCS', # RCS data output from Richards perl scripts
'METEO', # RCS data output in METEO files
'NEIC', # WGET data from USGS - NEIC
'LNM', # LaserNiederschlagsMonitor files
'IWT', # Tiltmeter data files at cobs
'LIPPGRAV', # Lippmann Tiltmeter data files at cobs
'CR800', # Data from the CR800 datalogger
'IONO', # Data from IM806 Ionometer
'RADON', # ?
'USBLOG', # ?
'SERSIN', # ?
'SERMUL', # ?
'PYSTR', # MagPy full ascii
'AUTODIF', # AutoDIF ouput data
'AUTODIF_FREAD',# Special format for AutoDIF read-in
'PYCDF', # MagPy CDF variant
'PYBIN', # MagPy own format
'PYASCII', # MagPy basic ASCII
'POS1TXT', # POS-1 text format output data
'POS1', # POS-1 binary output at WIC
'PMB', # POS pmb output
'QSPIN', # QSpin output
'PYNC', # MagPy NetCDF variant (too be developed)
'DTU1', # ASCII Data from the DTU's FGE systems
'SFDMI', # ?
'SFGSM', # ?
'BDV1', # ?
'GFZKP', # GeoForschungsZentrum KP-Index format
'NOAAACE', # NOAA ACE satellite data format
'PREDSTORM' # PREDSTORM space weather prediction data format
'CSV', # comma-separated CSV data with isoformat date in first column
'LATEX', # LateX data
'CS', # ?
'UNKOWN' # 'Unknown'?
}
"""
# ----------------------------------------------------------------------------
# Part 3: Example files for easy access and tests
# ----------------------------------------------------------------------------
# Paths to bundled example datasets, resolved relative to the installed
# 'magpy' package so tests and docs work regardless of install location.
# NOTE(review): pkg_resources is deprecated in modern setuptools in favour of
# importlib.resources — TODO confirm the minimum supported Python version
# before migrating.
from pkg_resources import resource_filename
example1 = resource_filename('magpy', 'examples/example1.zip')  #Zip compressed IAGA02
example2 = resource_filename('magpy', 'examples/example2.cdf')  #MagPy CDF with F
example3 = resource_filename('magpy', 'examples/example3.txt')  #PyStr Baseline
example4 = resource_filename('magpy', 'examples/example4.cdf')  #MagPy CDF
example5 = resource_filename('magpy', 'examples/example5.sec')  #Imag CDF
example6a = resource_filename('magpy', 'examples/example6a.txt')  #DI file
example6b = resource_filename('magpy', 'examples/example6b.txt')  #DI file
# ----------------------------------------------------------------------------
# Part 4: Main classes -- DataStream, LineStruct and
# PyMagLog (To be removed)
# ----------------------------------------------------------------------------
class DataStream(object):
"""
Creates a list object from input files /url data
data is organized in columns
keys are column identifier:
key in keys: see KEYLIST
A note on headers:
ALWAYS INITIATE STREAM WITH >>> stream = DataStream([],{}).
All available methods:
----------------------------
- stream.ext(self, columnstructure): # new version of extend function for column operations
- stream.add(self, datlst):
- stream.clear_header(self):
- stream.extend(self,datlst,header):
- stream.union(self,column):
- stream.findtime(self,time):
- stream._find_t_limits(self):
- stream._print_key_headers(self):
- stream._get_key_headers(self,**kwargs):
- stream.sorting(self):
- stream._get_line(self, key, value):
- stream._remove_lines(self, key, value):
- stream._remove_columns(self, keys):
- stream._get_column(self, key):
- stream._put_column(self, column, key, **kwargs):
- stream._move_column(self, key, put2key):
- stream._clear_column(self, key):
- stream._reduce_stream(self, pointlimit=100000):
- stream._aic(self, signal, k, debugmode=None):
- stream._get_k(self, **kwargs):
- stream._get_k_float(self, value, **kwargs):
- stream._get_max(self, key, returntime=False):
- stream._get_min(self, key, returntime=False):
- stream._gf(self, t, tau):
- stream._hf(self, p, x):
- stream._residual_func(self, func, y):
- stream._tau(self, period):
- stream._convertstream(self, coordinate, **kwargs):
- stream._det_trange(self, period):
- stream._is_number(self, s):
- stream._normalize(self, column):
- stream._testtime(self, time):
- stream._drop_nans(self, key):
- stream.aic_calc(self, key, **kwargs):
- stream.baseline(self, absolutestream, **kwargs):
- stream.bindetector(self,key,text=None,**kwargs):
- stream.calc_f(self, **kwargs):
- stream.cut(self,length,kind=0,order=0):
- stream.dailymeans(self):
- stream.date_offset(self, offset):
- stream.delta_f(self, **kwargs):
- stream.dict2stream(self,dictkey='DataBaseValues')
- stream.differentiate(self, **kwargs):
- stream.eventlogger(self, key, values, compare=None, stringvalues=None, addcomment=None, debugmode=None):
- stream.extract(self, key, value, compare=None, debugmode=None):
- stream.extrapolate(self, start, end):
- stream.filter(self, **kwargs):
- stream.fit(self, keys, **kwargs):
- stream.flag_outlier(self, **kwargs):
- stream.flag_stream(self, key, flag, comment, startdate, enddate=None, samplingrate):
- stream.func2stream(self,function,**kwargs):
- stream.func_add(self,function,**kwargs):
- stream.func_subtract(self,function,**kwargs):
- stream.get_gaps(self, **kwargs):
- stream.get_sampling_period(self):
- stream.samplingrate(self, **kwargs):
- stream.integrate(self, **kwargs):
- stream.interpol(self, keys, **kwargs):
- stream.k_fmi(self, **kwargs):
- stream.mean(self, key, **kwargs):
- stream.multiply(self, factors):
- stream.offset(self, offsets):
- stream.randomdrop(self, percentage=None, fixed_indicies=None):
- stream.remove(self, starttime=starttime, endtime=endtime):
- stream.remove_flagged(self, **kwargs):
- stream.resample(self, keys, **kwargs):
- stream.rotation(self,**kwargs):
- stream.scale_correction(self, keys, scales, **kwargs):
- stream.smooth(self, keys, **kwargs):
- stream.steadyrise(self, key, timewindow, **kwargs):
- stream.stream2dict(self,dictkey='DataBaseValues')
- stream.stream2flaglist(self, userange=True, flagnumber=None, keystoflag=None, sensorid=None, comment=None)
- stream.trim(self, starttime=None, endtime=None, newway=False):
- stream.variometercorrection(self, variopath, thedate, **kwargs):
- stream.write(self, filepath, **kwargs):
Application methods:
----------------------------
- stream.aic_calc(key) -- returns stream (with !var2! filled with aic values)
- stream.baseline() -- calculates baseline correction for input stream (datastream)
- stream.dailymeans() -- for DI stream - obtains variometer corrected means fo basevalues
- stream.date_offset() -- Corrects the time column of the selected stream by the offst
- stream.delta_f() -- Calculates the difference of x+y+z to f
- stream.differentiate() -- returns stream (with !dx!,!dy!,!dz!,!df! filled by derivatives)
- stream.extrapolate() -- read absolute stream and extrapolate the data
- stream.fit(keys) -- returns function
- stream.filter() -- returns stream (changes sampling_period; in case of fmi ...)
- stream.find_offset(stream_a, stream_b) -- Finds offset of two data streams. (Not optimised.)
- stream.flag_stream() -- Add flags to specific times or time ranges
- stream.func2stream() -- Combine stream and function (add, subtract, etc)
- stream.func_add() -- Add a function to the selected values of the data stream
- stream.func_subtract() -- Subtract a function from the selected values of the data stream
- stream.get_gaps() -- Takes the dominant sample frequency and fills non-existing time steps
- stream.get_sampling_period() -- returns the dominant sampling frequency in unit ! days !
- stream.integrate() -- returns stream (integrated vals at !dx!,!dy!,!dz!,!df!)
- stream.interpol(keys) -- returns function
- stream.k_fmi() -- Calculating k values following the fmi approach
- stream.linestruct2ndarray() -- converts linestrcut data to ndarray. should be avoided
- stream.mean() -- Calculates mean values for the specified key, Nan's are regarded for
- stream.offset() -- Apply constant offsets to elements of the datastream
- stream.plot() -- plot keys from stream
- stream.powerspectrum() | |
# Repository: teammdm/seminar
from Bio import SeqIO
import numpy as np
import os
import io
import h5py
from queue import Queue
import signal_pb2
import nanopolish_pb2
import raw_current_pb2
class ResquiggledFAST5():
""" <2do> dokumentacija"""
    def __init__(self, path_to_file):
        """Open the FAST5 (HDF5) file at *path_to_file* and build key lookups.

        Parameters
        ----------
        path_to_file : str
            Path to an existing resquiggled FAST5 file.

        Raises
        ------
        FileNotFoundError
            If *path_to_file* does not point to an existing file.
        """
        if not os.path.isfile(path_to_file):
            raise FileNotFoundError("The file {} could not be found".format(path_to_file))
        self._path_to_file = path_to_file
        # Read-only HDF5 handle, kept open for the lifetime of this object.
        self._file_handle = h5py.File(path_to_file, 'r')
        # Lookup tables over the HDF5 hierarchy, populated by _generate_key_dict().
        self._key_dict_hierarchical = dict()
        self._key_dict_flat = dict()
        self._generate_key_dict()
        # Lazily computed caches: None until the corresponding accessor runs.
        self._sequence = None
        self._events = None
        self._nanopolish_events = None
        self._raw_current = None
        self._discrete_signal = None
        self._continous_signal = None  # NOTE: keeps the historical (misspelled) attribute name
        self._add_conversion_data()
def get_fasta(self):
"""Returns fasta reads read within the fast5 file.
Parameters
----------
Returns
-------
fasta : SeqRecord list
List of SeqRecords objects
"""
return self._extract_file_format('Fasta')
def get_fastq(self):
"""Returns fastq reads read within the fast5 file.
Parameters
----------
Returns
-------
fastq : SeqRecord list
List of SeqRecords objects
"""
return self._extract_file_format('Fastq')
def get_signal_discrete(self):
"""Returns all discrete signal values associated with the current file.
Parameters
----------
Returns
-------
signal : np.array
Discrete signal signal values from file
"""
if self._discrete_signal is not None:
return self._discrete_signal
signal = self._key_dict_flat['Signal']
self._discrete_signal = np.array(signal)
return self._discrete_signal
def get_signal_continuos(self):
"""Returns all continuous/raw signal values associated with the current file.
Parameters
----------
Returns
-------
signal : np.array
Continuous/ras signal values from file
"""
if self._continous_signal is not None:
return self._continous_signal
discrete_signal = self.get_signal_discrete()
info = self.get_general_info()
rng = info.range
digitisation = info.digitisation
raw_unit = rng / digitisation
offset = info.offset
self._continous_signal = np.array(list(map(lambda x: (x + offset) * raw_unit, discrete_signal)))
return self._continous_signal
def get_nucleotide_positions(self, nucleotide):
"""Returns all indices of positions where the specified nucleotide has been detected.
Parameters
----------
nucleotide : string
Nucleotide name
Returns
-------
indices : numpy.array
Numpy vector containing the indices.
"""
events = self.get_events()
indices = []
for event in events:
if event.base == nucleotide:
indices += [i for i in range(event.start, event.start + event.length)]
return np.array(indices)
    #TODO
    def get_kmer_positions(self, kmer):
        """Return all indices of positions where the specified kmer has been detected.

        Parameters
        ----------
        kmer : string
            Kmer representation

        Returns
        -------
        indices : numpy.array
            Numpy vector containing the indices.
        """
        # Not implemented yet (see #TODO above); currently returns None.
        pass
def get_nucleotide_intervals(self, nucleotide):
"""Returns all start and end indices of intervals where the specified nucleotide has been detected.
Parameters
----------
nucleotide : string
Nucleotide representation
Returns
-------
indices : numpy.array
Numpy array containing the indices.
"""
events = self.get_events()
indices = []
for event in events:
if event.base == nucleotide:
indices.append((event.start, event.start + event.length))
return np.array(indices)
    #TODO
    def get_kmer_intervals(self, kmer):
        """Return all start and end indices of intervals where the specified kmer has been detected.

        Parameters
        ----------
        kmer : string
            Kmer representation

        Returns
        -------
        indices : numpy.array
            Numpy vector containing the indices.
        """
        # Not implemented yet (see #TODO above); currently returns None.
        pass
def get_nucleotide_mean_stdev(self, nuclotide):
"""Returns mean and standard deviation of the specified nucleotide in the signal.
Parameters
----------
nucleotide : string
Nucleotide representation
Returns
-------
params : tuple
Tuple of format (mean, stdev)
"""
events = self.get_events()
mean_sum = 0.0
stdev_sum = 0.0
counter = 0
for event in events:
if event.base == nucleotide:
mean_sum += event.norm_mean
stdev_sum += event.norm_stdev
counter += 1
return (mean_sum / counter, stdev_sum / counter)
    #TODO
    def get_kmer_mean_sd(self, kmer):
        """Return mean and standard deviation of the specified kmer in the signal.

        Parameters
        ----------
        kmer : string
            Kmer representation

        Returns
        -------
        params : tuple
            Tuple of format (mean, std_dev)
        """
        # Not implemented yet (see #TODO above); currently returns None.
        pass
def get_nucleotide_average_duration(self, nucleotide):
"""Returns average duration of the specified nucleotide in the signal.
Parameters
----------
nucleotide : string
Nucleotide representation
Returns
-------
duration : float
Duration of nucleotide
"""
events = self.get_events()
duration_sum = 0
counter = 0
for event in events:
if event.base == nucleotide:
duration_sum += event.length
counter += 1
return float(duration_sum) / counter
#TODO
def get_kmer_average_duration(self, kmer):
"""Returns average duration of the specified kmer in the signal.
Parameters
----------
kmer : string
Kmer representation
Returns
-------
duration : float
Duration of kmer
"""
pass
@staticmethod
def _create_raw_current(signal, fast5_info):
"""MOVE TO SEPARATE FILE"""
curr = raw_current_pb2.Current()
curr.digitisation = fast5_info.digitisation
curr.offset = fast5_info.offset
curr.range = fast5_info.range
curr.sampling_rate = fast5_info.sampling_rate
curr.currents.extend(signal)
return curr
@staticmethod
def _create_Event(norm_mean, norm_stdev, start, length, base):
"""MOVE TO SEPARATE FILE"""
event = signal_pb2.Event()
event.norm_mean = norm_mean
event.norm_stdev = norm_stdev
event.start = start
event.length = length
event.base = base
return event
    @staticmethod
    def _create_nanopolish_event(events, index, resquiggle_info, fast5_info, aligns):
        """Convert a window of resquiggle events into a nanopolish-style EventAlign.

        MOVE TO SEPARATE FILE

        Parameters
        ----------
        events : sequence
            Six consecutive Event objects (the caller passes events[i-3:i+3]).
        index : int
            Zero-based window index within the read.
        resquiggle_info : ResquiggleInfo
            Mapping metadata for the read.
        fast5_info : Fast5Info
            Channel metadata; sampling_rate is used to convert lengths to seconds.
        aligns : dict
            kmer -> EventAlign accumulator shared across calls; this function
            creates or extends the entry for this window's kmer.

        Returns
        -------
        nanopolish_pb2.EventAlign
            The (possibly newly created) accumulator for the window's kmer.
        """
        event_nano = nanopolish_pb2.EventAlign.Event()
        # NOTE(review): events[5] is the LAST event of the 6-event window, not
        # the centre one — confirm this is intended.
        event = events[5]
        increasing = (resquiggle_info.mapped_start < resquiggle_info.mapped_end)
        #could also be clipped_bases_start, end
        event_nano.index = (resquiggle_info.mapped_start + index) if increasing else (resquiggle_info.mapped_start - index)
        # Window-averaged statistics over the per-event current samples.
        # NOTE(review): the lambda parameter 'event' shadows the outer 'event'
        # variable; harmless here but easy to misread.
        event_nano.level_mean = np.average(list(map(lambda event: np.average(list(map(lambda x: x, event.samples))), events)))
        event_nano.stdv = np.average(list(map(lambda event: np.std(list(map(lambda x: x, event.samples))), events)))
        event_nano.length = np.average(list(map(lambda x: x.length / fast5_info.sampling_rate, events)))
        # event_nano.level_mean = np.average(list(map(lambda x: x, event.samples)))
        # event_nano.stdv = np.std(list(map(lambda x: x, event.samples)))
        # event_nano.length = event.length / fast5_info.sampling_rate
        event_nano.start_idx = resquiggle_info.mapped_start + event.start
        event_nano.end_idx = resquiggle_info.mapped_start + event.start + event.length
        event_nano.samples.extend(event.samples)
        event_nano.standardized_level = event.norm_mean
        # The window's 6-mer, concatenated from the events' base calls.
        kmer = "".join(list(map(lambda x: x.base, events[0:6])))
        if kmer not in aligns:
            event_align = nanopolish_pb2.EventAlign()
            event_align.contig = resquiggle_info.mapped_chrom
            event_align.position = index
            event_align.read_index = 0 #TODO
            event_align.reference_kmer = kmer
            event_align.strand = resquiggle_info.mapped_strand == '+'
            aligns[kmer] = event_align
        event_align = aligns[kmer]
        def reverseComp(char):
            # NOTE(review): falls through (returns None) for characters outside
            # A/C/G/T, which would propagate None into model_kmer — confirm
            # inputs are always canonical bases.
            if char == 'T':
                return 'A'
            if char == 'A':
                return 'T'
            if char == 'C':
                return 'G'
            if char == 'G':
                return 'C'
        #either model or reference is from Analyses.BaseCall
        # NOTE(review): the "'c' in mapped_chrom" substring test looks like a
        # fragile heuristic for complement-strand handling — TODO confirm.
        if 'c' in resquiggle_info.mapped_chrom:
            event_align.model_kmer = "".join(list(map(reverseComp, kmer[::-1])))
        else:
            event_align.model_kmer = kmer
        event_align.events.extend([event_nano])
        # Running model statistics over all events accumulated for this kmer.
        event_align.model_mean = np.average(list(map(lambda x: x.level_mean, event_align.events)))
        event_align.model_stdv = np.average(list(map(lambda x: x.stdv, event_align.events)))
        return event_align
@staticmethod
def _create_ResquiggleInfo(attrs):
"""MOVE TO SEPARATE FILE"""
info = signal_pb2.ResquiggleInfo()
info.clipped_bases_end = int(attrs['clipped_bases_end'])
info.clipped_bases_start = int(attrs['clipped_bases_start'])
info.mapped_chrom = attrs['mapped_chrom']
info.mapped_end = int(attrs['mapped_end'])
info.mapped_start = int(attrs['mapped_start'])
info.mapped_strand = attrs['mapped_strand']
info.num_deletions = int(attrs['num_deletions'])
info.num_insertions = int(attrs['num_insertions'])
info.num_matches = int(attrs['num_matches'])
info.num_mismatches = int(attrs['num_mismatches'])
return info
@staticmethod
def _create_Fast5Info(template_attrs, channel_id_attrs):
"""MOVE TO SEPARATE FILE"""
info = signal_pb2.Fast5Info()
info.lower_lim = float(template_attrs['lower_lim'])
info.norm_type = template_attrs['norm_type']
info.outlier_threshold = float(template_attrs['outlier_threshold'])
info.rna = bool(template_attrs['rna'])
info.scale = float(template_attrs['scale'])
info.shift = float(template_attrs['shift'])
info.signal_match_score = float(template_attrs['signal_match_score'])
info.status = template_attrs['status']
info.upper_lim = float(template_attrs['upper_lim'])
info.channel_number = channel_id_attrs['channel_number']
info.digitisation = float(channel_id_attrs['digitisation'])
info.offset = float(channel_id_attrs['offset'])
info.range = float(channel_id_attrs['range'])
info.sampling_rate = float(channel_id_attrs['sampling_rate'])
return info
@staticmethod
def _serialize_Message(message, path):
"""MOVE TO SEPARATE FILE"""
f = open(path, "wb")
f.write(message.serializeToString())
f.close()
return
def get_resquiggle_info(self):
"""Returns resquiggle information as ResquiggleInfo object.
Parameters
----------
Return
info : ResquiggleInfo
Object containing the information as object variables
"""
attrs = self._key_dict_flat['Alignment'].attrs
return ResquiggledFAST5._create_ResquiggleInfo(attrs)
def get_general_info(self):
"""Returns general information about the sequence as well as tombo parameters
as a Fast5Info object.
Parameters
----------
Returns
-------
info : Fast5Info
"""
template_attrs = self._key_dict_flat['BaseCalled_template'].attrs
channel_id_attrs = self._key_dict_flat['channel_id'].attrs
return ResquiggledFAST5._create_Fast5Info(template_attrs, channel_id_attrs)
    def get_nanopolish_events(self):
        """Return all events converted to nanopolish EventAlign output form.

        On first call, slides a 6-event window over the read (skipping the
        first and last 3 events) and builds one nanopolish event per window;
        the result is cached for subsequent calls.

        Returns
        -------
        events : [Event] ili numpy.array
            All events in sequential order
        """
        if self._nanopolish_events is not None:
            return self._nanopolish_events
        events = self.get_events()
        nano_events = []
        # Shared kmer -> EventAlign accumulator, filled by _create_nanopolish_event.
        aligns = {}
        # NOTE(review): events[i-3:i+3] yields 6 events (i-3 .. i+2); if a
        # 7-event window centred on i was intended, the slice should be
        # events[i-3:i+4] — TODO confirm.
        for i in range(3,len(events)-3):
            nano_events.append(self._create_nanopolish_event(events[i-3:i+3], i-3, self.get_resquiggle_info(), self.get_general_info(), aligns))
        self._nanopolish_events = nano_events
        return self._nanopolish_events
def get_raw_current_data(self):
"""Returns Current object retrieved from the signal in the Fast5 file
Parameters
----------
Returns
-------
events : Current
Current file
"""
if self._raw_current is not None:
return self._raw_current
signal = self.get_signal_continuos()
self._raw_current = ResquiggledFAST5._create_raw_current(signal, self.get_general_info())
return self._raw_current
def get_events(self):
"""Returns all Event objects in sequential order associated with the fast5 file.
Parameters
----------
Returns
-------
events : [Event] ili numpy.array
All events in sequential order
"""
if self._events is not None:
return self._events
alignment = self._key_dict_flat['BaseCalled_template']
events = np.array(alignment.get('Events'))
signal = self.get_signal_continuos() # events is a vector of shape (x, )
events_list = []
for row in events:
event = ResquiggledFAST5._create_Event(*row)
event.samples.extend(signal[event.start:(event.start+event.length)])
events_list.append(event)
self._events = np.array(events_list)
return self._events
def _convert_to_raw(self, signals):
"""Converts given np.array of discrete (int) signals in to their continuous/raw (float) counterparts.
Parameters
----------
signals : np.array
Array of discrete signals
Returns
-------
raw_signals : np.array
Array of raw/continuous signals
"""
return | |
from collections import OrderedDict
from typing import Type, Union
from flask import jsonify
from werkzeug.wrappers import Response
from werkzeug.exceptions import BadRequest, InternalServerError
from marshmallow import Schema, EXCLUDE, RAISE
from marshmallow.fields import List
from marshmallow.exceptions import ValidationError
from flask_restx.model import Model
from flask_restx import fields, reqparse, inputs
from flask_accepts.utils import for_swagger, get_default_model_name, is_list_field, ma_field_to_reqparse_argument
def accepts(
*args,
model_name: str = None,
schema: Union[Schema, Type[Schema], None] = None,
query_params_schema: Union[Schema, Type[Schema], None] = None,
headers_schema: Union[Schema, Type[Schema], None] = None,
many: bool = False,
api=None,
use_swagger: bool = True,
):
"""
Wrap a Flask route with input validation using a combination of reqparse from
Flask-restx and/or Marshmallow schemas
Args:
*args: any number of dictionaries containing parameters to pass to
reqparse.RequestParser().add_argument(). A single string parameter may also be
provided that is used as the model name. By default these parameters
will be parsed using the default logic however, if a schema is provided then
the JSON body is assumed to correspond to it and will not be parsed for query params.
model_name (str): the name to pass to api.Model, can optionally be provided as a str argument to *args
schema (Marshmallow.Schema, optional): A Marshmallow Schema that will be used to parse JSON
data from the request body and store in request.parsed_obj. Defaults to None.
query_params_schema (Marshmallow.Schema, optional): A Marshmallow Schema that will be used to parse
data from the request query params and store in request.parsed_query_params. These values will
also be added to the `request.args` dict. Defaults to None.
headers_schema (Marshmallow.Schema, optional): A Marshmallow Schema that will be used to parse
data from the request header and store in request.parsed_headers. Defaults to None.
many (bool, optional): The Marshmallow schema `many` parameter, which will
return a list of the corresponding schema objects when set to True. This
flag corresopnds only to the request body schema, and not the
`query_params_schema` or `headers_schema` arguments.
Returns:
The wrapped route
"""
_check_deprecate_many(many)
# If an api was passed in, we need to use its parser so Swagger is aware
if api:
_parser = api.parser()
else:
_parser = reqparse.RequestParser(bundle_errors=True)
query_params = [arg for arg in args if isinstance(arg, dict)]
for arg in args: # check for positional string-arg, which is the model name
if isinstance(arg, str):
model_name = arg
break
# Handles query params passed in as positional arguments.
for qp in query_params:
params = {**qp, "location": qp.get("location") or "values"}
if qp["type"] == bool:
# mapping native bool is necessary so that string "false" is not truthy
# https://flask-restx.readthedocs.io/en/stable/parsing.html#advanced-types-handling
params["type"] = inputs.boolean
_parser.add_argument(**params)
# Handles request body schema.
if schema:
schema = _get_or_create_schema(schema, many=many)
# Handles query params schema.
if query_params_schema:
query_params_schema = _get_or_create_schema(query_params_schema, unknown=EXCLUDE)
for name, field in query_params_schema.fields.items():
params = {**ma_field_to_reqparse_argument(field), "location": "values"}
_parser.add_argument(field.data_key or name, **params)
# Handles headers schema.
if headers_schema:
headers_schema = _get_or_create_schema(headers_schema, unknown=EXCLUDE)
for name, field in headers_schema.fields.items():
params = {**ma_field_to_reqparse_argument(field), "location": "headers"}
_parser.add_argument(field.data_key or name, **params)
def decorator(func):
from functools import wraps
# Check if we are decorating a class method
_IS_METHOD = _is_method(func)
@wraps(func)
def inner(*args, **kwargs):
from flask import request
error = schema_error = None
# Handle arguments
try:
request.parsed_args = _parser.parse_args()
except Exception as e:
error = e
# Handle Marshmallow schema for request body
if schema:
try:
obj = schema.load(request.get_json(force=True))
request.parsed_obj = obj
except ValidationError as ex:
schema_error = ex.messages
if schema_error:
error = error or BadRequest(
f"Error parsing request body: {schema_error}"
)
if hasattr(error, "data"):
error.data["errors"].update({"schema_errors": schema_error})
else:
error.data = {"schema_errors": schema_error}
# Handle Marshmallow schema for query params
if query_params_schema:
request_args = _convert_multidict_values_to_schema(
request.args,
query_params_schema)
try:
obj = query_params_schema.load(request_args)
request.parsed_query_params = obj
except ValidationError as ex:
schema_error = ex.messages
if schema_error:
error = error or BadRequest(
f"Error parsing query params: {schema_error}"
)
if hasattr(error, "data"):
error.data["errors"].update({"schema_errors": schema_error})
else:
error.data = {"schema_errors": schema_error}
# Handle Marshmallow schema for headers
if headers_schema:
request_headers = _convert_multidict_values_to_schema(
request.headers,
headers_schema)
try:
obj = headers_schema.load(request_headers)
request.parsed_headers = obj
except ValidationError as ex:
schema_error = ex.messages
if schema_error:
error = error or BadRequest(
f"Error parsing headers: {schema_error}"
)
if hasattr(error, "data"):
error.data["errors"].update({"schema_errors": schema_error})
else:
error.data = {"schema_errors": schema_error}
# If any parsing produced an error, combine them and re-raise
if error:
raise error
return func(*args, **kwargs)
# Add Swagger
if api and use_swagger and _IS_METHOD:
if schema:
body = for_swagger(
schema=schema,
model_name=model_name or get_default_model_name(schema),
api=api,
operation="load",
)
if schema.many is True:
body = [body]
params = {
"expect": [body, _parser],
}
inner = api.doc(**params)(inner)
elif _parser:
inner = api.expect(_parser)(inner)
return inner
return decorator
def responds(
    *args,
    model_name: str = None,
    schema=None,
    many: bool = False,
    api=None,
    envelope=None,
    status_code: int = 200,
    validate: bool = False,
    description: str = None,
    use_swagger: bool = True,
):
    """
    Serialize the output of a function using the Marshmallow schema to dump the results.
    Note that `schema` should be the type, not an instance -- the `responds` decorator
    will internally handle creation of the schema. If the outputted value is already of
    type flask.Response, it will be passed along without further modification.

    Args:
        model_name (str, optional): Name under which the Swagger model is registered;
            may also be supplied as a positional string argument.
        schema (Schema or type, optional): Marshmallow schema (class or instance) with
            which to serialize the output of the wrapped function.
        many (bool, optional): (DEPRECATED) The Marshmallow schema `many` parameter, which will
            return a list of the corresponding schema objects when set to True.
        api (optional): flask-restx Api object; required for Swagger documentation.
        envelope (str, optional): Key under which the serialized output is nested.
        status_code (int, optional): HTTP status code of the response. Defaults to 200.
        validate (bool, optional): If True, validate the serialized output against the
            schema and raise InternalServerError on failure. Defaults to False.
        description (str, optional): Response description for the Swagger docs.
        use_swagger (bool, optional): Whether to attach Swagger documentation.

    Returns:
        The output of schema(many=many).dumps(<return value>) of the wrapped function
    """
    from functools import wraps
    from flask_restx import reqparse

    _check_deprecate_many(many)

    # If an api was passed in, we need to use its parser so Swagger is aware
    if api:
        _parser = api.parser()
    else:
        _parser = reqparse.RequestParser(bundle_errors=True)

    query_params = [arg for arg in args if isinstance(arg, dict)]
    for arg in args:  # check for positional string-arg, which is the model name
        if isinstance(arg, str):
            model_name = arg
            break

    for qp in query_params:
        _parser.add_argument(**qp, location="values")

    ordered = None
    if schema:
        schema = _get_or_create_schema(schema, many=many)
        ordered = schema.ordered
        model_name = model_name or get_default_model_name(schema)

    # Fallback model built from the reqparse arguments, used when no schema is given.
    model_from_parser = _model_from_parser(model_name=model_name, parser=_parser)

    def decorator(func):
        # Check if we are decorating a class method
        _IS_METHOD = _is_method(func)

        @wraps(func)
        def inner(*args, **kwargs):
            rv = func(*args, **kwargs)

            # If a Flask response has been made already, it is passed through unchanged
            if isinstance(rv, Response):
                return rv

            if schema:
                serialized = schema.dump(rv)

                # Validate data if asked to (throws)
                if validate:
                    errs = schema.validate(serialized)
                    if errs:
                        raise InternalServerError(
                            description="Server attempted to return invalid data"
                        )
                # Apply the flask-restx mask after validation
                serialized = _apply_restx_mask(serialized)
            else:
                from flask_restx import marshal

                serialized = marshal(rv, model_from_parser)

            if envelope:
                # Preserve key order only when the schema itself is ordered.
                serialized = OrderedDict([(envelope, serialized)]) if ordered else {envelope: serialized}

            # Use the cached _IS_METHOD instead of re-evaluating _is_method(func)
            # on every request (the answer can never change after decoration).
            if not _IS_METHOD:
                # Regular route, need to manually create Response
                return jsonify(serialized), status_code
            return serialized, status_code

        # Add Swagger
        if api and use_swagger and _IS_METHOD:
            if schema:
                api_model = for_swagger(
                    schema=schema, model_name=model_name, api=api, operation="dump"
                )
                if schema.many is True:
                    api_model = [api_model]

                inner = _document_like_marshal_with(
                    api_model, status_code=status_code, description=description,
                )(inner)

            elif _parser:
                api.add_model(model_name, model_from_parser)
                inner = _document_like_marshal_with(
                    model_from_parser, status_code=status_code, description=description
                )(inner)

        return inner

    return decorator
def _apply_restx_mask(serialized):
    """Apply a flask-restx field mask to an already-serialized payload.

    The mask is read from the request header named by the app's
    ``RESTX_MASK_HEADER`` config (default ``X-Fields``).  When no mask
    header is present, the payload is returned untouched.
    """
    from flask import current_app, request
    from flask_restx.mask import apply as apply_mask

    header_name = current_app.config.get("RESTX_MASK_HEADER", "X-Fields")
    requested_mask = request.headers.get(header_name)
    if not requested_mask:
        return serialized
    return apply_mask(serialized, requested_mask)
def _check_deprecate_many(many: bool = False):
if many:
import warnings
warnings.simplefilter("always", DeprecationWarning)
warnings.warn(
"The 'many' parameter is deprecated in favor of passing these "
"arguments to an actual instance of Marshmallow schema (i.e. "
"prefer @responds(schema=MySchema(many=True)) instead of "
"@responds(schema=MySchema, many=True))",
DeprecationWarning,
stacklevel=3,
)
def _get_or_create_schema(
    schema: Union[Schema, Type[Schema]], many: bool = False, unknown: str = RAISE
) -> Schema:
    """Return *schema* unchanged if it is already an instance; otherwise
    instantiate the schema class with the given ``many``/``unknown`` options."""
    if not isinstance(schema, Schema):
        schema = schema(many=many, unknown=unknown)
    return schema
def _model_from_parser(model_name: str, parser: reqparse.RequestParser) -> Model:
    """Build a flask-restx ``Model`` mirroring the arguments of *parser*."""
    from flask_restx import fields

    # Scalar OpenAPI types map directly onto flask-restx field classes.
    scalar_fields = {
        "integer": fields.Integer,
        "string": fields.String,
        "number": fields.Float,
    }

    def _field_for(arg):
        # Arrays become a List of the item's scalar field; scalars map directly.
        if arg["type"] == "array":
            return fields.List(scalar_fields[arg["items"]["type"]])
        return scalar_fields[arg["type"]]

    return Model(
        model_name,
        {arg["name"]: _field_for(arg) for arg in parser.__schema__},
    )
def merge(first: dict, second: dict) -> dict:
    """Return a new dict with *second*'s entries layered over *first*'s.

    Neither input is modified; on key collisions *second* wins.
    """
    combined = dict(first)
    combined.update(second)
    return combined
def _document_like_marshal_with(
values, status_code: int = 200, description: str = None
):
description = description or "Success"
def inner(func):
doc = {"responses": {status_code: (description, values)}, "__mask__": | |
# DescendantReportExtra addon
#
# Notes by <NAME>, 2021
# Much of the code for this addon was copied from two existing Gramps plugins:
#
# gramps/plugins/descendtree.py
# gramps/plugins/ancestortree.py
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 <NAME>
# Copyright (C) 2007-2012 <NAME>
# Copyright (C) 2010 <NAME>
# Copyright (C) 2009-2010 <NAME>
# Copyright (C) 2014 <NAME>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Reports/Graphical Reports/Hourglass Tree
Reports/Graphical Reports/Family Hourglass Tree
"""
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.gen.errors import ReportError
from gramps.gen.plug.menu import (TextOption, NumberOption, BooleanOption,
EnumeratedListOption, StringOption,
PersonOption, FamilyOption)
from gramps.gen.plug.report import Report, MenuReportOptions, stdoptions
from gramps.gen.plug.report import utils
from gramps.gen.plug.docgen import (FontStyle, ParagraphStyle, GraphicsStyle,
FONT_SANS_SERIF, PARA_ALIGN_CENTER)
from gramps.plugins.lib.libtreebase import *
from gramps.plugins.lib.librecurse import AscendPerson
from gramps.gen.proxy import CacheProxyDb
from gramps.gen.display.name import displayer as _nd
from gramps.gen.utils.db import family_name
PT2CM = utils.pt2cm
#------------------------------------------------------------------------
#
# Constants
#
#------------------------------------------------------------------------
# Translated abbreviations used inside the chart boxes.
# NOTE: these must be plain strings -- the stray trailing commas in the
# previous version turned each constant into a 1-tuple, which would render
# as e.g. "('b.',)" instead of "b." wherever they are interpolated.
_BORN = _("b.", "birth abbreviation")
_DIED = _("d.", "death abbreviation")
_MARR = _("m.", "marriage abbreviation")

_RPT_NAME = 'hourglass_chart'

# Indexes into a box "level" tuple: generation, index within generation,
# vertical position on the page.
LVL_GEN, LVL_INDX, LVL_Y = range(3)
# Flag index marking a level as belonging to the descendant half
# (presumably -- TODO confirm against the layout code later in the file).
LVL_ISDESC = 1
#------------------------------------------------------------------------
#
# Box classes
#
#------------------------------------------------------------------------
class DescendantBoxBase(BoxBase):
    """Common base class for the boxes of the descendant part of the chart.

    Stores the box style string plus bookkeeping attributes used while
    laying out and linking boxes.
    """

    def __init__(self, boxstr, descendant_tree):
        BoxBase.__init__(self)
        self.boxstr = boxstr
        # Attributes filled in later during layout/linking.
        self.father = None
        self.linked_box = None
        self.in_descendant_tree = descendant_tree

    def calc_text(self, database, person, family):
        """Compute and store the display text for this box."""
        connect = GuiConnect()
        line_calc = connect.calc_lines(database)
        self.text = line_calc.calc_lines(
            person, family, connect.working_lines(self))
def boxes_in_ancestor_tree(canvas):
    """Return the canvas boxes belonging to the ancestor (upper) half."""
    return [box for box in canvas.boxes if not box.in_descendant_tree]
def boxes_in_descendant_tree(canvas):
    """Return the canvas boxes belonging to the descendant (lower) half."""
    return [box for box in canvas.boxes if box.in_descendant_tree]
class PersonBox(DescendantBoxBase):
    """A printable box representing one person on the chart page."""

    def __init__(self, level, descendant_tree):
        DescendantBoxBase.__init__(self, "CG2-box", descendant_tree)
        self.level = level

    def set_bold(self):
        """Switch this box to the bold display style."""
        self.boxstr = "CG2b-box"

    def __lt__(self, other):
        # Boxes sort by vertical position on the page.
        return self.level[LVL_Y] < other.level[LVL_Y]
class FamilyBox(DescendantBoxBase):
    """A printable box representing one family on the chart page."""

    def __init__(self, level, descendant_tree):
        DescendantBoxBase.__init__(self, "CG2-fam-box", descendant_tree)
        self.level = level

    def __lt__(self, other):
        # Boxes sort by vertical position on the page.
        return self.level[LVL_Y] < other.level[LVL_Y]
class PlaceHolderBox(BoxBase):
    """An invisible box that only reserves space on the canvas.

    It never prints; it exists to keep real boxes out of areas that
    must stay empty.
    """

    def __init__(self, level):
        BoxBase.__init__(self)
        self.boxstr = "None"
        self.level = level
        self.line_to = None
        self.linked_box = None

    def calc_text(self, database, person, family):
        """Nothing to compute for an invisible box."""
        return
#------------------------------------------------------------------------
#
# Titles Class(es)
#
#------------------------------------------------------------------------
class DescendantTitleBase(TitleBox):
    """Base class for the chart titles built from one or two families.

    Provides helpers to turn lists of people into a localized title
    string and to fetch the parents of a family.
    """

    def __init__(self, dbase, doc, locale, name_displayer,
                 boxstr="CG2-Title-box"):
        self._nd = name_displayer
        TitleBox.__init__(self, doc, boxstr)
        self.database = dbase
        self._ = locale.translation.sgettext

    def descendant_print(self, person_list, person_list2=None):
        """ calculate the title

        Person_list will always be passed.
        If in the Family reports and there are two families, person_list2
        will be used.
        """
        # Fix: the default used to be a mutable list ([]), which is shared
        # across calls.  Use None as the sentinel and normalize here;
        # behavior for all existing callers is unchanged.
        if person_list2 is None:
            person_list2 = []

        if len(person_list) == len(person_list2) == 1:
            person_list = person_list + person_list2
            person_list2 = []

        names = self._get_names(person_list, self._nd)

        if person_list2:
            names2 = self._get_names(person_list2, self._nd)
            if len(names) + len(names2) == 3:
                if len(names) == 1:
                    title = self._("Hourglass Chart for %(person)s and "
                                   "%(father1)s, %(mother1)s") % {
                                       'person': names[0],
                                       'father1': names2[0],
                                       'mother1': names2[1],
                                   }
                else:  # Should be 2 items in names list
                    title = self._("Hourglass Chart for %(person)s, "
                                   "%(father1)s and %(mother1)s") % {
                                       'father1': names[0],
                                       'mother1': names[1],
                                       'person': names2[0],
                                   }
            else:  # Should be 2 items in both names and names2 lists
                title = self._("Hourglass Chart for %(father1)s, %(father2)s "
                               "and %(mother1)s, %(mother2)s") % {
                                   'father1': names[0],
                                   'mother1': names[1],
                                   'father2': names2[0],
                                   'mother2': names2[1],
                               }
        else:  # No person_list2: Just one family
            if len(names) == 1:
                title = self._(
                    "Hourglass Chart for %(person)s") % {'person': names[0]}
            else:  # Should be two items in names list
                title = self._("Hourglass Chart for %(father)s and "
                               "%(mother)s") % {
                                   'father': names[0],
                                   'mother': names[1],
                               }
        return title

    def get_parents(self, family_id):
        """ For a family_id, return the father and mother """
        family1 = self.database.get_family_from_gramps_id(family_id)
        father_h = family1.get_father_handle()
        mother_h = family1.get_mother_handle()

        # Missing handles are skipped, so the result may have 0-2 entries.
        parents = [self.database.get_person_from_handle(handle)
                   for handle in [father_h, mother_h] if handle]

        return parents
class TitleNone(TitleNoDisplay):
    """Title class used when the report should display no title."""

    def __init__(self, dbase, doc, locale):
        TitleNoDisplay.__init__(self, doc, "CG2-Title-box")
        self._ = locale.translation.sgettext

    def calc_title(self, persons):
        """Set an empty visible title while keeping a TOC label."""
        # A book's table of contents still needs some text even though
        # nothing is displayed on the chart itself.
        self.mark_text = self._('Hourglass Chart')
        self.text = ''
class TitleDPY(DescendantTitleBase):
    """Title for the person hourglass chart that starts with the parents
    ("Person yes start with parents")."""

    def __init__(self, dbase, doc, locale, name_displayer):
        DescendantTitleBase.__init__(self, dbase, doc, locale, name_displayer)

    def calc_title(self, person_id):
        """Calculate the title of the report"""
        center = self.database.get_person_from_gramps_id(person_id)

        # Prefer naming the center person's parents in the title.
        parents_family = None
        family2_h = center.get_main_parents_family_handle()
        if family2_h:
            parents_family = self.database.get_family_from_handle(family2_h)

        person_list = None
        if parents_family:
            handles = [parents_family.get_father_handle(),
                       parents_family.get_mother_handle()]
            person_list = [self.database.get_person_from_handle(handle)
                           for handle in handles if handle]

        # Fall back to the center person when no parents are known.
        if not person_list:
            person_list = [center]

        self.text = self.descendant_print(person_list)
        self.set_box_height_width()
class TitleDPN(DescendantTitleBase):
    """Title for the person hourglass chart without the parents
    ("Person no start with parents")."""

    def __init__(self, dbase, doc, locale, name_displayer):
        DescendantTitleBase.__init__(self, dbase, doc, locale, name_displayer)

    def calc_title(self, person_id):
        """Calculate the title of the report"""
        center = self.database.get_person_from_gramps_id(person_id)
        self.text = self.descendant_print([center])
        self.set_box_height_width()
class TitleDFY(DescendantTitleBase):
    """Title for the family hourglass chart that starts with the parents
    ("Family yes start with parents")."""

    def __init__(self, dbase, doc, locale, name_displayer):
        DescendantTitleBase.__init__(self, dbase, doc, locale, name_displayer)

    def get_parent_list(self, person):
        """ return a list of my parents. If none, return me """
        if not person:
            return None

        family_h = person.get_main_parents_family_handle()
        family = (self.database.get_family_from_handle(family_h)
                  if family_h else None)

        parent_list = None
        if family:  # family = fathers parents
            handles = [family.get_father_handle(), family.get_mother_handle()]
            parent_list = [self.database.get_person_from_handle(handle)
                           for handle in handles if handle]

        # Fall back to the person themselves when no parents are found.
        return parent_list or [person]

    def calc_title(self, family_id):
        """Calculate the title of the report"""
        my_parents = self.get_parents(family_id)

        dad_parents = self.get_parent_list(my_parents[0])

        mom_parents = []
        if len(my_parents) > 1:
            if not dad_parents:
                dad_parents = self.get_parent_list(my_parents[1])
            else:
                mom_parents = self.get_parent_list(my_parents[1])

        self.text = self.descendant_print(dad_parents, mom_parents)
        self.set_box_height_width()
class TitleDFN(DescendantTitleBase):
    """Title for the family hourglass chart without the parents
    ("Family no start with parents")."""

    def __init__(self, dbase, doc, locale, name_displayer):
        DescendantTitleBase.__init__(self, dbase, doc, locale, name_displayer)

    def calc_title(self, family_id):
        """Calculate the title of the report"""
        parents = self.get_parents(family_id)
        self.text = self.descendant_print(parents)
        self.set_box_height_width()
class TitleF(DescendantTitleBase):
    """Family Chart Title class for the report """
    def __init__(self, dbase, doc, locale, name_displayer):
        DescendantTitleBase.__init__(self, dbase, doc, locale, name_displayer)
    def calc_title(self, family_id):
        """Calculate the title of the report.

        Builds "Family Chart for ..." from the one or two parents of the
        family identified by *family_id*.
        """
        parents = self.get_parents(family_id)
        names = self._get_names(parents, self._nd)
        if len(parents) == 1:
            title = self._(
                "Family Chart for %(person)s") % {'person': names[0]}
        elif len(parents) == 2:
            title = self._(
                "Family Chart for %(father1)s and %(mother1)s") % {
                    'father1': names[0], 'mother1': names[1]}
        #else:
        #    title = str(tmp) + " " + str(len(tmp))
        # NOTE(review): if get_parents() returns an empty list (neither
        # parent handle set on the family), `title` is never bound and the
        # next line raises UnboundLocalError -- confirm whether an empty
        # family can reach this code path.
        self.text = title
        self.set_box_height_width()
class TitleC(DescendantTitleBase):
    """Title class for the Cousin Chart."""

    def __init__(self, dbase, doc, locale, name_displayer):
        DescendantTitleBase.__init__(self, dbase, doc, locale, name_displayer)

    def calc_title(self, family_id):
        """Calculate the title of the report"""
        family = self.database.get_family_from_gramps_id(family_id)

        children = [self.database.get_person_from_handle(kid.ref)
                    for kid in family.get_child_ref_list()]

        # Build the title out of the children's names.
        # Translators: needed for Arabic, ignore otherwise
        cousin_names = self._(', ').join(self._get_names(children, self._nd))

        self.text = self._(
            "Cousin Chart for %(names)s") % {'names': cousin_names}
        self.set_box_height_width()
# -----------------------------------------------------------------------
#
# PART 1. PEDIGREE
#
# -----------------------------------------------------------------------
#------------------------------------------------------------------------
#
# CalcItems (helper class to calculate text)
# make_ancestor_tree (main recursive functions)
#
#------------------------------------------------------------------------
class CalcItems:
""" A helper class to calculate the default box text
and text for each person / marriage
"""
def __init__(self, dbase):
_gui = GuiConnect()
self._gui = _gui
#calculate the printed lines for each box
#str = ""
#if self.get_val('miss_val'):
# str = "_____"
display_repl = _gui.get_val("replace_list")
self.center_use = _gui.get_val("descend_disp") # _gui.get_val("center_uses")
self.disp_father = self.center_use # _gui.get_val("father_disp")
self.disp_mother = self.center_use # _gui.get_val("mother_disp")
self.disp_marr = [_gui.get_val("marr_disp")]
self.__calc_l = | |
# Source: mongodb/dsi -- dsi/tests/test_config.py
# -*- coding: UTF-8 -*-
"""Tests for dsi/common/config.py"""
import os
import unittest
import yaml
from six.moves import range
from six.moves import zip
import dsi.common.whereami as whereami
from dsi.common import config
from dsi.common.config import ConfigDict
from test_lib.fixture_files import FixtureFiles, load_config_dict
from test_lib.io_utils import in_dir
FIXTURE_FILES = FixtureFiles()
def dirmarker(into):
    """chdir into `into` (relative to this file's directory) and return a
    zero-argument function that, when called, restores the previous
    working directory.

    Example usage:

        marker = dirmarker('subdir')
        process_file('foo.txt')  # inside subdir
        marker()
    """
    previous_dir = os.getcwd()
    target = os.path.join(os.path.dirname(os.path.abspath(__file__)), into)
    os.chdir(target)

    def _restore():
        os.chdir(previous_dir)

    return _restore
class InvalidConfigDictTestCase(unittest.TestCase):
    """Test that we're as picky as we claim to be with config keys and values"""

    def test_load_yaml_invalid_keys(self):
        """can't even get bad keys from yaml"""
        with in_dir(FIXTURE_FILES.fixture_file_path("invalid-config")):
            with self.assertRaises(config.InvalidConfigurationException):
                ConfigDict("mongodb_setup").load()

    def test_set_invalid_key(self):
        """can't use conf[key] = X with key invalid"""
        with in_dir(FIXTURE_FILES.fixture_file_path("nested-config")):
            conf = load_config_dict("mongodb_setup")
            self.assertEqual(
                conf["mongodb_setup"]["this"]["is"]["quite"]["deeply"]["nested"], "okay"
            )
            conf["mongodb_setup"]["out"] = {}
            # Hyphenated keys and non-ASCII values are both acceptable.
            conf["mongodb_setup"]["out"]["safe-key"] = "💃"
            self.assertEqual(conf["mongodb_setup"]["out"]["safe-key"], "💃")

    def causes_exception(self, subdict):
        """
        Helper method - assert we get an exception when `subdict` is inserted into an out config
        """
        with in_dir(FIXTURE_FILES.fixture_file_path("nested-config")):
            conf = load_config_dict("mongodb_setup")
            with self.assertRaises(config.InvalidConfigurationException):
                conf["mongodb_setup"]["out"] = {"okay": [subdict]}

    def test_assigns_invalid_space_key(self):
        """spaces not allowed"""
        self.causes_exception(
            {
                "this has a space": "and shouldn't work (because it has a back problem not because it's lazy and entitled"
            }
        )

    def test_assigns_invalid_numeric_key(self):
        """number-only not allowed"""
        self.causes_exception({"1": "woah dude a numeric-only key. get a job, you hippie."})

    def test_assigns_exclamation_point_key(self):
        """! not allowed"""
        self.causes_exception({"hello!": "is it me you're looking for?"})

    def test_assigns_dot_key(self):
        """dots not allowed"""
        self.causes_exception({"so...uh...": "dot dot dot"})

    def test_assigns_slashy_key(self):
        """slashes not allowed"""
        self.causes_exception({"data/logs": "logging kills trees"})

    def test_assigns_invalid_nested_dict_multiple_errors(self):
        """assign invalid key from a nested dict with multiple errors"""
        with in_dir(FIXTURE_FILES.fixture_file_path("nested-config")):
            conf = load_config_dict("mongodb_setup")
            with self.assertRaises(config.InvalidConfigurationException) as context:
                conf["mongodb_setup"]["out"] = {
                    "okay": [
                        {
                            "okay": "this is fine",
                            "not okay": "you're killing me, bro!",
                            "seriously, just stop now": "but all the cool kids are doing it",
                        }
                    ]
                }
            # we're non-normative on what the actual message is, but we
            # do care that all the errored keys are there
            self.assertRegex(str(context.exception), r"not okay")
            self.assertRegex(str(context.exception), r"seriously")

    def causes_id_exception(self, subdict):
        """
        Helper method - assert we get an exception when subdict is inserted into a config.

        Note: These tests explicitly test `validate_id` when called from `_yaml_load` (as opposed to
        when it's called from `assert_valid_ids`).
        """
        with in_dir(FIXTURE_FILES.fixture_file_path("invalid-ids")):
            conf = load_config_dict("mongodb_setup")
            with self.assertRaises(config.InvalidConfigurationException):
                conf["mongodb_setup"]["test"] = {"okay": [subdict]}

    def test_id_is_reserved_word(self):
        """
        Cannot use a reserved word (enumerated in `config.py`) as an id.
        """
        self.causes_id_exception({"id": "pre_task"})

    def test_id_matches_reserved_pattern(self):
        """
        Cannot use a word matching the pattern /on_.*/ as an id.
        """
        self.causes_id_exception({"id": "on_load"})

    def test_id_value_type_incorrect(self):
        """
        Cannot have an id value that is not a scalar.
        """
        self.causes_id_exception({"id": {"wrong": "format"}})

    def test_duplicate_id_same_level(self):
        """
        Cannot have duplicate ids on the same level.
        """
        self.causes_id_exception(
            {"stuff": [{"id": "myname", "hey": "greetings"}, {"id": "myname", "bye": "see ya"}]}
        )

    def test_nested_duplicate_ids(self):
        """
        Ids must be globally unique in config files.
        """
        self.causes_id_exception({"id": "myname", "test": {"id": "myname"}})

    def test_variable_reference_is_invalid_id(self):
        """
        Variable references cannot evaluate to duplicate ids.
        """
        # Here the duplicate only appears after variable expansion, so it is
        # load() itself (not an assignment) that must raise.
        with in_dir(FIXTURE_FILES.fixture_file_path("invalid-ids")):
            with self.assertRaises(config.InvalidConfigurationException):
                conf = ConfigDict("mongodb_setup")
                conf.load()

    def test_variable_reference_contains_invalid_id(self):
        """
        Variable references cannot evaluate to blocks containing duplicate ids.
        """
        with in_dir(FIXTURE_FILES.fixture_file_path("nested-invalid-ids")):
            with self.assertRaises(config.InvalidConfigurationException):
                conf = ConfigDict("mongodb_setup")
                conf.load()

    def test_find_nested_config_dicts(self):
        """
        We check for duplicate ids in lists of lists correctly.
        """
        with in_dir(FIXTURE_FILES.fixture_file_path("invalid-ids-in-lists")):
            with self.assertRaises(config.InvalidConfigurationException):
                conf = ConfigDict("mongodb_setup")
                conf.load()
class ConfigDictTestCase(unittest.TestCase):
"""Unit tests for ConfigDict library."""
def setUp(self):
"""Init a ConfigDict object and load the configuration files from docs/config-specs/"""
self.conf = ConfigDict("mongodb_setup", whereami.dsi_repo_path("docs", "config-specs"))
self.conf.load()
self.assertEqual(self.conf.module, "mongodb_setup")
def test_load_new(self):
"""Test loading ConfigDict with old naming convention .yml files"""
test_conf = ConfigDict(
"bootstrap", whereami.dsi_repo_path("dsi", "tests", "test_config_files", "new_format")
)
test_conf.load()
self.assertFalse("cluster_type" in test_conf.raw["bootstrap"])
self.assertTrue("infrastructure_provisioning" in test_conf.raw["bootstrap"])
self.assertFalse("cluster_type" in test_conf.defaults["bootstrap"])
self.assertTrue("infrastructure_provisioning" in test_conf.defaults["bootstrap"])
def test_none_valued_keys(self):
config_dict = self.conf
self.assertEqual(config_dict["runtime"]["overridden_none"], "hey there")
self.assertEqual(config_dict["runtime"]["override_with_none"], None)
self.assertEqual(config_dict["runtime"]["overridden_dict"], None)
self.assertEqual(config_dict["runtime"]["overridden_list"], None)
with self.assertRaises(KeyError):
config_dict["runtime"]["nonexistant"] # pylint: disable=pointless-statement
def test_traverse_entire_dict(self):
"""Traverse entire dict (also tests that the structure of docs/config-specs/ files are ok)"""
# We actually could compare the result to a constant megadict here, but maintaining that
# would quickly become tedious. In practice, there's huge value just knowing we can traverse
# the entire structure without errors.
str(self.conf)
@unittest.skip("dict(instance_of_ConfigDict) does not work")
def test_cast_as_dict(self):
"""It is possible to cast a ConfigDict to a dict"""
# TODO: this doesn't actually work. Seems like a limitation of python when sub-classing
# native type like dict: http://stackoverflow.com/questions/18317905/overloaded-iter-is-bypassed-when-deriving-from-dict
complete_dict = dict(self.conf)
sub_dict = dict(self.conf["workload_setup"]["tasks"][0]["on_workload_client"])
self.assertEqual(
complete_dict["workload_setup"]["tasks"][0]["on_workload_client"]["retrieve_files"][0],
{"source": "http://url1", "target": "file"},
)
self.assertEqual(
sub_dict["retrieve_files"][0],
{"source": "remote_file_path", "target": "local_file_path"},
)
def test_convert_to_dict(self):
"""It is possible to convert a ConfigDict to a dict with self.as_dict()"""
complete_dict = self.conf.as_dict()
sub_dict = self.conf["workload_setup"]["ycsb"][0]["on_workload_client"].as_dict()
self.assertEqual(
complete_dict["workload_setup"]["ycsb"][0]["on_workload_client"]["retrieve_files"][0],
{"source": "remote_file_path", "target": "local_file_path"},
)
self.assertEqual(
sub_dict["retrieve_files"][0],
{"source": "remote_file_path", "target": "local_file_path"},
)
def test_basic_checks(self):
"""Basic checks"""
self.assert_equal_dicts(
self.conf["workload_setup"]["ycsb"][0]["on_workload_client"]["retrieve_files"][0],
{"source": "remote_file_path", "target": "local_file_path"},
)
expected_result = [{"source": "remote_file_path", "target": "local_file_path"}]
actual_result = self.conf["workload_setup"]["ycsb"][0]["on_workload_client"][
"retrieve_files"
]
self.assertEqual(len(actual_result), len(expected_result))
for actual, expected in zip(actual_result, expected_result):
self.assert_equal_dicts(actual, expected)
self.assert_equal_dicts(
self.conf["infrastructure_provisioning"]["out"]["mongos"][2],
{"public_ip": "192.168.3.11", "private_ip": "10.2.1.102"},
)
self.assertEqual(
self.conf["infrastructure_provisioning"]["out"]["workload_client"][0]["public_ip"],
"172.16.17.32",
)
self.assertEqual(
type(
self.conf["infrastructure_provisioning"]["out"]["workload_client"][0]["public_ip"]
),
type(""),
)
def test_overrides(self):
"""Test value from overrides.yml"""
self.assertEqual(
self.conf["infrastructure_provisioning"]["tfvars"]["configsvr_instance_type"],
"t1.micro",
)
self.assertEqual(
self.conf["infrastructure_provisioning"]["tfvars"].as_dict(),
{
"cluster_name": "shard",
"mongos_instance_type": "c3.8xlarge",
"availability_zone": "us-west-2a",
"workload_instance_count": 1,
"region": "us-west-2",
"image": "amazon2",
"mongod_instance_count": 9,
"configsvr_instance_count": 3,
"mongos_instance_count": 3,
"ssh_key_file": "~/.ssh/linustorvalds.pem",
"ssh_user": "ec2-user",
"mongod_instance_type": "c3.8xlarge",
"ssh_key_name": "linus.torvalds",
"workload_instance_type": "c3.8xlarge",
"tags": {
"Project": "sys-perf",
"owner": "<EMAIL>",
"Variant": "Linux 3-shard cluster",
"expire-on-delta": 2,
},
"configsvr_instance_type": "t1.micro",
"expire-on-delta": 24,
},
)
def test_defaults(self):
"""Test value from defaults.yml"""
self.assertEqual(self.conf["mongodb_setup"]["mongod_config_file"]["net"]["port"], 27017)
self.assertEqual(
self.conf["mongodb_setup"]["mongod_config_file"]["processManagement"]["fork"], True
)
def test_copy(self):
"""Copy value into new python variable"""
out = self.conf["infrastructure_provisioning"]["out"]
self.conf.raw["infrastructure_provisioning"]["out"]["workload_client"][0][
"private_ip"
] = "foo"
out.raw["workload_client"][0]["public_ip"] = "bar"
self.assertTrue(isinstance(out, ConfigDict))
self.assert_equal_lists(
self.conf.raw["infrastructure_provisioning"]["out"]["workload_client"],
[{"public_ip": "bar", "private_ip": "foo"}],
)
self.assert_equal_lists(
self.conf.root["infrastructure_provisioning"]["out"]["workload_client"],
[{"public_ip": "bar", "private_ip": "foo"}],
)
self.assert_equal_lists(
out.raw["workload_client"], [{"public_ip": "bar", "private_ip": "foo"}]
)
self.assert_equal_lists(
out.root["infrastructure_provisioning"]["out"]["workload_client"],
[{"public_ip": "bar", "private_ip": "foo"}],
)
self.assert_equal_dicts(out.overrides, {})
self.assertEqual(out["workload_client"][0]["public_ip"], "bar")
def test_items(self):
actual = {k for k, v in self.conf["bootstrap"].items()}
expect = {
"production",
"analysis",
"workload_setup",
"terraform",
"infrastructure_provisioning",
"overrides",
"storageEngine",
"test_control",
"platform",
"mongodb_setup",
}
self.assertEqual(actual, expect)
def test_variable_references(self):
"""Test ${variable.references}"""
self.assertEqual(
self.conf["mongodb_setup"]["topology"][0]["mongos"][0]["private_ip"], "10.2.1.100"
)
self.assertEqual(
self.conf["mongodb_setup"]["meta"]["hosts"],
"10.2.1.100:27017,10.2.1.101:27017,10.2.1.102:27017",
)
# reference to reference
self.assertEqual(self.conf["mongodb_setup"]["meta"]["hostname"], "10.2.1.100")
# recursive reference ${a.${foo}.c} where "foo: b"
value = self.conf["test_control"]["run"][0]["workload_config"]["tests"]["default"][2][
"insert_vector"
]["thread_levels"]
expected = [1, 8, 16]
self.assertEqual(value, expected)
def test_variable_reference_in_list(self):
"""Test ${variable.references} in a list"""
self.assertEqual(self.conf["mongodb_setup"]["validate"]["primaries"][0], "10.2.1.1:27017")
    def test_variable_reference_value_error(self):
        """Test ${variable.references} that point to nonexisting value PERF-1705"""
        # PERF-1705 happened when infrastructure_provisioning.out doesn't exist and variable
        # references point to it.
        # Remove the referenced subtree so that existing references dangle.
        del self.conf.raw["infrastructure_provisioning"]["out"]
        # ConfigDict is late binding
        # assert_valid_ids() (used in load()) should not raise for such variable references.
        self.conf.assert_valid_ids()
        # Otoh actively accessing a field with such a variable reference must raise
        with self.assertRaises(ValueError):
            _ = self.conf["mongodb_setup"]["meta"]["mongodb_url"]
        # As must other methods where user causes entire ConfigDict to be traversed
        with self.assertRaises(ValueError):
            _ = self.conf.as_dict()
        # str() also traverses (and therefore resolves) every value.
        with self.assertRaises(ValueError):
            _ = str(self.conf)
    def test_per_node_mongod_config(self):
        """Test magic per_node_mongod_config() (merging the common mongod_config_file with per node config_file)"""
        mycluster = self.conf["mongodb_setup"]["topology"][0]
        mongod = mycluster["shard"][2]["mongod"][0]
        # Node without its own config_file: receives the merged common config.
        self.assert_equal_dicts(
            mycluster["shard"][0]["mongod"][0]["config_file"],
            {
                "replication": {"replSetName": "override-rs"},
                "systemLog": {"path": "data/logs/mongod.log", "destination": "file"},
                "setParameter": {"enableTestCommands": True, "foo": True},
                "net": {"port": 27017, "bindIp": "0.0.0.0"},
                "processManagement": {"fork": True},
                "storage": {"engine": "wiredTiger", "dbPath": "data/dbs"},
            },
        )
        # Node with a per-node config_file: common config merged with the
        # node-level override (storage.engine becomes inMemory).
        self.assert_equal_dicts(
            mycluster["shard"][2]["mongod"][0]["config_file"],
            {
                "replication": {"replSetName": "override-rs"},
                "systemLog": {"path": "data/logs/mongod.log", "destination": "file"},
                "setParameter": {"enableTestCommands": True, "foo": True},
                "net": {"port": 27017, "bindIp": "0.0.0.0"},
                "processManagement": {"fork": True},
                "storage": {"engine": "inMemory", "dbPath": "data/dbs"},
            },
        )
        # Only the override itself is stored in .overrides.
        self.assert_equal_dicts(
            mycluster["shard"][2]["mongod"][0]["config_file"].overrides,
            {"storage": {"engine": "inMemory"}},
        )
        # Spot-check individual merged values.
        self.assertEqual(
            mycluster["shard"][2]["mongod"][0]["config_file"]["storage"]["engine"], "inMemory"
        )
        self.assertEqual(mycluster["shard"][2]["mongod"][0]["config_file"]["net"]["port"], 27017)
        self.assertEqual(
            mycluster["shard"][2]["mongod"][0]["config_file"]["net"]["bindIp"], "0.0.0.0"
        )
        self.assertEqual(
            mycluster["shard"][2]["mongod"][0]["config_file"]["processManagement"]["fork"], True
        )
        # .raw keeps unresolved ${variable.references} and only the per-node override.
        self.assertEqual(
            mongod.raw,
            {
                "public_ip": "${infrastructure_provisioning.out.mongod.6.public_ip}",
                "mongodb_binary_archive": "${mongodb_setup.mongodb_binary_archive}",
                "config_file": {"storage": {"engine": "inMemory"}},
                "private_ip": "${infrastructure_provisioning.out.mongod.6.private_ip}",
            },
        )
        # Standalone node
        self.assert_equal_dicts(
            self.conf["mongodb_setup"]["topology"][2]["config_file"],
            {
                "replication": {"replSetName": "override-rs"},
                "systemLog": {"path": "data/logs/mongod.log", "destination": "file"},
                "setParameter": {"enableTestCommands": True, "foo": True},
                "net": {"port": 27017, "bindIp": "0.0.0.0"},
                "processManagement": {"fork": True},
                "storage": {"engine": "wiredTiger", "dbPath": "data/dbs"},
            },
        )
        # self.keys() should return a 'config_file' key
        self.assertTrue("config_file" in mycluster["shard"][0]["mongod"][0].keys())
        self.assertTrue("config_file" in mycluster["shard"][2]["mongod"][0].keys())
        self.assertTrue("config_file" in self.conf["mongodb_setup"]["topology"][2].keys())
        # ...but not for a sharded-cluster topology entry itself.
        self.assertFalse("config_file" in self.conf["mongodb_setup"]["topology"][0].keys())
def test_replset_rs_conf(self):
"""Test magic rs_conf for a replset"""
mycluster = self.conf["mongodb_setup"]["topology"][0]
rs_conf = mycluster["shard"][2]["rs_conf"]
self.assertEqual(rs_conf["protocolVersion"], 1)
myreplset = self.conf["mongodb_setup"]["topology"][1]
rs_conf = myreplset["rs_conf"]
self.assertEqual(rs_conf["settings"]["chainingAllowed"], False)
self.assertEqual(rs_conf["protocolVersion"], 1)
# conf.keys() should return a 'config_file' key for replsets, not | |
##########################################################################
#
# Copyright (c) 2017, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of Image Engine Design Inc. nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import threading
import stat
import imath
import IECore
import Gaffer
import GafferTest
import GafferImage
import GafferImageTest
class CatalogueTest( GafferImageTest.ImageTestCase ) :
@staticmethod
def sendImage( image, catalogue, extraParameters = {}, waitForSave = True, close = True ) :
if catalogue["directory"].getValue() and waitForSave :
# When the image has been received, the Catalogue will
# save it to disk on a background thread, and we need
# to wait for that to complete.
with GafferTest.ParallelAlgoTest.ExpectedUIThreadCall() :
return GafferImageTest.DisplayTest.Driver.sendImage( image, GafferImage.Catalogue.displayDriverServer().portNumber(), extraParameters, close = close )
else :
return GafferImageTest.DisplayTest.Driver.sendImage( image, GafferImage.Catalogue.displayDriverServer().portNumber(), extraParameters, close = close )
	def testImages( self ) :

		"""Images added explicitly to the Catalogue are served in order via `imageIndex`."""

		# Load reference images alongside matching ImageReaders to compare against.
		images = []
		readers = []
		for i, fileName in enumerate( [ "checker.exr", "blurRange.exr", "noisyRamp.exr", "resamplePatterns.exr" ] ) :
			images.append( GafferImage.Catalogue.Image.load( "${GAFFER_ROOT}/python/GafferImageTest/images/" + fileName ) )
			readers.append( GafferImage.ImageReader() )
			readers[-1]["fileName"].setValue( images[-1]["fileName"].getValue() )

		c = GafferImage.Catalogue()
		for image in images :
			c["images"].addChild( image )
		self.assertImagesEqual( readers[0]["out"], c["out"], ignoreMetadata = True )

		def assertExpectedImages() :

			# Each catalogue entry, selected via `imageIndex`, must match its reader.
			for i, reader in enumerate( readers ) :
				c["imageIndex"].setValue( i )
				self.assertImagesEqual( readers[i]["out"], c["out"], ignoreMetadata = True )

		assertExpectedImages()

		# Remove images from varying positions (middle, front), keeping the
		# reader list in sync, and re-check the survivors each time.
		for i in [ 1, 0, 1, 0 ] :
			c["images"].removeChild( c["images"][i] )
			del readers[i]
			assertExpectedImages()
def testDescription( self ) :
c = GafferImage.Catalogue()
c["images"].addChild( c.Image.load( "${GAFFER_ROOT}/python/GafferImageTest/images/blurRange.exr" ) )
self.assertEqual( c["out"]["metadata"].getValue()["ImageDescription"].value, "" )
c["images"][-1]["description"].setValue( "ddd" )
self.assertEqual( c["out"]["metadata"].getValue()["ImageDescription"].value, "ddd" )
def testDescriptionLoading( self ) :
c = GafferImage.Constant()
m = GafferImage.ImageMetadata()
m["in"].setInput( c["out"] )
m["metadata"].addChild( Gaffer.NameValuePlug( "ImageDescription", "i am a description" ) )
w = GafferImage.ImageWriter()
w["in"].setInput( m["out"] )
w["fileName"].setValue( os.path.join( self.temporaryDirectory(), "description.exr" ) )
w["task"].execute()
r = GafferImage.ImageReader()
r["fileName"].setValue( w["fileName"].getValue() )
i = GafferImage.Catalogue.Image.load( w["fileName"].getValue() )
self.assertEqual( i["description"].getValue(), "i am a description" )
	def testSerialisation( self ) :

		"""Catalogue images and outputs survive a serialise/execute round trip."""

		s = Gaffer.ScriptNode()
		s["c"] = GafferImage.Catalogue()
		for i, fileName in enumerate( [ "checker.exr", "blurRange.exr" ] ) :
			s["c"]["images"].addChild(
				GafferImage.Catalogue.Image.load(
					"${GAFFER_ROOT}/python/GafferImageTest/images/" + fileName,
				)
			)

		# Round-trip the script and compare per-image file names and outputs.
		s2 = Gaffer.ScriptNode()
		s2.execute( s.serialise() )

		self.assertEqual( len( s["c"]["images"] ), len( s2["c"]["images"] ) )
		for i in range( 0, len( s["c"]["images"] ) ) :
			self.assertEqual(
				s["c"]["images"][i]["fileName"].getValue(),
				s2["c"]["images"][i]["fileName"].getValue()
			)
			s["c"]["imageIndex"].setValue( i )
			s2["c"]["imageIndex"].setValue( i )
			self.assertImagesEqual( s["c"]["out"], s2["c"]["out"] )

		# Serialisation must be stable across round trips, and must not
		# rely on `setInput` for the catalogue's own plugs.
		s3 = Gaffer.ScriptNode()
		s3.execute( s2.serialise() )
		self.assertEqual( s3.serialise(), s2.serialise() )
		self.assertFalse( "setInput" in s3.serialise( filter = Gaffer.StandardSet( [ s3["c"] ] ) ) )
def testDisabling( self ) :
c1 = GafferImage.Catalogue()
c1["images"].addChild(
GafferImage.Catalogue.Image.load( "${GAFFER_ROOT}/python/GafferImageTest/images/checker.exr" )
)
c2 = GafferImage.Catalogue()
c2["images"].addChild(
GafferImage.Catalogue.Image.load( "${GAFFER_ROOT}/python/GafferImageTest/images/checker.exr" )
)
self.assertImagesEqual( c1["out"], c2["out"] )
c2["enabled"].setValue( False )
self.assertNotEqual( c2["out"]["format"].getValue(), c1["out"]["format"].getValue() )
self.assertNotEqual( c2["out"]["dataWindow"].getValue(), c1["out"]["dataWindow"].getValue() )
self.assertEqual( c2["out"]["dataWindow"].getValue(), imath.Box2i() )
disabledConstant = GafferImage.Constant()
disabledConstant["enabled"].setValue( False )
self.assertImagesEqual( c2["out"], disabledConstant["out"] )
	def testDisplayDriver( self ) :

		"""Images sent via the display driver are appended and become current."""

		c = GafferImage.Catalogue()
		self.assertEqual( len( c["images"] ), 0 )

		r = GafferImage.ImageReader()
		r["fileName"].setValue( "${GAFFER_ROOT}/python/GafferImageTest/images/checker.exr" )
		self.sendImage( r["out"], c )

		# The received image is in-memory only (empty fileName) and selected.
		self.assertEqual( len( c["images"] ), 1 )
		self.assertEqual( c["images"][0]["fileName"].getValue(), "" )
		self.assertEqual( c["imageIndex"].getValue(), 0 )
		self.assertImagesEqual( r["out"], c["out"], ignoreMetadata = True )

		# A second send appends and the selection follows it.
		r["fileName"].setValue( "${GAFFER_ROOT}/python/GafferImageTest/images/blurRange.exr" )
		self.sendImage( r["out"], c )
		self.assertEqual( len( c["images"] ), 2 )
		self.assertEqual( c["images"][1]["fileName"].getValue(), "" )
		self.assertEqual( c["imageIndex"].getValue(), 1 )
		self.assertImagesEqual( r["out"], c["out"], ignoreMetadata = True )
def testDisplayDriverAOVGrouping( self ) :
c = GafferImage.Catalogue()
self.assertEqual( len( c["images"] ), 0 )
aov1 = GafferImage.Constant()
aov1["format"].setValue( GafferImage.Format( 100, 100 ) )
aov1["color"].setValue( imath.Color4f( 1, 0, 0, 1 ) )
aov2 = GafferImage.Constant()
aov2["format"].setValue( GafferImage.Format( 100, 100 ) )
aov2["color"].setValue( imath.Color4f( 0, 1, 0, 1 ) )
aov2["layer"].setValue( "diffuse" )
self.sendImage( aov1["out"], c )
self.sendImage( aov2["out"], c )
self.assertEqual( len( c["images"] ), 1 )
self.assertEqual(
set( c["out"]["channelNames"].getValue() ),
set( aov1["out"]["channelNames"].getValue() ) | set( aov2["out"]["channelNames"].getValue() )
)
	def testDisplayDriverSaveToFile( self ) :

		"""With `directory` set, received images are saved to disk and survive serialisation."""

		s = Gaffer.ScriptNode()
		s["c"] = GafferImage.Catalogue()
		s["c"]["directory"].setValue( os.path.join( self.temporaryDirectory(), "catalogue" ) )

		r = GafferImage.ImageReader()
		r["fileName"].setValue( "${GAFFER_ROOT}/python/GafferImageTest/images/blurRange.exr" )
		self.sendImage( r["out"], s["c"] )

		# The image now has a backing file inside `directory`.
		self.assertEqual( len( s["c"]["images"] ), 1 )
		self.assertEqual( os.path.dirname( s["c"]["images"][0]["fileName"].getValue() ), s["c"]["directory"].getValue() )
		# NOTE(review): tolerance presumably covers precision loss when the
		# image is written to disk — confirm against the save format.
		self.assertImagesEqual( s["c"]["out"], r["out"], ignoreMetadata = True, maxDifference = 0.0003 )

		# Because it was saved, the image survives a serialisation round trip.
		s2 = Gaffer.ScriptNode()
		s2.execute( s.serialise() )
		self.assertEqual( len( s2["c"]["images"] ), 1 )
		self.assertEqual( s2["c"]["images"][0]["fileName"].getValue(), s["c"]["images"][0]["fileName"].getValue() )
		self.assertImagesEqual( s2["c"]["out"], r["out"], ignoreMetadata = True, maxDifference = 0.0003 )
	def testCatalogueName( self ) :

		"""Sent images are routed to the catalogue whose `name` matches `catalogue:name`."""

		c1 = GafferImage.Catalogue()
		c2 = GafferImage.Catalogue()
		c2["name"].setValue( "catalogue2" )

		self.assertEqual( len( c1["images"] ), 0 )
		self.assertEqual( len( c2["images"] ), 0 )

		# Two distinguishable source images.
		constant1 = GafferImage.Constant()
		constant2 = GafferImage.Constant()
		constant1["format"].setValue( GafferImage.Format( 100, 100 ) )
		constant2["format"].setValue( GafferImage.Format( 100, 100 ) )
		constant1["color"].setValue( imath.Color4f( 1, 0, 0, 1 ) )
		constant2["color"].setValue( imath.Color4f( 0, 1, 0, 1 ) )

		# No "catalogue:name" parameter: goes to the default catalogue.
		self.sendImage(
			constant1["out"],
			c1,
		)
		# Explicit "catalogue:name" routes to the named catalogue.
		self.sendImage(
			constant2["out"],
			c2,
			extraParameters = {
				"catalogue:name" : "catalogue2",
			}
		)

		self.assertEqual( len( c1["images"] ), 1 )
		self.assertEqual( len( c2["images"] ), 1 )

		self.assertImagesEqual( c1["out"], constant1["out"], ignoreMetadata = True )
		self.assertImagesEqual( c2["out"], constant2["out"], ignoreMetadata = True )
def testDontSerialiseUnsavedRenders( self ) :
s = Gaffer.ScriptNode()
s["c"] = GafferImage.Catalogue()
constant = GafferImage.Constant()
constant["format"].setValue( GafferImage.Format( 100, 100 ) )
self.sendImage(
constant["out"],
s["c"],
)
self.assertEqual( len( s["c"]["images"] ), 1 )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( len( s2["c"]["images"] ), 0 )
	def testPromotion( self ) :

		"""A Catalogue's plugs keep working when promoted onto an enclosing Box."""

		s = Gaffer.ScriptNode()
		s["b"] = Gaffer.Box()
		s["b"]["c"] = GafferImage.Catalogue()
		promotedImages = Gaffer.PlugAlgo.promote( s["b"]["c"]["images"] )
		promotedImageIndex = Gaffer.PlugAlgo.promote( s["b"]["c"]["imageIndex"] )
		promotedOut = Gaffer.PlugAlgo.promote( s["b"]["c"]["out"] )

		# Reference images plus matching readers to compare against.
		images = []
		readers = []
		for i, fileName in enumerate( [ "checker.exr", "blurRange.exr", "noisyRamp.exr" ] ) :
			images.append( GafferImage.Catalogue.Image.load( "${GAFFER_ROOT}/python/GafferImageTest/images/" + fileName ) )
			readers.append( GafferImage.ImageReader() )
			readers[-1]["fileName"].setValue( images[-1]["fileName"].getValue() )

		# Adding through the promoted plug behaves like adding directly.
		for image in images :
			promotedImages.addChild( image )
		self.assertImagesEqual( readers[0]["out"], promotedOut, ignoreMetadata = True )

		for i, reader in enumerate( readers ) :
			promotedImageIndex.setValue( i )
			self.assertImagesEqual( readers[i]["out"], promotedOut, ignoreMetadata = True )

		# The promoted setup must survive serialisation — twice over.
		s2 = Gaffer.ScriptNode()
		s2.execute( s.serialise() )

		for i, reader in enumerate( readers ) :
			s2["b"]["imageIndex"].setValue( i )
			self.assertImagesEqual( readers[i]["out"], s2["b"]["out"], ignoreMetadata = True )

		s3 = Gaffer.ScriptNode()
		s3.execute( s.serialise() )

		for i, reader in enumerate( readers ) :
			s3["b"]["imageIndex"].setValue( i )
			self.assertImagesEqual( readers[i]["out"], s3["b"]["out"], ignoreMetadata = True )
	def testDisplayDriverAndPromotion( self ) :

		"""Images sent via the display driver are visible through promoted plugs."""

		s = Gaffer.ScriptNode()
		s["b"] = Gaffer.Box()
		s["b"]["c"] = GafferImage.Catalogue()
		# A directory is set, so received images are saved and serialisable.
		s["b"]["c"]["directory"].setValue( os.path.join( self.temporaryDirectory(), "catalogue" ) )
		promotedImages = Gaffer.PlugAlgo.promote( s["b"]["c"]["images"] )
		promotedImageIndex = Gaffer.PlugAlgo.promote( s["b"]["c"]["imageIndex"] )
		promotedOut = Gaffer.PlugAlgo.promote( s["b"]["c"]["out"] )

		r = GafferImage.ImageReader()
		r["fileName"].setValue( "${GAFFER_ROOT}/python/GafferImageTest/images/checker.exr" )
		self.sendImage( r["out"], s["b"]["c"] )

		# The promoted plugs reflect the newly received image.
		self.assertEqual( len( promotedImages ), 1 )
		self.assertEqual( promotedImageIndex.getValue(), 0 )
		self.assertImagesEqual( r["out"], promotedOut, ignoreMetadata = True )

		r["fileName"].setValue( "${GAFFER_ROOT}/python/GafferImageTest/images/blurRange.exr" )
		self.sendImage( r["out"], s["b"]["c"] )

		self.assertEqual( len( promotedImages ), 2 )
		self.assertEqual( promotedImageIndex.getValue(), 1 )
		self.assertImagesEqual( r["out"], promotedOut, ignoreMetadata = True )

		# Both saved images and the current index survive serialisation.
		s2 = Gaffer.ScriptNode()
		s2.execute( s.serialise() )

		self.assertEqual( len( s2["b"]["images"] ), 2 )
		self.assertEqual( s2["b"]["imageIndex"].getValue(), 1 )
		self.assertImagesEqual( promotedOut, s2["b"]["c"]["out"], ignoreMetadata = True, maxDifference = 0.0003 )

		s3 = Gaffer.ScriptNode()
		s3.execute( s2.serialise() )

		self.assertEqual( len( s3["b"]["images"] ), 2 )
		self.assertEqual( s3["b"]["imageIndex"].getValue(), 1 )
		self.assertImagesEqual( promotedOut, s3["b"]["c"]["out"], ignoreMetadata = True, maxDifference = 0.0003 )
def testDontSavePromotedUnsavedRenders( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["c"] = GafferImage.Catalogue()
promotedImages = Gaffer.PlugAlgo.promote( s["b"]["c"]["images"] )
promotedImageIndex = Gaffer.PlugAlgo.promote( s["b"]["c"]["imageIndex"] )
promotedOut = Gaffer.PlugAlgo.promote( s["b"]["c"]["out"] )
r = GafferImage.ImageReader()
r["fileName"].setValue( "${GAFFER_ROOT}/python/GafferImageTest/images/checker.exr" )
self.sendImage( r["out"], s["b"]["c"] )
self.assertEqual( len( promotedImages ), 1 )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( len( s2["b"]["images"] ), 0 )
	def testUndoRedo( self ) :

		"""Adding an image inside an UndoScope can be undone and redone."""

		s = Gaffer.ScriptNode()
		s["c"] = GafferImage.Catalogue()
		s["c"]["images"].addChild( s["c"].Image.load( "${GAFFER_ROOT}/python/GafferImageTest/images/checker.exr" ) )

		r = GafferImage.ImageReader()
		r["fileName"].setValue( "${GAFFER_ROOT}/python/GafferImageTest/images/blurRange.exr" )

		# State before the undoable edit: one image, index 0.
		def assertPreconditions() :

			self.assertEqual( len( s["c"]["images"] ), 1 )
			self.assertEqual( s["c"]["imageIndex"].getValue(), 0 )

		assertPreconditions()

		# The edit is recorded on the undo stack via the UndoScope.
		with Gaffer.UndoScope( s ) :
			s["c"]["images"].addChild( s["c"].Image.load( r["fileName"].getValue() ) )
			s["c"]["imageIndex"].setValue( 1 )

		# State after the edit: two images, index 1, output matches the reader.
		def assertPostConditions() :

			self.assertEqual( len( s["c"]["images"] ), 2 )
			self.assertEqual( s["c"]["imageIndex"].getValue(), 1 )
			self.assertImagesEqual( s["c"]["out"], r["out"], ignoreMetadata = True )

		assertPostConditions()

		# Undo and redo must toggle cleanly between the two states.
		s.undo()
		assertPreconditions()

		s.redo()
		assertPostConditions()

		s.undo()
		assertPreconditions()
	def testGILManagement( self ) :

		"""Receiving an image must not deadlock when C++ threads evaluate a python expression."""

		# Make a network where a Catalogue
		# is merged with an image that depends
		# on a python expression.
		s = Gaffer.ScriptNode()
		s["catalogue"] = GafferImage.Catalogue()

		s["constant"] = GafferImage.Constant()
		s["expression"] = Gaffer.Expression()
		s["expression"].setExpression( 'parent["constant"]["color"]["r"] = context["image:tileOrigin"].x' )

		s["merge"] = GafferImage.Merge()
		s["merge"]["in"][0].setInput( s["catalogue"]["out"] )
		s["merge"]["in"][1].setInput( s["constant"]["out"] )

		# Arrange to generate the resulting image from C++
		# threads whenever it is dirtied.
		processTilesConnection = Gaffer.ScopedConnection( GafferImageTest.connectProcessTilesToPlugDirtiedSignal( s["merge"]["out"] ) )

		# Send an image to the catalogue to demonstrate that
		# we do not deadlock on the GIL.
		r = GafferImage.ImageReader()
		r["fileName"].setValue( "${GAFFER_ROOT}/python/GafferImageTest/images/checker.exr" )
		self.sendImage( r["out"], s["catalogue"] )
def testCacheReuse( self ) :
# Send an image to the catalogue, and also
# capture the display driver that we used to
# send it.
c = GafferImage.Catalogue()
c["directory"].setValue( os.path.join( self.temporaryDirectory(), "catalogue" ) )
drivers = GafferTest.CapturingSlot( GafferImage.Display.driverCreatedSignal() )
r = GafferImage.ImageReader()
r["fileName"].setValue( "${GAFFER_ROOT}/python/GafferImageTest/images/checker.exr" )
self.sendImage( r["out"], c )
self.assertEqual( len( drivers ), 1 )
# The image will have been saved to disk so it can persist between sessions,
# and the Catalogue should have dropped any reference | |
2)
rot_mat = cv2.getRotationMatrix2D(image_center, angle_deg, 1.0)
result = cv2.warpAffine(
image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR
)
return result
def crop_rectangle(
    image: np.ndarray, crop: Tuple[int, int, int, int]
) -> np.ndarray:
    """Crop ``image`` to (x_min, x_max, y_min, y_max), clamping to the image bounds."""
    max_y = len(image) - 1
    max_x = len(image[0]) - 1
    y_start = compute.clamp(crop[2], 0, max_y)
    y_end = compute.clamp(crop[3], 0, max_y)
    x_start = compute.clamp(crop[0], 0, max_x)
    x_end = compute.clamp(crop[1], 0, max_x)
    return image[y_start:y_end, x_start:x_end]
def number_channels(image: np.ndarray) -> int:
    """Return the number of channels: 1 for a 2-D image, the last axis size for 3-D."""
    if image.ndim not in (2, 3):
        raise Exception("Failed to found the number of channels.")
    return 1 if image.ndim == 2 else image.shape[-1]
def force_image_to_be_grayscale(
    image: np.ndarray, blur_kernel_size: Tuple[int, int]
) -> np.ndarray:
    """Return a blurred, single-channel copy of ``image``."""
    grayscale = (
        image.copy()
        if number_channels(image) == 1
        else convertion_en_niveau_de_gris(image)
    )
    return cv2.blur(grayscale, blur_kernel_size)
def draw_lines_from_hough_lines(
    image: np.ndarray,
    lines: np.ndarray,
    color: Tuple[int, int, int],
    width: int,
) -> np.ndarray:
    """Draw every Hough segment in ``lines`` onto a colour copy of ``image``."""
    canvas = convertion_en_couleur(image)
    for segments in lines:
        for start_x, start_y, end_x, end_y in segments:
            cv2.line(
                canvas,
                (start_x, start_y),
                (end_x, end_y),
                color,
                width,
            )
    return canvas
def get_area(image: np.ndarray) -> int:
    """Return the pixel count (height * width) of ``image``."""
    height, width = image.shape[:2]
    return height * width
def get_hw(image: np.ndarray) -> Tuple[int, int]:
    """Return the (height, width) pair of ``image``."""
    height, width = image.shape[0], image.shape[1]
    return (height, width)
def remove_border_in_contours(
    contours: List[np.ndarray], border_size: int, image: np.ndarray
) -> List[np.ndarray]:
    """Shift each contour by -border_size and clip its points inside ``image``."""
    max_y = image.shape[0] - 1
    max_x = image.shape[1] - 1

    def shift_and_clip(contour: np.ndarray) -> np.ndarray:
        # Subtraction yields a new array, so the caller's contour is untouched.
        shifted = contour - border_size
        shifted[:, 0, 0] = np.clip(shifted[:, 0, 0], 0, max_x)
        shifted[:, 0, 1] = np.clip(shifted[:, 0, 1], 0, max_y)
        return shifted

    return [shift_and_clip(contour) for contour in contours]
def split_image(
    image: np.ndarray, angle: Angle, posx: int
) -> Tuple[np.ndarray, np.ndarray]:
    """Split ``image`` into left and right pages along an angled line.

    The split line starts at x=``posx`` on the top edge and follows ``angle``
    down to the bottom edge.

    Args:
        image (np.ndarray): image to split.
        angle (Angle): angle of the split line.
        posx (int): x position of the split line on the top edge.

    Returns:
        Tuple[np.ndarray, np.ndarray]: cropped left page and right page.
    """
    height, width = get_hw(image)
    toppoint = (posx, 0)
    bottompoint = compute.get_bottom_point_from_alpha_posx(angle, posx, height)
    # Define the mask separating the left page from the right page: a filled
    # polygon covering everything left of the split line.
    mask = np.zeros((height, width), np.uint8)
    pts = np.array(
        [
            [0, 0],
            [toppoint[0], 0],
            [toppoint[0], toppoint[1]],
            [bottompoint[0], bottompoint[1]],
            [bottompoint[0], height - 1],
            [0, height - 1],
        ]
    )
    mask2 = cv2.drawContours(
        mask, np.asarray([pts], dtype=np.int32), 0, 255, -1
    )
    page_gauche = image.copy()
    page_droite = image.copy()
    # Apply the mask: zero out the right side on one copy, the left on the other.
    page_gauche[mask2 == 0] = 0
    page_droite[mask2 > 0] = 0
    # Crop each copy down to its own half.
    page_gauche_0 = crop_rectangle(
        page_gauche,
        (0, np.maximum(toppoint[0], bottompoint[0]) - 1, 0, height - 1),
    )
    page_droite_0 = crop_rectangle(
        page_droite,
        (np.minimum(toppoint[0], bottompoint[0]), width, 0, height - 1),
    )
    # Return the cropped pages.
    return page_gauche_0, page_droite_0
def convertion_en_couleur(image: np.ndarray) -> np.ndarray:
    """Return a colour (3-channel) copy of ``image``; grayscale input is expanded."""
    if image.ndim != 2:
        # Already a 8 bit image.
        return image.copy()
    return cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
def add_border_to_match_size(
    image: np.ndarray,
    paper_size_wh_cm: Tuple[float, float],
    crop: Tuple[int, int, int, int],
    shape_wh: Tuple[int, int],
    dpi: int,
) -> Tuple[int, int, int, int]:
    """Compute (top, bottom, left, right) borders so ``image`` fills the paper size.

    The vertical split of the missing pixels follows the ratio of the top and
    bottom margins left by the previous crop; the horizontal split is even.

    Raises:
        Exception: when the image is already larger than the requested paper.
    """
    height, width = image.shape[0], image.shape[1]
    marge_haute_px = crop[2]
    marge_basse_px = shape_wh[1] - 1 - crop[3]

    missing_width = paper_size_wh_cm[0] / 2.54 * dpi - width
    if missing_width < 0:
        raise Exception("marge", "marge_gauche_px")
    left = int(missing_width / 2.0)
    right = int(missing_width / 2.0)

    missing_height = paper_size_wh_cm[1] / 2.54 * dpi - height
    if missing_height < 0:
        raise Exception("marge", "marge_haute_px")
    # If no crop at the previous operation, add the same value to the
    # top and the bottom.
    if marge_haute_px == 0 and marge_basse_px == 0:
        marge_haute_px = 1
        marge_basse_px = 1
    ratio_top = marge_haute_px / (marge_haute_px + marge_basse_px)
    top = int(missing_height * ratio_top)
    ratio_bottom = marge_basse_px / (marge_haute_px + marge_basse_px)
    bottom = int(missing_height * ratio_bottom)
    return (top, bottom, left, right)
def find_longest_lines_in_border(
    shape: Tuple[int, int], epsilon: int, cnt: np.ndarray
) -> Tuple[Tuple[int, int], Tuple[int, int], Tuple[int, int], Tuple[int, int]]:
    """For each image border, find the extent covered by contour segments on it.

    A contour segment counts as lying on a border when both of its endpoints
    are within ``epsilon`` pixels of that border.

    Args:
        shape (Tuple[int, int]): (height, width) of the image.
        epsilon (int): distance tolerance to the border, in pixels.
        cnt (np.ndarray): contour as an OpenCV point array.

    Returns:
        Tuple: ((left_top, left_bottom), (right_top, right_bottom),
        (top_left, top_right), (bottom_left, bottom_right)) — min/max
        coordinates reached along each of the four borders.
    """
    height, width = shape
    # Start with "inverted" extremes so that any matching segment updates them.
    left_top = height
    left_bottom = 0
    right_top = height
    right_bottom = 0
    top_left = width
    top_right = 0
    bottom_left = width
    bottom_right = 0
    # Iterate over consecutive point pairs of the (closed) contour.
    for pt1, pt2 in compute.iterator_zip_n_n_1(cnt):
        point1_x, point1_y = pt1[0]
        point2_x, point2_y = pt2[0]
        # Segment on the left border.
        if point1_x <= epsilon and point2_x <= epsilon:
            left_top = min(left_top, point1_y, point2_y)
            left_bottom = max(left_bottom, point1_y, point2_y)
        # Segment on the top border.
        if point1_y <= epsilon and point2_y <= epsilon:
            top_left = min(top_left, point1_x, point2_x)
            top_right = max(top_right, point1_x, point2_x)
        # Segment on the right border.
        if point1_x >= width - 1 - epsilon and point2_x >= width - 1 - epsilon:
            right_top = min(right_top, point1_y, point2_y)
            right_bottom = max(right_bottom, point1_y, point2_y)
        # Segment on the bottom border.
        if (
            point1_y >= height - 1 - epsilon
            and point2_y >= height - 1 - epsilon
        ):
            bottom_left = min(bottom_left, point1_x, point2_x)
            bottom_right = max(bottom_right, point1_x, point2_x)
    return (
        (left_top, left_bottom),
        (right_top, right_bottom),
        (top_left, top_right),
        (bottom_left, bottom_right),
    )
def insert_border_in_mask(
    cnt: np.ndarray,
    threshold2: np.ndarray,
    mask_border_only: np.ndarray,
    epsilon: Tuple[int, Angle],
    page_angle: Angle,
) -> None:
    """Blank out border regions of ``mask_border_only`` delimited by contour lines.

    Approximates ``cnt`` by straight segments, keeps those that are nearly
    vertical or horizontal relative to ``page_angle``, and for each one blanks
    (draws black over) the region between it and the nearest image edge —
    but only when that region is almost uniformly black or white in
    ``threshold2``.

    Args:
        cnt (np.ndarray): contour of the detected page.
        threshold2 (np.ndarray): thresholded image used to validate candidates.
        mask_border_only (np.ndarray): mask modified in place via drawContours.
        epsilon (Tuple[int, Angle]): pixel tolerance for border proximity, and
            angular tolerance for vertical/horizontal classification.
        page_angle (Angle): orientation of the page.
    """
    # Maximum fraction of "minority" pixels tolerated in a candidate region.
    __pourcentage_white_allowed__ = 0.015

    epsilon_border, epsilon_angle = epsilon
    height, width = get_hw(threshold2)
    # Drop contour points that lie within epsilon_border of the image edges.
    cnt2 = cnt[cnt[:, 0, 0] > epsilon_border]
    cnt3 = cnt2[cnt2[:, 0, 0] < width - 1 - epsilon_border]
    cnt4 = cnt3[cnt3[:, 0, 1] > epsilon_border]
    cnt5 = cnt4[cnt4[:, 0, 1] < height - 1 - epsilon_border]
    if len(cnt5) == 0:
        return
    # Polygonal approximation, then consecutive point pairs as segments.
    contour_approximate = cv2.approxPolyDP(cnt5, epsilon_border, True)
    all_pair = list(compute.iterator_zip_n_n_1(contour_approximate))
    # Remove degenerate segments whose two endpoints coincide.
    all_pair_no_single_pixel = list(
        filter(
            lambda x: x[0][0][0] != x[1][0][0] or x[0][0][1] != x[1][0][1],
            all_pair,
        )
    )
    # For each segment: endpoints, orientation in [0, 180), and length.
    all_angles = list(
        map(
            lambda x: (
                (x[0][0], x[1][0]),
                compute.get_angle_0_180(x[0][0], x[1][0]),
                np.linalg.norm(x[0][0] - x[1][0]),  # type: ignore
            ),
            all_pair_no_single_pixel,
        )
    )
    # Segments roughly perpendicular to the page orientation.
    vertical_lines = list(
        filter(
            lambda x: compute.is_angle_closed_to(
                x[1],
                page_angle + Angle.deg(90.0),
                epsilon_angle,
                Angle.deg(180),
            ),
            all_angles,
        )
    )
    # Segments roughly parallel to the page orientation.
    horizontal_lines = list(
        filter(
            lambda x: compute.is_angle_closed_to(
                x[1], page_angle, epsilon_angle, Angle.deg(180)
            ),
            all_angles,
        )
    )
    # Reduce each line to (position on the crossing axis, angle).
    vertical_lines_pos = list(
        map(
            lambda x: (
                compute.get_angle_0_180_posx_safe(x[0][0], x[0][1])[1],
                x[1],
            ),
            vertical_lines,
        )
    )
    horizontal_lines_pos = list(
        map(
            lambda x: (
                compute.get_angle_0_180_posy_safe(x[0][0], x[0][1])[1],
                x[1],
            ),
            horizontal_lines,
        )
    )
    vertical_lines_pos.sort(key=lambda x: x[0])
    horizontal_lines_pos.sort(key=lambda x: x[0])
    # Candidate vertical borders: region between the line and the nearer
    # of the left/right edges.
    for posx, angle in vertical_lines_pos:
        mask = np.zeros((height, width), np.uint8)
        bottom_point = compute.get_bottom_point_from_alpha_posx(
            angle, posx, height
        )
        if posx < width / 2:
            pts = np.array(
                [
                    [-1, 0],
                    [posx - 1, 0],
                    [bottom_point[0] - 1, bottom_point[1]],
                    [-1, height - 1],
                ]
            )
        else:
            pts = np.array(
                [
                    [width, 0],
                    [posx + 1, 0],
                    [bottom_point[0] + 1, bottom_point[1]],
                    [width, height - 1],
                ]
            )
        mask = cv2.drawContours(mask, [pts], 0, 255, -1)
        # Two-bin histogram of the candidate region: accept only if it is
        # almost entirely black or almost entirely white.
        histogram = cv2.calcHist([threshold2], [0], mask, [2], [0, 256])
        if __pourcentage_white_allowed__ * histogram[0] > sum(
            histogram[1:]
        ) or __pourcentage_white_allowed__ * histogram[-1] > sum(
            histogram[:-1]
        ):
            mask_border_only = cv2.drawContours(
                mask_border_only, [pts], 0, (0), -1
            )
    # Candidate horizontal borders: same validation against the top/bottom edge.
    for posy, angle in horizontal_lines_pos:
        mask = np.zeros((height, width), np.uint8)
        bottom_point = compute.get_right_point_from_alpha_posy(
            angle, posy, width
        )
        if posy < height / 2:
            pts = np.array(
                [
                    [0, -1],
                    [0, posy - 1],
                    [bottom_point[0], bottom_point[1] - 1],
                    [width - 1, -1],
                ]
            )
        else:
            pts = np.array(
                [
                    [0, height],
                    [0, posy + 1],
                    [bottom_point[0], bottom_point[1] + 1],
                    [width - 1, height],
                ]
            )
        mask = cv2.drawContours(mask, [pts], 0, 255, -1)
        histogram = cv2.calcHist([threshold2], [0], mask, [2], [0, 256])
        if __pourcentage_white_allowed__ * histogram[0] > sum(
            histogram[1:]
        ) or __pourcentage_white_allowed__ * histogram[-1] > sum(
            histogram[:-1]
        ):
            mask_border_only = cv2.drawContours(
                mask_border_only, [pts], 0, (0), -1
            )
def apply_mask(image: np.ndarray, mask: np.ndarray) -> np.ndarray:
    """Keep only the masked area of ``image``; the rest becomes white."""
    inverted = cv2.bitwise_not(image)
    masked = cv2.bitwise_and(inverted, inverted, mask=mask)
    # Borders are in white in original image.
    return cv2.bitwise_not(masked)
def erode_and_dilate(
    image: np.ndarray,
    size: Tuple[int, int],
    iterations: int,
    reverse: bool = False,
) -> np.ndarray:
    """Apply an erosion then a dilation (or the opposite when ``reverse``)."""
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, size)
    operations = [cv2.erode, cv2.dilate]
    if reverse:
        operations.reverse()
    result = image
    for operation in operations:
        result = operation(result, kernel, iterations=iterations)
    return result
def threshold_from_gaussian_histogram_white(
    image: np.ndarray, pourcentage: float = 0.2, blur_kernel_size: int = 31
) -> int:
    """Find a gray level separating the near-white peak of the histogram.

    Scans the Gaussian-smoothed histogram downwards from 255 until the counts
    stop decreasing (a local minimum), then keeps descending until a count
    exceeds that minimum by ``pourcentage``, and returns that gray level.

    Args:
        image (np.ndarray): grayscale image.
        pourcentage (float): relative margin above the local minimum.
        blur_kernel_size (int): vertical Gaussian kernel size for smoothing.

    Returns:
        int: the selected threshold level in [0, 255].
    """
    histogram = cv2.calcHist([image], [0], None, [256], [0, 256])
    # Smooth the histogram so that small local bumps do not stop the scan.
    histogram_blur = cv2.GaussianBlur(
        histogram,
        (1, blur_kernel_size),
        blur_kernel_size,
        borderType=cv2.BORDER_REPLICATE,
    )
    i = 255
    extreme_min = histogram_blur[255][0]
    # Walk down from 255 while the counts keep decreasing; stop at the
    # first increase, remembering the minimum reached.
    for j in range(254, 0, -1):
        if histogram_blur[j][0] < extreme_min:
            extreme_min = histogram_blur[j][0]
        else:
            i = j
            break
    # Continue downwards until a bin rises `pourcentage` above that minimum.
    limit = extreme_min * (1 + pourcentage)
    for j in range(i, 0, -1):
        if histogram_blur[j][0] > limit:
            i = j
            break
    return i
def threshold_from_gaussian_histogram_black(
    image: np.ndarray, blur_kernel_size: int = 31
) -> int:
    """Return the first gray level where the smoothed histogram starts rising."""
    histogram = cv2.calcHist([image], [0], None, [256], [0, 256])
    smoothed = cv2.GaussianBlur(
        histogram,
        (1, blur_kernel_size),
        blur_kernel_size,
        borderType=cv2.BORDER_REPLICATE,
    )
    for level in range(255):
        if smoothed[level][0] < smoothed[level + 1][0]:
            return level
    # Monotonically non-increasing histogram: fall back to the maximum level.
    return 255
def gaussian_blur_wrap(histogram: np.ndarray, kernel_size: int) -> np.ndarray:
    """Gaussian-blur a histogram while treating it as circular (wrap-around)."""
    # Pad both ends with the opposite end so the blur wraps around.
    padded = np.concatenate(  # type: ignore
        [histogram[-kernel_size:], histogram, histogram[:kernel_size]]
    )
    blurred = cv2.GaussianBlur(
        padded,
        (1, kernel_size),
        kernel_size,
        borderType=cv2.BORDER_REPLICATE,
    )
    # Strip the padding to restore the original length.
    return blurred[kernel_size:-kernel_size]
def apply_brightness_contrast(
input_img: np.ndarray, brightness: int = 0, contrast: int = 0
) -> np.ndarray:
| |
<gh_stars>10-100
# type: ignore
"""
This code is taken from the OneIE
The script extracts IE annotations from ACE2005 (LDC2006T06).
Usage:
python process_ace.py \
"""
import glob
import json
import os
import re
from argparse import ArgumentParser
from dataclasses import dataclass
from typing import Any, Dict, List, Tuple
import tqdm
from bs4 import BeautifulSoup
from nltk import sent_tokenize as sent_tokenize_
from nltk import wordpunct_tokenize as wordpunct_tokenize_
from transformers import AutoTokenizer, PreTrainedTokenizer
# Matches any SGML/XML tag, used to strip markup from the source documents.
TAG_PATTERN = re.compile("<[^<>]+>", re.MULTILINE)
# Per-document character spans (start, end) whose automatic sentence splits
# must be revised by hand; keys are ACE document ids.
DOCS_TO_REVISE_SENT = {
    # "CNN_ENG_20030529_130011.6": [(461, 504), (668, 859), (984, 1074), (1577, 1632)],
    "CNN_ENG_20030626_203133.11": [(1497, 1527)],
    "CNN_ENG_20030526_180540.6": [(67, 99)],
    "CNNHL_ENG_20030523_221118.14": [(136, 174)],
    "BACONSREBELLION_20050127.1017": [(2659, 2663), (4381, 4405), (410, 458)],
    "misc.legal.moderated_20050129.2225": [(4118, 4127), (4710, 4794)],
    "alt.vacation.las-vegas_20050109.0133": [(1201, 1248)],
    "alt.obituaries_20041121.1339": [(1947, 2044), (1731, 1737)],
    "APW_ENG_20030326.0190": [(638, 739)],
    "APW_ENG_20030403.0862": [(729, 781)],
    "CNN_IP_20030405.1600.02": [(699, 705)],
    "CNN_IP_20030403.1600.00-1": [(2392, 2399)],
    "CNN_IP_20030409.1600.04": [(1039, 1050)],
    "CNN_IP_20030412.1600.03": [(741, 772)],
    "CNN_IP_20030402.1600.02-1": [(885, 892)],
    "CNN_IP_20030329.1600.02": [(3229, 3235)],
    "CNN_IP_20030409.1600.02": [(477, 498)],
    "CNN_CF_20030304.1900.04": [(522, 575), (5193, 5210), (5461, 5542)],
    "CNN_IP_20030403.1600.00-3": [(1487, 1493)],
    "soc.history.war.world-war-ii_20050127.2403": [(414, 441)],
    "CNN_ENG_20030529_130011.6": [
        (209, 254),
        (461, 504),
        (668, 859),
        (984, 1074),
        (1577, 1632),
    ],
}
def mask_escape(text: str) -> str:
    """Replaces escaped characters with rare sequences.

    The SGML escapes "&amp;", "&lt;" and "&gt;" are swapped for rarely used
    character runs of exactly the same length (5, 4 and 4 characters), so
    that character offsets into the document are unaffected by the masking.
    Replacing the bare characters instead would change the string length and
    break all downstream offset bookkeeping.

    Args:
        text (str): text to mask.

    Returns:
        str: masked string.
    """
    return (
        text.replace("&amp;", "ҪҪҪҪҪ")
        .replace("&lt;", "ҚҚҚҚ")
        .replace("&gt;", "ҺҺҺҺ")
    )
def unmask_escape(text: str) -> str:
    """Replaces masking sequences with the original escaped characters.

    Exact inverse of the masking step: the rare character runs "ҪҪҪҪҪ",
    "ҚҚҚҚ" and "ҺҺҺҺ" are turned back into the SGML escapes "&amp;", "&lt;"
    and "&gt;". Each mask has exactly the length of the escape it stands
    for, so character offsets are preserved across a mask/unmask round trip.

    Args:
        text (str): masked string.

    Returns:
        str: unmasked string.
    """
    return (
        text.replace("ҪҪҪҪҪ", "&amp;")
        .replace("ҚҚҚҚ", "&lt;")
        .replace("ҺҺҺҺ", "&gt;")
    )
def recover_escape(text: str) -> str:
    """Converts named character references in the given string to the
    corresponding Unicode characters. I didn't notice any numeric character
    references in this dataset.

    Args:
        text (str): text to unescape.

    Returns:
        str: unescaped string.
    """
    # Only the three named references used by the ACE SGML files are handled.
    return text.replace("&amp;", "&").replace("&lt;", "<").replace("&gt;", ">")
def sent_tokenize(
    text: Tuple[str, int, int], language: str = "english"
) -> List[Tuple[str, int, int]]:
    """Performs sentence tokenization. For English, it uses NLTK's sent_tokenize
    function. For Chinese, it uses split_chinese_sentence, a simple sentence
    tokenizer implemented by myself.

    Args:
        text (Tuple[str, int, int]): a tuple of three elements, text to split
            into sentences, start offset, and end offset.
        language (str): available options: english, chinese.

    Returns:
        List[Tuple[str, int, int]]: a list of sentences, each a tuple of
        (sentence text, absolute start offset, absolute end offset).
    """
    # NOTE(review): the incoming end offset is unpacked but never used —
    # presumably kept so the argument mirrors the returned tuples; confirm.
    text, start, end = text
    if language == "chinese":
        sentences = split_chinese_sentence(text)
    else:
        # postprocessing arabic and preprocessing english
        sentences = sent_tokenize_(text, language="english")
    # Re-anchor each tokenized sentence to absolute character offsets by
    # scanning forward through the raw text; `last` is the index of the
    # first not-yet-consumed character.
    last = 0
    sentences_ = []
    for sent in sentences:
        index = text[last:].find(sent)
        if index == -1:
            # The tokenizer altered the text, so the sentence cannot be
            # located verbatim; report it and emit no offsets for it.
            print(text, sent)
        else:
            sentences_.append(
                (sent, last + index + start, last + index + len(sent) + start)
            )
            last += index + len(sent)
    return sentences_
def wordpunct_tokenize(text: str, language: str = "english") -> List[str]:
    """Splits a piece of text into word tokens.

    English (and any non-Chinese) text is delegated to NLTK's
    ``wordpunct_tokenize``; Chinese text is simply broken into its
    individual non-whitespace characters.

    Args:
        text (str): text to split into words.
        language (str): available options: english, chinese.

    Returns:
        List[str]: a list of words.
    """
    if language != "chinese":
        return wordpunct_tokenize_(text)
    return [char for char in text if char.strip()]
def split_chinese_sentence(text: str) -> List[str]:
    """Performs sentence tokenization for Chinese.

    A sentence ends at a closing quote mark, at a terminal punctuation mark
    that is not immediately followed by a quote, or at an even (closing)
    double quote that follows terminal punctuation.

    Args:
        text (str): text to split into sentences.

    Returns:
        List[str]: a list of sentences.
    """
    results: List[str] = []
    buffer = ""
    double_quotes_seen = 0
    last_index = len(text) - 1
    for pos, char in enumerate(text):
        buffer += char
        if char in {"”", "」"}:
            # Closing quote always ends the current sentence.
            results.append(buffer)
            buffer = ""
        elif char in {"。", "!", "?", "!", "?"}:
            # Terminal punctuation ends the sentence unless a quote follows
            # (the quote then closes the sentence instead).
            followed_by_quote = pos < last_index and text[pos + 1] in {"”", '"', "」"}
            if not followed_by_quote and pos < last_index:
                results.append(buffer)
                buffer = ""
        elif char == '"':
            double_quotes_seen += 1
            closing = double_quotes_seen % 2 == 0
            if closing and len(buffer) > 2 and buffer[-2] in {"?", "!", "。", "?", "!"}:
                results.append(buffer)
                buffer = ""
    # Flush whatever is left as the final sentence.
    if buffer:
        results.append(buffer)
    return results
@dataclass
class Span:
    """A contiguous piece of text together with its character offsets."""

    start: int
    end: int
    text: str

    def __post_init__(self):
        # Normalize offsets to plain ints and flatten newlines so a span
        # always renders on a single line.
        self.start = int(self.start)
        self.end = int(self.end)
        self.text = self.text.replace("\n", " ")

    def char_offsets_to_token_offsets(self, tokens: List[Tuple[int, int, str]]):
        """Converts self.start and self.end from character offsets to token
        offsets.

        Args:
            tokens (List[int, int, str]): a list of token tuples. Each item in
                the list is a triple (start_offset, end_offset, text).

        Raises:
            ValueError: if the span does not align with token boundaries.
        """
        token_start = token_end = -1
        for idx, (tok_start, tok_end, _) in enumerate(tokens):
            if tok_start == self.start:
                token_start = idx
            if tok_end == self.end:
                token_end = idx + 1
        if token_start == -1 or token_end == -1 or token_start > token_end:
            raise ValueError(
                "Failed to update offsets for {}-{}:{} in {}".format(
                    self.start, self.end, self.text, tokens
                )
            )
        self.start, self.end = token_start, token_end

    def to_dict(self) -> Dict[str, Any]:
        """Converts instance variables to a dict.

        Returns:
            dict: a dict of instance variables.
        """
        return dict(
            text=recover_escape(self.text),
            start=self.start,
            end=self.end,
        )

    def remove_space(self):
        """Removes heading and trailing spaces in the span text."""
        without_lead = self.text.lstrip(" ")
        # Shift the start offset by however many leading spaces were dropped.
        self.start += len(self.text) - len(without_lead)
        trimmed = without_lead.rstrip(" ")
        self.text = trimmed
        self.end = self.start + len(trimmed)

    def copy(self):
        """Makes a copy of itself.

        Returns:
            Span: a copy of itself.
        """
        return Span(self.start, self.end, self.text)
@dataclass
class Entity(Span):
    """An ACE entity mention: a Span plus entity/mention metadata."""

    entity_id: str
    mention_id: str
    entity_type: str
    entity_subtype: str
    mention_type: str
    # Filled only for value/timex-style mentions; omitted from to_dict if falsy.
    value: str = None

    def to_dict(self) -> Dict[str, Any]:
        """Converts instance variables to a dict.

        Returns:
            Dict: a dict of instance variables.
        """
        serialized = {
            "text": recover_escape(self.text),
            "entity_id": self.entity_id,
            "mention_id": self.mention_id,
            "start": self.start,
            "end": self.end,
            "entity_type": self.entity_type,
            "entity_subtype": self.entity_subtype,
            "mention_type": self.mention_type,
        }
        if self.value:
            serialized["value"] = self.value
        return serialized
@dataclass
class RelationArgument:
    """One endpoint of a relation: the mention that fills a given role."""

    mention_id: str
    role: str
    text: str

    def to_dict(self) -> Dict[str, Any]:
        """Converts instance variables to a dict.

        Returns:
            Dict[str, Any]: a dict of instance variables.
        """
        return dict(
            mention_id=self.mention_id,
            role=self.role,
            text=recover_escape(self.text),
        )
@dataclass
class Relation:
    """A binary relation between two mention arguments."""

    relation_id: str
    relation_type: str
    relation_subtype: str
    arg1: RelationArgument
    arg2: RelationArgument

    def to_dict(self) -> Dict[str, Any]:
        """Converts instance variables to a dict.

        Returns:
            Dict[str, Any]: a dict of instance variables.
        """
        return dict(
            relation_id=self.relation_id,
            relation_type=self.relation_type,
            relation_subtype=self.relation_subtype,
            arg1=self.arg1.to_dict(),
            arg2=self.arg2.to_dict(),
        )
@dataclass
class EventArgument:
    """One argument of an event mention: the filler and its role."""

    mention_id: str
    role: str
    text: str

    def to_dict(self) -> Dict[str, Any]:
        """Converts instance variables to a dict.

        Returns:
            Dict[str, Any]: a dict of instance variables.
        """
        return dict(
            mention_id=self.mention_id,
            role=self.role,
            text=recover_escape(self.text),
        )
@dataclass
class Event:
    """An event mention: its type, trigger span and argument list."""

    event_id: str
    mention_id: str
    event_type: str
    event_subtype: str
    trigger: Span
    arguments: List[EventArgument]

    def to_dict(self) -> Dict[str, Any]:
        """Converts instance variables to a dict.

        Returns:
            Dict[str, Any]: a dict of instance variables.
        """
        serialized_args = [argument.to_dict() for argument in self.arguments]
        return dict(
            event_id=self.event_id,
            mention_id=self.mention_id,
            event_type=self.event_type,
            event_subtype=self.event_subtype,
            trigger=self.trigger.to_dict(),
            arguments=serialized_args,
        )
@dataclass
class Sentence(Span):
    """A tokenized sentence with its entity, relation and event annotations."""

    sent_id: str
    tokens: List[str]
    entities: List[Entity]
    relations: List[Relation]
    events: List[Event]

    def to_dict(self) -> Dict[str, Any]:
        """Converts instance variables to a dict.

        Returns:
            Dict[str, Any]: a dict of instance variables.
        """
        # Tabs are flattened so the text can live in tab-separated output.
        clean_text = recover_escape(self.text).replace("\t", " ")
        return {
            "sent_id": self.sent_id,
            "tokens": [recover_escape(tok) for tok in self.tokens],
            "entities": [ent.to_dict() for ent in self.entities],
            "relations": [rel.to_dict() for rel in self.relations],
            "events": [evt.to_dict() for evt in self.events],
            "start": self.start,
            "end": self.end,
            "text": clean_text,
        }
@dataclass
class Document:
    """A whole document: its ID plus all of its annotated sentences."""

    doc_id: str
    sentences: List[Sentence]

    def to_dict(self) -> Dict[str, Any]:
        """Converts instance variables to a dict.

        Returns:
            Dict[str, Any]: a dict of instance variables.
        """
        return dict(
            doc_id=self.doc_id,
            sentences=[sentence.to_dict() for sentence in self.sentences],
        )
def revise_sentences(
    sentences: List[Tuple[str, int, int]], doc_id: str
) -> List[Tuple[int, int, str]]:
    """Repairs known sentence-tokenization errors for a few documents.

    DOCS_TO_REVISE_SENT maps the document ID to pairs of sentence start
    offsets; for each pair the sentence starting at the second offset is
    glued back onto the sentence starting at the first offset (padding the
    gap with spaces so offsets stay consistent).

    Args:
        sentences (List[Tuple[str, int, int]]): a list of sentence tuples.
        doc_id (str): document ID.

    Returns:
        List[Tuple[str, int, int]]: a list of revised sentence tuples.
    """
    revised = []
    offset_pairs = DOCS_TO_REVISE_SENT[doc_id]
    merge_heads = {left for left, _ in offset_pairs}
    merge_tails = {right for _, right in offset_pairs}
    for idx, (text, start, end) in enumerate(sentences):
        if start in merge_heads:
            # Absorb the following sentence into this one.
            tail_text, tail_start, tail_end = sentences[idx + 1]
            gap = " " * (tail_start - end)
            revised.append((text + gap + tail_text, start, tail_end))
        elif start in merge_tails:
            # Already absorbed by its predecessor.
            continue
        else:
            revised.append((text, start, end))
    return revised
def read_sgm_file(path: str, language: str = "english") -> List[Tuple[str, int, int]]:
"""Reads a SGM text file.
Args:
path (str): path to the input file.
language (str): document language. Valid values: "english" or "chinese".
Returns:
List[Tuple[str, int, int]]: a list of sentences. Each item in the list
is a tuple of three elements, sentence text, start offset, and end
offset.
"""
data = open(path, "r", encoding="utf-8").read()
# Chunk the document
chunks = TAG_PATTERN.sub("⁑", data).split("⁑")
# Get the offset | |
num_filters]
X_pad = np.pad(Xr, npad, 'constant')
out_s[0 if self.batch_size == 1 else 1] = int(np.ceil(out_s[0 if self.batch_size == 1 else 1] / stride_par))
out_s[1 if self.batch_size == 1 else 2] = int(np.ceil(out_s[1 if self.batch_size == 1 else 2] / stride_par))
conv_output = np.zeros(out_s)
if self.batch_size != 1:
k_filters = np.expand_dims(k_filters, axis=0)
k_filters = np.repeat(k_filters, im.shape[0], axis=0)
#print(Xr.shape, X_pad.shape, k_filters.shape, conv_output.shape, output_shape)
for posY in range(0, filter_shape0):
for posX in range(0, filter_shape1):
# valid convolution
if self.batch_size == 1:
conv_output += X_pad[posYf:posYf+output_shape[0]:stride_par, posXf:posXf+output_shape[1]:stride_par] * k_filters[posY, posX]
else:
conv_output += X_pad[:, posYf:posYf+output_shape[0]:stride_par, posXf:posXf+output_shape[1]:stride_par] * k_filters[:, posY, posX].reshape(k_filters.shape[0],1,1,k_filters.shape[3])
posXf = posXf + 1
posYf = posYf + 1
posXf = eS1
# End of convolutions
if self.pre_norm:
ax_f = tuple(range(0,len(conv_output.shape)))
if self.batch_size == 1:
ax_f = ax_f[0:-1]
conv_output = (conv_output - np.mean(conv_output, axis=ax_f)) / (np.std(conv_output, axis=ax_f) + 1e-7)
else:
ax_f = ax_f[1:-1]
conv_output = (conv_output - np.mean(conv_output, axis=ax_f).reshape(conv_output.shape[0],1,1,conv_output.shape[3])) / (np.std(conv_output, axis=ax_f).reshape(conv_output.shape[0],1,1,conv_output.shape[3]) + 1e-7)
#conv_output = (conv_output - conv_output.mean()) / (conv_output.std() + 1e-7)
im = self.ActivationFunction(conv_output, 'relu')
#print('Layer output shape:', im.shape, '\n---------------------\n')
return im
def convLayersBackpropagation(self, last_layer_output, prev_cost):
    """Backpropagates the cost gradient through every convolutional layer.

    Walks the conv layers in reverse, and for each layer computes the
    gradient w.r.t. its filters (dE/dF, via a 'valid'-style convolution of
    the layer input with the output gradient) and the gradient w.r.t. its
    input (dE/dX, via a 'full'-style convolution with the 180-degree rotated
    filters), then applies a plain gradient-descent update to the filters.

    Args:
        last_layer_output: forward-pass output of the final conv layer; its
            shape anchors the reshape of prev_cost.
        prev_cost: gradient flowing back from the dense part of the network.

    NOTE(review): when batch_size != 1 the batch axis is moved from first to
    last position via np.reshape (not a transpose) — presumably this matches
    the forward pass's memory layout convention; confirm before changing
    either side.
    """
    # Index of the layer currently being processed (walking backwards).
    i = len(self.filtersValues) - 1
    last_shape = list(last_layer_output.shape)
    if self.batch_size != 1:
        # Batch-last layout: [B, H, W, F] -> [H, W, F, B].
        batch_el = last_shape[0]
        last_shape = last_shape[1:] + [batch_el]
    error_by_x = np.reshape(prev_cost, last_shape)
    """
    if self.batch_size == 1:
        num_filters = last_layer_output.shape[2]
    else:
        num_filters = last_layer_output.shape[3]
    """
    self.log('Start of convLayersBackpropagation:', '\n')
    #self.log('prev_cost:', prev_cost.shape, prev_cost, '\n')
    #self.log('last_layer_output:', last_layer_output.shape, last_layer_output, '\n')
    #self.log('error_by_x:', error_by_x.shape, error_by_x, '\n')
    #if self.batch_size != 1:
    #error_by_x = np.mean(error_by_x, axis=0)
    for k_filters in self.filtersValues[::-1]:
        # Input that was fed to layer i during the forward pass.
        X = self.convInputs[i]
        if self.batch_size != 1:
            # Same batch-last reshuffle as above, applied to the layer input.
            X_batchshape = list(X.shape)
            X_batch_elements = X_batchshape[0]
            X_batchshape = X_batchshape[1:] + [X_batch_elements]
            X = np.reshape(X, X_batchshape)
            #X = np.mean(X, axis=0)
        # to dilate gradient if needed because of stride
        if (type(self.convStride) == list):
            stride_par = self.convStride[i]
        else:
            stride_par = self.convStride
        if stride_par != 1:
            #erShape = error_by_x.shape[0] * stride_par
            erShape = (X.shape[0])
            if self.batch_size == 1:
                error_by_output = np.zeros((erShape, erShape, self.convFilters[i]), dtype=float)
            else:
                error_by_output = np.zeros((erShape, erShape, self.convFilters[i], batch_el), dtype=float)
            #print(error_by_output.shape, error_by_x.shape)
            posI = 0
            posJ = 0
            erx1 = (error_by_x.shape[0])
            erx2 = (error_by_x.shape[1])
            # Zero-interweave: spread the strided gradient onto a zero grid,
            # stepping 2 cells where space allows (stride dilation).
            for pe_i in range(0, erx1):
                for pe_j in range(0, erx2):
                    error_by_output[posI, posJ] = error_by_x[pe_i, pe_j]
                    if (posJ + 2) < erShape:
                        posJ = posJ + 2
                    else:
                        posJ = posJ + 1
                if (posI + 2) < erShape:
                    posI = posI + 2
                else:
                    posI = posI + 1
                posJ = 0
        else:
            # dE/dO
            error_by_output = error_by_x
        # Filters rotated 180 degrees (both spatial axes flipped) for the
        # full-convolution dE/dX term.
        f_rotated = np.flip(self.filtersValues[i], 0)
        f_rotated = np.flip(f_rotated, 1)
        # dE/dF
        #error_by_filter = self.conv_filters(X, error_by_output, relu=False, stride=1, mode='valid')
        # dE/dX
        #error_by_x = self.conv_filters(f_rotated, error_by_output, relu=False, stride=1, mode='full')
        # Start of convolutions
        err_output_shape01 = np.asarray([error_by_output.shape[0], error_by_output.shape[1]])
        err_out_shape_d2 = (err_output_shape01 / 2).astype(int)
        xshape = np.asarray([X.shape[0], X.shape[1]])
        fshape = np.asarray([f_rotated.shape[0], f_rotated.shape[1]])
        # Extra one-cell padding when the gradient has an even spatial size.
        extraShape = (err_output_shape01 % 2) == 0
        eS0 = extraShape[0].astype(int)
        eS1 = extraShape[1].astype(int)
        err_filt_shape = xshape - err_out_shape_d2*2 + eS0
        err_x_shape = fshape + err_out_shape_d2*2 + eS0
        num_filters = self.filtersValues[i].shape[-1]
        #print(error_by_output.shape, xshape, err_output_shape01, err_out_shape_d2*2, eS0, err_filt_shape)
        if self.batch_size == 1:
            error_by_filter = np.zeros((err_filt_shape[0], err_filt_shape[1], num_filters))
            error_by_x = np.zeros((err_x_shape[0], err_x_shape[1], num_filters))
        else:
            error_by_filter = np.zeros((err_filt_shape[0], err_filt_shape[1], num_filters, X_batch_elements))
            error_by_x = np.zeros((err_x_shape[0], err_x_shape[1], num_filters, X_batch_elements))
        err_out_shape0 = error_by_output.shape[0]
        err_out_shape1 = error_by_output.shape[1]
        fil_shape0 = error_by_filter.shape[0]
        fil_shape1 = error_by_filter.shape[1]
        ex_shape0 = self.filtersValues[i].shape[0]
        ex_shape1 = self.filtersValues[i].shape[1]
        posYf = eS0
        posXf = eS1
        if (len(X.shape) < 3):
            # Grayscale input without a channel axis: replicate it across
            # the filter axis so broadcasting lines up.
            Xr = np.expand_dims(X, axis=-1)
            Xr = np.repeat(Xr, num_filters, axis=-1)
        else:
            Xr = X
        if (len(Xr.shape) == 3):
            X_pad = np.pad(Xr, ((0,eS0), (0,eS1), (0,0)), 'constant')
        elif (len(Xr.shape) == 4):
            X_pad = np.pad(Xr, ((0,eS0), (0,eS1), (0,0), (0,0)), 'constant')
        else: # color image with batch
            X_pad = np.pad(Xr, ((0,0), (0,eS0), (0,eS1), (0,0), (0,0)), 'constant')
        layer_filters = self.filtersValues[i]
        if self.batch_size != 1:
            # Replicate the filters across the trailing batch axis.
            layer_filters = np.expand_dims(layer_filters, axis=-1)
            layer_filters = np.repeat(layer_filters, X_batch_elements, axis=-1)
        #print(X_pad.shape, error_by_output.shape, error_by_filter.shape, self.filtersValues[i].shape, error_by_output.shape, error_by_x.shape)
        for posY in range(0, err_out_shape0):
            for posX in range(0, err_out_shape1):
                # valid convolution (dE/dF)
                error_by_filter += X_pad[posYf:posYf+fil_shape0, posXf:posXf+fil_shape1] * error_by_output[posY, posX]
                # full convolution (dE/dX)
                error_by_x[posYf:posYf+ex_shape0, posXf:posXf+ex_shape1] += layer_filters * error_by_output[posY, posX]
                posXf = posXf + 1
            posYf = posYf + 1
            posXf = eS1
        # Undo the 180-degree rotation so dE/dX is oriented like the input.
        error_by_x = np.flip(error_by_x, 0)
        error_by_x = np.flip(error_by_x, 1)
        # End of convolutions
        #print(X.shape, X_pad.shape, self.filtersValues[i].shape, error_by_filter.shape, error_by_x.shape, error_by_output.shape)
        #self.log('error_by_filter:', error_by_filter[:,:,0], '\n\n')
        #self.log('prev filtersValues[i]:', self.filtersValues[i][:,:,0], '\n\n')
        #self.log('error_by_x:', error_by_x[:,:,0], '\n\n')
        if self.batch_size != 1:
            # Average the filter gradient over the batch axis.
            error_by_filter = np.mean(error_by_filter, axis=-1)
        #if self.pre_norm:
        #ax_f = tuple(range(0,len(error_by_filter[i].shape)))[0:-1]
        #error_by_filter = (error_by_filter - np.mean(error_by_filter, axis=ax_f)) / (np.std(error_by_filter, axis=ax_f) + 1e-7)
        #error_by_filter = (error_by_filter - error_by_filter.mean()) / (error_by_filter.std() + 1e-7)
        # Filters update
        self.filtersValues[i] = self.filtersValues[i] - self.learningRateConv * error_by_filter
        if self.pre_norm:
            # Re-standardize each filter over all axes except the last.
            ax_f = tuple(range(0,len(self.filtersValues[i].shape)))[0:-1]
            self.filtersValues[i] = (self.filtersValues[i] - np.mean(self.filtersValues[i], axis=ax_f)) / (np.std(self.filtersValues[i], axis=ax_f) + 1e-7)
        #self.log('filtersValues[i] updated:', self.filtersValues[i][:,:,0], '\n\n')
        #self.log('\n-----------------------\n')
        i = i - 1
    self.log('End of convLayersBackpropagation')
def draw(self, showWeights=False, textSize=9, customRadius=0):
    """Draws the fully connected part of the network with matplotlib.

    Layers are spread evenly along the x axis, neurons (red circles) evenly
    along the y axis, and every neuron is connected by a line to each neuron
    of the previous layer.

    Args:
        showWeights: if True, the connection weight (rounded to 3 decimals)
            is written along each line.
        textSize: font size for the weight labels.
        customRadius: neuron circle radius; if <= 0 a radius is derived from
            the total number of neurons.
    """
    plt.figure(figsize=(10,8))
    fig = plt.gcf()
    ax = fig.gca()
    ax.set_xlim(xmin=0, xmax=1)
    ax.set_ylim(ymin=0, ymax=1)
    xmin, xmax, ymin, ymax = ax.axis()
    xdim = xmax - xmin
    ydim = ymax - ymin
    # Horizontal band reserved for each layer (input layer + hidden layers).
    space_per_layer = xdim / (len(self.hiddenL) + 1)
    x0 = xmin
    x1 = xmin + space_per_layer
    medio_intervalo = space_per_layer / 2  # half a layer band (x of its center)
    if customRadius <= 0:
        radio = 1 / ((sum(self.hiddenL) + self.n_layer0) * 5)
    else:
        radio = customRadius
    lista_lineas_xy = []  # circle centers of the previous layer
    lasth = self.n_layer0
    for capa,h in enumerate([self.n_layer0] + self.hiddenL):
        space_per_neuron = ydim / h
        y0 = ymin
        y1 = ymin + space_per_neuron
        medio_intervalo_n = space_per_neuron / 2  # half a neuron band
        lista_lineas_xy_pre = []  # centers collected for the current layer
        ne = (lasth * h) - 1
        neY = h - 1
        for j in range(0, h):
            ax.add_patch(plt.Circle(((medio_intervalo + x0), (medio_intervalo_n + y0)), radio, color='r'))
            neX = lasth - 1
            # Connect this neuron to every neuron of the previous layer.
            for xy in lista_lineas_xy:
                if True: #j == 2:
                    plt.plot([xy[0],(medio_intervalo + x0)],[xy[1], (medio_intervalo_n + y0)])
                    #print(capa, ne, self.hiddenWeights[capa-1][ne])
                    # Slope/intercept of the connection line, used to place
                    # the weight label on (next to) the line.
                    my = ((medio_intervalo_n + y0) - xy[1])
                    mx = ((medio_intervalo + x0) - xy[0])
                    pendiente = my / mx
                    ordenada_origen = xy[1] - pendiente * xy[0]
                    margen_ord = 0.015
                    if pendiente < 0:
                        margen_ord = -0.045  # to compensate for the rotation of the text
                    ordenada_origen = ordenada_origen + margen_ord  # so the label sits beside the line rather than on top of it
                    # random x within the segment (minus a margin so the label is not too close to a neuron)
                    mx2 = random.uniform(xy[0] + 0.04, (medio_intervalo + x0) - 0.04)
                    my2 = pendiente*mx2 + ordenada_origen
                    alfa = math.degrees(math.atan(pendiente))
                    if showWeights:
                        #print(h, capa-1, neX, neY)
                        # NOTE(review): `text` is presumably pyplot's text()
                        # brought in by a star import — confirm.
                        text(mx2, my2, round(self.hiddenWeights[capa-1][neX][neY],3), rotation = alfa, fontsize = textSize)
                    ne = ne - 1
                    neX = neX - 1
            lista_lineas_xy_pre.append([(medio_intervalo + x0), (medio_intervalo_n + y0)])
            neY = neY - 1
            y0 = y0 + space_per_neuron
            y1 = y1 + space_per_neuron
        lasth = h
        #print('\n')
        x0 = x0 + space_per_layer
        x1 = x1 + space_per_layer
        #print('-------------\n')
        lista_lineas_xy = lista_lineas_xy_pre
    plt.show()
def importModel(self, path='', filename='ConvNetAbel_model'):
    """Restores a previously exported model from its .npy files.

    Loads the dense weights, the scalar configuration vector, the conv
    configuration vector and the conv filter values; the mean-cost history
    is loaded only when debug mode is enabled.

    Args:
        path: directory prefix of the exported files.
        filename: base name shared by all exported files.
    """
    self.hiddenWeights = np.load(path + filename + '_weights.npy', allow_pickle=True)
    mConfig = np.load(path + filename + '_config.npy', allow_pickle=True)
    # (attribute, cast) pairs in the exact order the config was exported.
    scalar_fields = [
        ('n_layer0', int), ('showLogs', bool), ('lastLayerNeurons', int),
        ('numEpochs', int), ('learningRate', float), ('debugMode', int),
        ('softmax', bool), ('activationFunction', str), ('verbose', bool),
        ('use', str), ('batch_size', int), ('batch_gradient', str),
        ('batch_mult', int), ('dropout', float), ('pre_norm', bool),
        ('shuffle', bool), ('iterationDrop', float),
    ]
    for idx, (attr, cast) in enumerate(scalar_fields):
        setattr(self, attr, cast(mConfig[idx]))
    # These entries are stored as-is (no cast applied).
    self.version_importedModel = mConfig[17]
    self.hiddenL2 = mConfig[18]
    self.hiddenL = mConfig[19]
    convConfig = np.load(path + filename + '_convConfig.npy', allow_pickle=True)
    self.convFilters = convConfig[0]
    self.convStride = convConfig[1]
    self.convFilterSizes = convConfig[2]
    self.kernel_initializer = str(convConfig[3])
    self.convEpochs = int(convConfig[4])
    self.learningRateConv = float(convConfig[5])
    self.filtersValues = np.load(path + filename + '_filtersValues.npy', allow_pickle=True)
    if self.debugMode > 0:
        self.meanCostByEpoch = np.load(path + filename + '_meanCostByEpoch.npy', allow_pickle=True).tolist()
| |
durations_act_to = activities.get_durations(ids_act_to)
ids_fac_from = map_ids_fac_from[ids_person_act]
ids_fac_to = activities.ids_facility[ids_act_to]
centroids_from = facilities.centroids[ids_fac_from]
centroids_to = facilities.centroids[ids_fac_to]
# origin edge and position
ids_edge_from = facilities.ids_roadedge_closest[ids_fac_from]
poss_edge_from = facilities.positions_roadedge_closest[ids_fac_from]
# destination edge and position
ids_edge_to = facilities.ids_roadedge_closest[ids_fac_to]
poss_edge_to = facilities.positions_roadedge_closest[ids_fac_to]
#ids_stop_from = ptstops.get_closest(centroids_from)
#ids_stop_to = ptstops.get_closest(centroids_to)
#ids_stopedge_from = ids_laneedge[ids_stoplane[ids_stop_from]]
#ids_stopedge_to = ids_laneedge[ids_stoplane[ids_stop_to]]
# do random pos here
# poss_stop_from = 0.5*( ptstops.positions_from[ids_stop_from]\
# +ptstops.positions_to[ids_stop_from])
# poss_stop_to = 0.5*( ptstops.positions_from[ids_stop_to]\
# +ptstops.positions_to[ids_stop_to])
i = 0.0
for id_person, id_plan, time_from, id_act_from, id_act_to, name_acttype_to, duration_act_to, id_edge_from, pos_edge_from, id_edge_to, pos_edge_to, \
in zip(ids_person_act, ids_plan, times_from, ids_act_from, ids_act_to, names_acttype_to, durations_act_to, ids_edge_from, poss_edge_from, ids_edge_to, poss_edge_to):
n_pers = len(ids_person_act)
if logger:
logger.progress(i/n_pers*100)
i += 1.0
print 79*'_'
print ' id_plan=%d, id_person=%d, ' % (id_plan, id_person)
id_stage_walk1, time = walkstages.append_stage(id_plan, time_from,
id_edge_from=id_edge_from,
position_edge_from=pos_edge_from,
id_edge_to=id_edge_to,
position_edge_to=pos_edge_to, # -7.0,
)
# update time for trips estimation for this plan
plans.times_est[id_plan] += time-time_from
# define current end time without last activity duration
plans.times_end[id_plan] = time
id_stage_act, time = activitystages.append_stage(
id_plan, time,
ids_activity=id_act_to,
names_activitytype=name_acttype_to,
durations=duration_act_to,
ids_lane=edges.ids_lanes[id_edge_to][0],
positions=pos_edge_to,
)
# store time for next iteration in case other activities are
# following
map_times[id_person] = time
# select persons and activities for next setp
ind_act += 1
ids_person_act, ids_act_from, ids_act_to\
= virtualpop.get_activities_from_pattern(ind_act, ids_person=ids_person_act)
class AutoStrategy(StrategyMixin):
def __init__(self, ident, parent=None,
             name='Auto strategy',
             info='With this strategy, the person uses his private auto as main transport mode.',
             **kwargs):
    """Initializes the auto strategy and registers it with its parent.

    Sets up the object manager, attaches an attribute manager, then runs
    the strategy-specific attribute and constant initialization hooks.
    """
    self._init_objman(ident, parent, name=name, info=info, **kwargs)
    # NOTE(review): the returned attrsman handle is unused here — attributes
    # are accessed later via self.get_attrsman(); confirm before removing.
    attrsman = self.set_attrsman(cm.Attrsman(self))
    # specific init
    self._init_attributes()
    self._init_constants()
def _init_attributes(self):
    # print 'StrategyMixin._init_attributes'
    # Placeholder hook: this strategy defines no extra persistent attributes,
    # but subclasses/siblings override this to declare theirs.
    pass
def _init_constants(self):
    """Caches frequently used transport-mode IDs from the network.

    The cached attributes are excluded from persistence because they can
    always be rebuilt from the scenario's network.
    """
    net_modes = self.get_virtualpop().get_scenario().net.modes
    mode_attrs = [
        ('_id_mode_bike', 'bicycle'),
        ('_id_mode_auto', 'passenger'),
        ('_id_mode_moto', 'motorcycle'),
    ]
    for attrname, modename in mode_attrs:
        setattr(self, attrname, net_modes.get_id_mode(modename))
    self.get_attrsman().do_not_save_attrs([attrname for attrname, _ in mode_attrs])
def preevaluate(self, ids_person):
    """
    Preevaluation strategies for person IDs in vector ids_person.

    Returns a preevaluation vector with a preevaluation value
    for each person ID. The values of the preevaluation vector are as follows:
        -1 : Strategy cannot be applied
        0 : Strategy can be applied, but the preferred mode is not used
        1 : Strategy can be applied, and preferred mode is part of the strategy
        2 : Strategy uses predominantly preferred mode
    """
    n_pers = len(ids_person)
    print 'Autostrategy.preevaluate', n_pers, 'persons'
    persons = self.get_virtualpop()
    preeval = np.zeros(n_pers, dtype=np.int32)
    # put -1 for persons without car access (ids_iauto == -1 means no car)
    preeval[persons.ids_iauto[ids_person] == -1] = -1
    print ' persons having no auto', len(np.flatnonzero(persons.ids_iauto[ids_person] == -1))
    # put 0 for persons with car but with a different preferred mode
    preeval[(persons.ids_iauto[ids_person] > -1)
            & (persons.ids_mode_preferred[ids_person] != self._id_mode_auto)] = 0
    print ' persons with car but with a different preferred mode', len(np.flatnonzero(
        (persons.ids_iauto[ids_person] > -1) & (persons.ids_mode_preferred[ids_person] != self._id_mode_auto)))
    # put 2 for persons with car access and who prefer the car
    preeval[(persons.ids_iauto[ids_person] > -1)
            & (persons.ids_mode_preferred[ids_person] == self._id_mode_auto)] = 2
    print ' persons with car access and who prefer the car', len(np.flatnonzero(
        (persons.ids_iauto[ids_person] > -1) & (persons.ids_mode_preferred[ids_person] == self._id_mode_auto)))
    return preeval
# def are_feasible(self, ids_person):
# """
# Returns a bool vector, with True values for
# persons where this strategy can be applied.
# """
# persons = self.get_virtualpop()
#
# # check if person has a car
# # one may also check if there is parking available
# # at all desinations
# return persons.ids_iautos[ids_person] >= 0
def plan(self, ids_person, logger=None):
"""
Generates a plan for these person according to this strategie.
Overriden by specific strategy.
"""
#make_plans_private(self, ids_person = None, mode = 'passenger')
# routing necessary?
virtualpop = self.get_virtualpop()
plans = virtualpop.get_plans() # self._plans
walkstages = plans.get_stagetable('walks')
ridestages = plans.get_stagetable('autorides')
activitystages = plans.get_stagetable('activities')
activities = virtualpop.get_activities()
activitytypes = virtualpop.get_demand().activitytypes
landuse = virtualpop.get_landuse()
facilities = landuse.facilities
parking = landuse.parking
scenario = virtualpop.get_scenario()
edges = scenario.net.edges
lanes = scenario.net.lanes
modes = scenario.net.modes
#times_est_plan = plans.times_est
# here we can determine edge weights for different modes
plans.prepare_stagetables(['walks', 'autorides', 'activities'])
# get initial travel times for persons.
# initial travel times depend on the initial activity
landuse.parking.clear_booking()
ids_person_act, ids_act_from, ids_act_to\
= virtualpop.get_activities_from_pattern(0, ids_person=ids_person)
if len(ids_person_act) == 0:
print 'WARNING in Autostrategy.plan: no eligible persons found.'
return False
# ok
# temporary maps from ids_person to other parameters
nm = np.max(ids_person_act)+1
map_ids_plan = np.zeros(nm, dtype=np.int32)
#ids_plan_act = virtualpop.add_plans(ids_person_act, id_strategy = self.get_id_strategy())
map_ids_plan[ids_person_act] = virtualpop.add_plans(ids_person_act, id_strategy=self.get_id_strategy())
# err
map_times = np.zeros(nm, dtype=np.int32)
map_times[ids_person_act] = activities.get_times_end(ids_act_from, pdf='unit')
# set start time to plans (important!)
plans.times_begin[map_ids_plan[ids_person_act]] = map_times[ids_person_act]
map_ids_fac_from = np.zeros(nm, dtype=np.int32)
map_ids_fac_from[ids_person_act] = activities.ids_facility[ids_act_from]
# err
map_ids_parking_from = np.zeros(nm, dtype=np.int32)
ids_parking_from, inds_vehparking = parking.get_closest_parkings(virtualpop.ids_iauto[ids_person_act],
facilities.centroids[activities.ids_facility[ids_act_from]])
if len(ids_parking_from) == 0:
return False
# err
map_ids_parking_from[ids_person_act] = ids_parking_from
n_plans = len(ids_person_act)
print 'AutoStrategy.plan n_plans=', n_plans
# print ' map_ids_parking_from[ids_person_act].shape',map_ids_parking_from[ids_person_act].shape
# set initial activity
# this is because the following steps start with travel
# and set the next activity
#names_acttype_from = activitytypes.names[activities.ids_activitytype[ids_act_from]]
# for id_plan
ind_act = 0
# make initial activity stage
ids_edge_from = facilities.ids_roadedge_closest[map_ids_fac_from[ids_person_act]]
poss_edge_from = facilities.positions_roadedge_closest[map_ids_fac_from[ids_person_act]]
# this is the time when first activity starts
# first activity is normally not simulated
names_acttype_from = activitytypes.names[activities.ids_activitytype[ids_act_from]]
durations_act_from = activities.get_durations(ids_act_from)
times_from = map_times[ids_person_act]-durations_act_from
#times_from = activities.get_times_end(ids_act_from, pdf = 'unit')
for id_plan,\
time,\
id_act_from,\
name_acttype_from,\
duration_act_from,\
id_edge_from,\
pos_edge_from \
in zip(map_ids_plan[ids_person_act],
times_from,
ids_act_from,
names_acttype_from,
durations_act_from,
ids_edge_from,
poss_edge_from):
id_stage_act, time = activitystages.append_stage(
id_plan, time,
ids_activity=id_act_from,
names_activitytype=name_acttype_from,
durations=duration_act_from,
ids_lane=edges.ids_lanes[id_edge_from][0],
positions=pos_edge_from,
)
# main loop while there are persons performing
# an activity at index ind_act
while len(ids_person_act) > 0:
ids_plan = map_ids_plan[ids_person_act]
ids_veh = virtualpop.ids_iauto[ids_person_act]
#inds_pers = virtualpop.get_inds(ids_person)
# self.persons.cols.mode_preferred[inds_pers]='private'
times_from = map_times[ids_person_act]
names_acttype_to = activitytypes.names[activities.ids_activitytype[ids_act_to]]
durations_act_to = activities.get_durations(ids_act_to)
ids_fac_from = map_ids_fac_from[ids_person_act]
ids_fac_to = activities.ids_facility[ids_act_to]
centroids_to = facilities.centroids[ids_fac_to]
# origin edge and position
ids_edge_from = facilities.ids_roadedge_closest[ids_fac_from]
poss_edge_from = facilities.positions_roadedge_closest[ids_fac_from]
# this method will find and occupy parking space
ids_parking_from = map_ids_parking_from[ids_person_act]
# print ' ids_veh.shape',ids_veh.shape
# print ' centroids_to.shape',centroids_to.shape
ids_parking_to, inds_vehparking = parking.get_closest_parkings(ids_veh, centroids_to)
ids_lane_parking_from = parking.ids_lane[ids_parking_from]
ids_edge_parking_from = lanes.ids_edge[ids_lane_parking_from]
poss_edge_parking_from = parking.positions[ids_parking_from]
# print ' ids_parking_to.shape',ids_parking_to.shape
# print ' np.max(parking.get_ids()), np.max(ids_parking_to)',np.max(parking.get_ids()), np.max(ids_parking_to)
ids_lane_parking_to = parking.ids_lane[ids_parking_to]
ids_edge_parking_to = lanes.ids_edge[ids_lane_parking_to]
poss_edge_parking_to = parking.positions[ids_parking_to]
# destination edge and position
ids_edge_to = facilities.ids_roadedge_closest[ids_fac_to]
poss_edge_to = facilities.positions_roadedge_closest[ids_fac_to]
i = 0.0
n_pers = len(ids_person_act)
for id_person, id_plan, time_from, id_act_from, id_act_to, name_acttype_to, duration_act_to, id_veh, id_edge_from, pos_edge_from, id_edge_parking_from, pos_edge_parking_from, id_parking_from, id_parking_to, id_edge_parking_to, pos_edge_parking_to, id_edge_to, pos_edge_to\
in zip(ids_person_act, ids_plan, times_from, ids_act_from, ids_act_to, names_acttype_to, durations_act_to, ids_veh, ids_edge_from, poss_edge_from, ids_edge_parking_from, poss_edge_parking_from, ids_parking_from, ids_parking_to, ids_edge_parking_to, poss_edge_parking_to, ids_edge_to, poss_edge_to):
if logger:
logger.progress(i/n_pers*100)
i += 1.0
#plans.set_row(id_plan, ids_person = id_person, ids_strategy = self.get_id_strategy())
#times_est_plan[id_plan] = time-time_start
# map_times[id_person] = self.plan_activity(\
# id_person, id_plan, time_from,
# id_act_from, id_act_to,
# name_acttype_to, duration_act_to,
# id_veh,
# id_edge_from, pos_edge_from,
# id_parking_from, id_edge_parking_from, pos_edge_parking_from,
# id_parking_to, id_edge_parking_to, pos_edge_parking_to,
# id_edge_to, pos_edge_to, edges.ids_lanes[id_edge_to][0])
# start creating stages for activity
id_stage_walk1, time = walkstages.append_stage(
id_plan, time_from,
id_edge_from=id_edge_from,
position_edge_from=pos_edge_from,
id_edge_to=id_edge_parking_from,
position_edge_to=pos_edge_parking_from-1.5, # wait 1.5 m before nose of parked car
)
# ride from car parking to road edge near activity
id_stage_car, time = ridestages.append_stage(
id_plan, time,
id_veh=id_veh,
# delay to be sure that person arrived!(workaround in combination with parking=False)
time_init=time+30, # time_from,
id_parking_from=id_parking_from,
id_parking_to=id_parking_to,
# TODO: here we could use id_edge_to as via edge to emulate search for parking
)
if id_stage_car >= 0:
# print ' car ride successful'
id_stage_walk2, time = walkstages.append_stage(
id_plan, time,
id_edge_from=id_edge_parking_to,
position_edge_from=pos_edge_parking_to-1.5, # ecessary?
id_edge_to=id_edge_to,
position_edge_to=pos_edge_to,
)
else:
# print ' parking not connected or distance too short, modify first walk and go directly to activity'
# print ' id_stage_walk1',id_stage_walk1,type(id_stage_walk1)
# print ' id_edge_from',id_edge_from
# print ' position_edge_from',position_edge_from
# print ' id_edge_to',id_edge_to
# print ' position_edge_to',position_edge_to
time = walkstages.modify_stage(
id_stage_walk1, time_from,
id_edge_from=id_edge_from,
position_edge_from=pos_edge_from,
id_edge_to=id_edge_to,
position_edge_to=pos_edge_to,
)
# store time estimation for this plan
# note that these are the travel times, no activity time
plans.times_est[id_plan] += time-time_from
# define current end time without last activity duration
plans.times_end[id_plan] = time
# finally add activity and respective duration
id_stage_act, time = activitystages.append_stage(
id_plan, time,
ids_activity=id_act_to,
names_activitytype=name_acttype_to,
durations=duration_act_to,
ids_lane=edges.ids_lanes[id_edge_to][0],
positions=pos_edge_to,
)
map_times[id_person] = time
# return time
##
# select persons and activities for next setp
ind_act += | |
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of <NAME>, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id: masterapi.py 9672 2010-05-11 21:57:40Z kwc $
"""
Python adapter for calling ROS Master API. While it is trivial to call the
Master directly using XML-RPC, this API provides a safer abstraction in the event
the Master API is changed.
"""
try:
from xmlrpc.client import ServerProxy # Python 3.x
except ImportError:
from xmlrpclib import ServerProxy # Python 2.x
from . names import make_caller_id
from . rosenv import get_master_uri
from . network import parse_http_host_and_port
from rospy.impl.broadcast_manager import BroadcastManager
class MasterException(Exception):
    """
    Root of the ROS-master error hierarchy; every master-related error
    raised by this module derives from it.
    """
    pass
class MasterFailure(MasterException):
    """
    Raised when a call to the Master fails outright (FAILURE return).
    This usually points at an internal Master error and the Master may
    be left in an inconsistent state.
    """
    pass
class MasterError(MasterException):
    """
    Raised when the Master reports an ERROR code, i.e. the arguments
    passed in the call were invalid.
    """
    pass
# Backwards-compatible aliases: earlier rosgraph releases exported these
# names, so keep them for callers that still catch the old exceptions.
ROSMasterException = MasterException
Error = MasterError
Failure = MasterFailure
def is_online(master_uri=None):
    """
    Convenience check for Master availability.

    @param master_uri: (optional) override environment's ROS_MASTER_URI
    @type master_uri: str
    @return: True if Master is available
    """
    master = Master('rosgraph', master_uri=master_uri)
    return master.is_online()
class Master(object):
"""
API for interacting with the ROS master. Although the Master is
relatively simple to interact with using the XMLRPC API, this
abstraction layer provides protection against future updates. It
also provides a streamlined API with builtin return code checking
and caller_id passing.
"""
def __init__(self, caller_id, master_uri=None):
"""
:param caller_id: name of node to use in calls to master, ``str``
:param master_uri: (optional) override default ROS master URI, ``str``
:raises: :exc:`ValueError` If ROS master uri not set properly
"""
if master_uri is None:
master_uri = get_master_uri()
self._reinit(master_uri)
self.caller_id = make_caller_id(caller_id) #resolve
if self.caller_id[-1] == '/':
self.caller_id = self.caller_id[:-1]
self.bm = BroadcastManager(name=self.caller_id)
def _reinit(self, master_uri):
"""
Internal API for reinitializing this handle to be a new master
:raises: :exc:`ValueError` If ROS master uri not set
"""
if master_uri is None:
raise ValueError("ROS master URI is not set")
# #1730 validate URL for better error messages
try:
parse_http_host_and_port(master_uri)
except ValueError:
raise ValueError("invalid master URI: %s"%(master_uri))
self.master_uri = master_uri
self.handle = ServerProxy(self.master_uri)
def is_online(self):
"""
Check if Master is online.
NOTE: this is not part of the actual Master API. This is a convenience function.
@param master_uri: (optional) override environment's ROS_MASTER_URI
@type master_uri: str
@return: True if Master is available
"""
try:
self.getPid()
return True
except:
return False
def _succeed(self, args):
"""
Check master return code and return the value field.
@param args: master return value
@type args: (int, str, XMLRPCLegalValue)
@return: value field of args (master return value)
@rtype: XMLRPCLegalValue
@raise rosgraph.masterapi.Error: if Master returns ERROR.
@raise rosgraph.masterapi.Failure: if Master returns FAILURE.
"""
code, msg, val = args
if code == 1:
return val
elif code == -1:
raise Error(msg)
else:
raise Failure(msg)
################################################################################
# PARAM SERVER
def deleteParam(self, key):
"""
Parameter Server: delete parameter
@param key: parameter name
@type key: str
@return: 0
@rtype: int
"""
return self._succeed(self.handle.deleteParam(self.caller_id, key))
def setParam(self, key, value):
"""
Parameter Server: set parameter. NOTE: if value is a
dictionary it will be treated as a parameter tree, where key
is the parameter namespace. For example:::
{'x':1,'y':2,'sub':{'z':3}}
will set key/x=1, key/y=2, and key/sub/z=3. Furthermore, it
will replace all existing parameters in the key parameter
namespace with the parameters in value. You must set
parameters individually if you wish to perform a union update.
@param key: parameter name
@type key: str
@param value: parameter value.
@type value: XMLRPCLegalValue
@return: 0
@rtype: int
"""
return self._succeed(self.handle.setParam(self.caller_id, key, value))
def getParam(self, key):
"""
Retrieve parameter value from server.
@param key: parameter to lookup. If key is a namespace,
getParam() will return a parameter tree.
@type key: str
getParam() will return a parameter tree.
@return: parameterValue. If key is a namespace,
the return value will be a dictionary, where each key is a
parameter in that namespace. Sub-namespaces are also
represented as dictionaries.
@rtype: XMLRPCLegalValue
"""
return self._succeed(self.handle.getParam(self.caller_id, key))
def searchParam(self, key):
"""
Search for parameter key on parameter server. Search starts in caller's namespace and proceeds
upwards through parent namespaces until Parameter Server finds a matching key.
searchParam's behavior is to search for the first partial match.
For example, imagine that there are two 'robot_description' parameters::
/robot_description
/robot_description/arm
/robot_description/base
/pr2/robot_description
/pr2/robot_description/base
If I start in the namespace /pr2/foo and search for
'robot_description', searchParam will match
/pr2/robot_description. If I search for 'robot_description/arm'
it will return /pr2/robot_description/arm, even though that
parameter does not exist (yet).
@param key: parameter key to search for.
@type key: str
@return: foundKey
@rtype: str
"""
return self._succeed(self.handle.searchParam(self.caller_id, key))
def subscribeParam(self, caller_api, key):
"""
Retrieve parameter value from server and subscribe to updates to that param. See
paramUpdate() in the Node API.
@param key: parameter to lookup.
@type key: str
@param caller_api: API URI for paramUpdate callbacks.
@type caller_api: str
@return: parameterValue. parameterValue is an empty dictionary if the parameter has not been set yet.
@rtype: XMLRPCLegalValue
"""
return self._succeed(self.handle.subscribeParam(self.caller_id, caller_api, key))
def unsubscribeParam(self, caller_api, key):
"""
Retrieve parameter value from server and subscribe to updates to that param. See
paramUpdate() in the Node API.
@param key: parameter to lookup.
@type key: str
@param caller_api: API URI for paramUpdate callbacks.
@type caller_api: str
@return: numUnsubscribed. If numUnsubscribed is zero it means that the caller was not subscribed to the parameter.
@rtype: int
"""
return self._succeed(self.handle.unsubscribeParam(self.caller_id, caller_api, key))
def hasParam(self, key):
"""
Check if parameter is stored on server.
@param key: parameter to check
@type key: str
@return: [code, statusMessage, hasParam]
@rtype: [int, str, bool]
"""
return self._succeed(self.handle.hasParam(self.caller_id, key))
def getParamNames(self):
"""
Get list of all parameter names stored on this server.
This does not adjust parameter names for caller's scope.
@return: [code, statusMessage, parameterNameList]
@rtype: [int, str, [str]]
"""
return self._succeed(self.handle.getParamNames(self.caller_id))
################################################################################
def getPid(self):
"""
Get the PID of this server
@return: serverProcessPID
@rtype: int
@raise rosgraph.masterapi.Error: if Master returns ERROR.
@raise rosgraph.masterapi.Failure: if Master returns FAILURE.
"""
return self._succeed(self.handle.getPid(self.caller_id))
def getUri(self):
"""
Get the URI of this Master
@return: masterUri
@rtype: str
@raise rosgraph.masterapi.Error: if Master returns ERROR.
@raise rosgraph.masterapi.Failure: if Master returns FAILURE.
"""
return self._succeed(self.handle.getUri(self.caller_id))
def registerService(self, service, service_api, caller_api):
"""
Register the caller as a provider of the specified service.
@param service str: Fully-qualified name of service
@param service_api str: Service URI
@param caller_api str: XML-RPC URI of caller node
@return: ignore
@rtype: int
@raise rosgraph.masterapi.Error: if Master returns ERROR.
@raise rosgraph.masterapi.Failure: if Master returns FAILURE.
"""
return self._succeed(self.bm.registerService(self.caller_id, service, service_api, caller_api))
def lookupService(self, service):
| |
#Import the relevant libraries
import numpy as np
import csv
import random
## Train data
# Loading in data using csv and np.  The 'with' block guarantees the file
# handle is closed (the original opened it and never closed it).
path = 'F:/Data Mining/Assignment 1/data'
with open(path + '/train.data', 'rt') as train:
    reader = csv.reader(train, delimiter=',', quoting=csv.QUOTE_NONE)
    x = list(reader)
train_data = np.array(x)
# Create empty lists for the pairs of classes
class1_2 = []
class2_3 = []
class1_3 = []
# Separate data into pairs of classes (each row's label is in column 4)
for row in train_data:
    if row[4] == 'class-1':
        # Append the row to the relevant pairs
        class1_2.append(row)
        class1_3.append(row)
    if row[4] == 'class-2':
        class2_3.append(row)
        class1_2.append(row)
    if row[4] == 'class-3':
        class1_3.append(row)
        class2_3.append(row)
## Test data
# Repeat the same steps as previously but with test.data; again use 'with'
# so the handle is closed (the original leaked it).
with open(path + '/test.data', 'rt') as test:
    reader = csv.reader(test, delimiter=',', quoting=csv.QUOTE_NONE)
    x = list(reader)
test_data = np.array(x)
class1_2_t = []
class2_3_t = []
class1_3_t = []
for row in test_data:
    if row[4] == 'class-1':
        class1_2_t.append(row)
        class1_3_t.append(row)
    if row[4] == 'class-2':
        class2_3_t.append(row)
        class1_2_t.append(row)
    if row[4] == 'class-3':
        class1_3_t.append(row)
        class2_3_t.append(row)
##############################################################################
"""
The Binary Perceptron function assigns the first class read in as -1, and the
second class as 1.
This function will take in a pair of classes in a list, weights, bias and a
string defining whether the data is "Train" or "Test".
It will either produce weights and a bias (if defined as train data), or
apply the weights of the train data to the test data to predict the
classification of each instance.
"""
def PerceptronBinary(data, w, b, trainOrTest):
    """Binary perceptron with a pocket algorithm.

    The first class name (in sorted order) is mapped to -1, the second
    to +1.

    Parameters:
        data: rows whose columns 0-3 are float features and column 4 is
            the class label string.
        w, b: weights and bias applied when trainOrTest != "Train"
            (ignored during training, which starts from zeros).
        trainOrTest: "Train" to learn for 20 epochs; anything else runs
            a single evaluation pass with the supplied w and b.

    Returns:
        (bestW, bestB) when training, otherwise None.

    Fixes vs. the original:
      * w and b are no longer zeroed in Test mode — the original reset
        them unconditionally, so every Test run discarded the trained
        model and evaluated zero weights.
      * The bias is added once per sample instead of once per feature.
      * np.unique's (names, codes) tuple is unpacked directly; wrapping
        the ragged tuple in np.array fails on numpy >= 1.24.
    """
    # Split into features (columns 0-3, float) and labels (column 4)
    data = np.hsplit(np.array(data), np.array([4, 8]))
    x = data[0].astype(float)
    names, y = np.unique(data[1], return_inverse=True)
    # Retain the class names for printing later
    name1 = names[0]
    name2 = names[1]
    y = np.asarray(y)
    # Map class codes {0, 1} -> {-1, +1}
    y[y < 1] = -1
    # Pocket algorithm state: best weights/bias/accuracy seen so far
    bestW = w
    bestB = b
    bestAcc = 0
    if trainOrTest != "Train":
        num_iterations = 1
    else:
        num_iterations = 20
        # Fresh model only when training; Test keeps the passed-in w, b
        w = [0.0, 0.0, 0.0, 0.0]
        b = 0
    for epoch in range(num_iterations):
        acc = 0
        # Shuffle features and labels together
        zipedList = list(zip(x, y))
        random.shuffle(zipedList)
        x, y = zip(*zipedList)
        for i in range(len(x)):
            # Activation: bias plus dot product (bias added exactly once)
            a = b
            for j in range(len(x[i])):
                a += w[j] * x[i][j]
            a = 1 if a > 0 else -1
            if (a * y[i]) <= 0:
                # Misclassified: perceptron update (training only)
                if trainOrTest == "Train":
                    for j in range(len(w)):
                        w[j] = w[j] + (y[i] * x[i][j])
                    b += y[i]
            else:
                acc += 1
        # Pocket step: keep the best-scoring model across epochs
        if bestAcc < acc:
            bestAcc = acc
            if trainOrTest == "Train":
                bestW = w.copy()
                bestB = b
    # Report accuracy of the pocketed model
    print(trainOrTest, "model accuracy for", name1, "/", name2+":", ((bestAcc) / len(x)) * 100, "%")
    print("\tGot: ", (bestAcc), "/", len(y), "lines correct\n")
    if trainOrTest == "Train":
        return bestW, bestB
    else:
        return
##############################################################################
"""
The Multi-Class Perceptron function utilises the 1-vs-rest algorithm, in which
the class of interest is given a 1, and the other classes are assigned -1.
This function will take in a whole dataset with three classes, weights and
bias in an array, and a string defining whether the data is "Train" or "Test".
It will either produce an array of weights and an array of bias values (if
defined as train data), or apply the weights and bias of the train data to
the test data to predict the classification of each instance.
"""
def PerceptronMultiClass(data, wArray, bArray, trainOrTest):
    """Multi-class perceptron using 1-vs-rest with a pocket algorithm.

    Each of the three rounds marks one class as +1 and the rest as -1.

    Parameters:
        data: rows whose columns 0-3 are float features and column 4 is
            the class label string (three distinct classes expected).
        wArray, bArray: per-class weights and biases applied when
            trainOrTest != "Train" (ignored during training).
        trainOrTest: "Train" to learn for 20 epochs per class; anything
            else runs one evaluation pass per class.

    Returns:
        (bestmultiW, bestmultiB) — lists of 3 weight vectors / biases —
        when training, otherwise None.

    Fixes vs. the original:
      * The perceptron update looped over j but wrote w[m] (the stale
        index left over from the activation loop) len(w) times, so only
        the last weight ever learned; it now updates every w[j].
      * The bias is added once per sample instead of once per feature.
      * np.unique's tuple is unpacked directly (np.array on the ragged
        tuple fails on numpy >= 1.24).
    """
    # Split into features (columns 0-3, float) and labels (column 4)
    data = np.hsplit(np.array(data), np.array([4, 8]))
    x = data[0].astype(float)
    _, y = np.unique(data[1], return_inverse=True)
    y = np.asarray(y)
    bestmultiW = []
    bestmultiB = []
    # z tracks shifted class codes; incrementing it each round moves the
    # "+1" label to the next class (code 2 is always the current target)
    z = y.copy()
    for i in range(3):
        bestAcc = 0
        if trainOrTest == "Train":
            # Fresh model per class when training; 20 epochs
            w = [0.0, 0.0, 0.0, 0.0]
            b = 0
            bestW = []
            bestB = 0
            num_iterations = 20
        else:
            # Evaluate the trained model for this class in one pass
            w = wArray[i]
            b = bArray[i]
            num_iterations = 1
        # Relabel: class of interest -> +1, everything else -> -1
        for j in range(z.shape[0]):
            if z[j] == 2:
                y[j] = 1
            else:
                y[j] = -1
        z += 1
        y = np.array(y)
        for epoch in range(num_iterations):
            acc = 0
            # Shuffle features and labels together
            zipedList = list(zip(x, y))
            random.shuffle(zipedList)
            x, y = zip(*zipedList)
            for k in range(len(x)):
                # Activation: bias plus dot product (bias added exactly once)
                a = float(b)
                for m in range(len(x[k])):
                    a += w[m] * x[k][m]
                if (a * y[k]) <= 0:
                    # Misclassified: perceptron update (training only)
                    if trainOrTest == "Train":
                        # Update EVERY weight (original wrote w[m] only)
                        for j in range(len(w)):
                            w[j] = w[j] + (y[k] * x[k][j])
                        b += y[k]
                else:
                    acc += 1
            # Pocket step: keep the best-scoring model across epochs
            if bestAcc < acc:
                bestAcc = acc
                if trainOrTest == "Train":
                    bestW = w.copy()
                    bestB = b
        print(trainOrTest, "model accuracy for Class", 3-i, ":", round((bestAcc/len(x) * 100), 2), "%")
        print("\tGot:", (bestAcc), "/", len(y), "lines correct\n")
        if trainOrTest == "Train":
            bestmultiW.append(bestW)
            bestmultiB.append(bestB)
        # Reset x and y from the raw data for the next 1-vs-rest round
        x = data[0].astype(float)
        _, y = np.unique(data[1], return_inverse=True)
        y = np.asarray(y)
    if trainOrTest == "Train":
        return bestmultiW, bestmultiB
    else:
        return
##############################################################################
## Run the models ##
"""
Binary Perceptron
For the train model:
Change the data within the function to class1_2, class2_3 or class1_3
Keep the weights and bias as 0
For the test model:
Change the data within function to class1_2_t, class2_3_t or class1_3_t
Change the weights and bias to w and b
When defining the train or test models, use "Train" or "Test"
"""
#Train model
#Save the weights and bias from the train model
#w, b = PerceptronBinary(class1_2, 0, 0, "Train")
#Test model
#Uses weights and bias saved from train model
#PerceptronBinary(class1_2_t, w, b,"Test")
#Q5 - Print the weights of model for class1_2
#print(w)
"""
Multi-Class Perceptron
For the train model:
Keep the data as train_data
Keep the weights and bias as 0
For the test model:
Keep the data as test_data
Keep the weights and bias as wArray and bArray
When defining the train or | |
u"velocity: rotation",
u"interpretation of experiments: PHENIX",
u"decay: weak interaction",
u"potential: stability",
u"model: relativistic",
u"axion: decay constant",
u"tube",
u"orbifold: singularity",
u"interaction: nonlinear",
u"quarkonium: leptonic decay",
u"detector: surface",
u"anti-B: radiative decay",
u"two-particle",
u"Salam-Weinberg model",
u"stop",
u"space-time: dimension: 3",
u"space-time: dimension: 5",
u"indium",
u"gluon: transverse momentum",
u"BRAHMS",
u"diffraction: production",
u"accelerator: control system",
u"neutrino nucleus: scattering",
u"differential cross section: correction",
u"color: 2",
u"molecule",
u"n-point function: 1",
u"isospin: symmetry",
u"conductivity: electric",
u"B/c: hadronic decay",
u"time: Euclidean",
u"bending magnet",
u"model: scalar",
u"hadron: molecule",
u"electroweak interaction: vacuum state",
u"Sudakov",
u"nucleon resonance: mass",
u"quark: operator",
u"K+: associated production",
u"operator: dimension: 5",
u"positron p: inclusive reaction",
u"space: lens",
u"Z0: production",
u"matter: interaction",
u"pi: formation",
u"excited state: spectrum",
u"expansion: heat kernel",
u"model: nuclear reaction",
u"neutrino: lifetime",
u"threshold: production",
u"group: modular",
u"invariance: Galilei",
u"SO(8)",
u"cosmic coincidence",
u"baryon resonance: hadronic decay",
u"microprocessor",
u"nucleon: polarizability",
u"bottom meson",
u"category: tensor",
u"Calogero model",
u"M-brane: 2",
u"lepton: charged particle",
u"charge: screening",
u"minijet",
u"magnetic monopole: condensation",
u"gluon: Kaluza-Klein",
u"meson: mass difference",
u"coupling: tensor",
u"PSI Cycl",
u"plasma: nonabelian",
u"anti-p p: inelastic scattering",
u"group: exceptional",
u"photon: scattering",
u"pi- p: exclusive reaction",
u"geometry: warped",
u"radion: mass",
u"action: complex",
u"background: geometry",
u"hyperon: hadronic decay",
u"fractal: dimension",
u"spectrum: fluctuation",
u"SO(4)",
u"string model: landscape",
u"Yang-Mills-Higgs theory",
u"field theoretical model",
u"Goldstone particle: mass",
u"radiation: pressure",
u"field theory: algebra",
u"field equations: monopole",
u"expansion: cluster",
u"detector",
u"dimension: spectral",
u"symmetry: SU(3) x SU(3)",
u"weak field",
u"path integral: Euclidean",
u"phi**n model",
u"B: hadroproduction",
u"particle number",
u"coupling constant: gauge",
u"charge dependence",
u"interpretation of experiments: Brookhaven PS",
u"lepton: multiplicity",
u"condensation: vacuum",
u"f0(600): width",
u"Jones polynomial",
u"gauge field theory: SU(5)",
u"gauge field theory: Sp(N)",
u"antineutrino/mu",
u"bound state: energy",
u"diffraction: structure function",
u"hyperon: polarization",
u"B/c: semileptonic decay",
u"Hopf",
u"AdS(3)",
u"coupling: matter",
u"coupling: axial",
u"coupling: electromagnetic",
u"D: hadroproduction",
u"spinor: Majorana",
u"W': mass",
u"Knizhnik-Zamolodchikov equation",
u"compactification: orientifold",
u"pi: charged particle",
u"muon: trigger",
u"interpretation of experiments: STAR",
u"tensor meson",
u"charmed meson: semileptonic decay",
u"sloan digital sky survey",
u"Higgs particle: Goldstone particle",
u"meson baryon: scattering amplitude",
u"operator: Laplace",
u"energy: exchange",
u"fluctuation: statistical",
u"gauge field theory: SU(4)",
u"Soudan",
u"Z0: exchange",
u"detector: technology",
u"D/s: decay constant",
u"Wess-Zumino-Witten model: SU(2)",
u"fuzzy",
u"B0: pair production",
u"particle: exotic",
u"D/s*(2110)",
u"micro-pattern detector",
u"baryon: excited state",
u"SLAC Lab",
u"random phase approximation: quasiparticle",
u"intermediate boson: fusion",
u"temperature: effect",
u"field equations: gravitation",
u"radio wave: emission",
u"J/psi(3100): width",
u"gauge field theory: conformal",
u"electric field: spatial distribution",
u"gravitation: f(T)",
u"space-time: Kaehler",
u"quantum chromodynamics: Theta parameter",
u"gravitation: external field",
u"baryon resonance: dibaryon",
u"expansion: hydrodynamics",
u"quantum chromodynamics: finite temperature",
u"field theory: collective",
u"model: thermal",
u"D/s0*(2317)",
u"time: conformal",
u"pi pi: scattering",
u"numerical methods: variational",
u"interpretation of experiments: PAMELA",
u"electron muon: transition",
u"K+",
u"fermion: tunneling",
u"potential: model",
u"charmed baryon: mass",
u"Georgi-Glashow model",
u"charged particle: rapidity spectrum",
u"factorization: violation",
u"parity: invariance",
u"flavon",
u"Pioneer anomaly",
u"deuteron: photofission",
u"vector meson: hadroproduction",
u"SL(2)",
u"Bianchi",
u"axion: coupling",
u"dark energy: coupling",
u"quantum chromodynamics: thermodynamics",
u"fibre bundle: vector",
u"accelerator: design",
u"X-ray: energy spectrum",
u"longitudinal momentum",
u"Bagger-Lambert-Gustavsson model",
u"nucleon nucleon: inelastic scattering",
u"heavy quark: pair production",
u"null-energy condition: violation",
u"finite temperature: effect",
u"background: time dependence",
u"Theta(1540): width",
u"effect: off-shell",
u"plasma: wake field",
u"electron nucleon: inelastic scattering",
u"hadronization: model",
u"quantum chromodynamics: axion",
u"TOTEM",
u"gravitation: strong field",
u"quantum gravity: Euclidean",
u"Bjorken",
u"decay",
u"non-Gaussianity: primordial",
u"K+: rare decay",
u"parton: interaction",
u"storage ring",
u"ILD detector",
u"resonance: gas",
u"refractive index",
u"analysis: dimensional",
u"model: coupled channel",
u"AdS(5) x S(5)",
u"D: decay constant",
u"scaling: anisotropy",
u"analysis: harmonic",
u"neutrino: scattering",
u"propagator: scalar",
u"width: finite",
u"model: cyclic",
u"initial-state interaction",
u"quark: momentum",
u"charged particle: massive",
u"cascade: electromagnetic",
u"charmed meson: branching ratio",
u"Theta(1540): hadronic decay",
u"neutrino neutrino: interaction",
u"flow: Wilson",
u"chi mesons: radiative decay",
u"flavor: asymmetry",
u"neutrino: VHE",
u"model: ekpyrotic",
u"gravitation: Einstein-Cartan",
u"leptogenesis: thermal",
u"quasiparticle: model",
u"betatron oscillation",
u"Donaldson theory",
u"gauge boson: exchange",
u"cosmic radiation: cascade",
u"p: accelerator",
u"space-time: Gowdy",
u"lepton: magnetic moment",
u"potential: oscillator",
u"hyperon: semileptonic decay",
u"action: spectral",
u"magnetic moment: dipole",
u"omega(783): photoproduction",
u"form factor: parametrization",
u"supergravity: 4",
u"photon: interaction",
u"quark: distribution function",
u"cosmic radiation: spatial distribution",
u"axion-like particles",
u"critical phenomena: superconductivity",
u"pentaquark: mass",
u"string: massive",
u"black hole: acoustic",
u"interpretation of experiments: BELLE",
u"photon: on-shell",
u"gauge field theory: U(2)",
u"field theory: interaction",
u"symmetry: SL(2)",
u"eta: mass",
u"perturbation: electromagnetic",
u"D-brane: 5",
u"spin: 3",
u"attenuation",
u"string: spectrum",
u"transformation: Weyl",
u"f0(1710)",
u"scalar meson: hadronic decay",
u"intermittency",
u"temporal gauge",
u"p: linear accelerator",
u"group: SU(N)",
u"quark antiquark: pair",
u"hadron: decay",
u"soliton: mass",
u"neutrino: wave function",
u"quantum mechanics: model",
u"muon: energy",
u"phase: geometrical",
u"photon: thermal",
u"quantization: Becchi-Rouet-Stora",
u"gravitation: noncommutative",
u"screening: length",
u"quark: deconfinement",
u"noise: spectrum",
u"rho(770): hadronic decay",
u"fermion: excited state",
u"duality: invariance",
u"Homestake",
u"gravitation: nonlinear",
u"background: model",
u"K: production",
u"gauge field theory: boson",
u"a1(1260)",
u"Wilson loop: correlation function",
u"charmed meson: mass",
u"Heavy Quark Effective Theory",
u"Upsilon(10570)",
u"graviton: scattering amplitude",
u"wire",
u"perturbation theory: correction",
u"B/s0: semileptonic decay",
u"gas: pressure",
u"holonomy: Spin(7)",
u"neutrino/e: flux",
u"upgrade: proposed",
u"gluon: shadowing",
u"drift chamber: gas",
u"readout: optical",
u"Lambda Lambda: hypernucleus",
u"quantization: correction",
u"coupling constant: renormalization",
u"target: mass",
u"quadrupole lens",
u"pi: multiplicity",
u"cosmic string: network",
u"associated production",
u"vector meson: leptonic decay",
u"deuteron: binding energy",
u"Morse theory",
u"electron: polarization",
u"mass: solar",
u"p: flux",
u"isospin: density",
u"meson resonance: mass",
u"radiation: thermal",
u"p-brane: 0",
u"background: dependence",
u"Lambda: associated production",
u"photon electron: exclusive reaction",
u"matter: induced",
u"p: model",
u"f0(1500)",
u"heterotic",
u"unitarity: constraint",
u"correlation: quantum",
u"interpretation of experiments: KAMIOKANDE",
u"neutralino nucleon: elastic scattering",
u"ice",
u"oscillation: length",
u"domain wall: BPS",
u"superconductivity: model",
u"black hole: interaction",
u"model: geometrical",
u"cross section: factorization",
u"J/psi(3100): dissociation",
u"quantum chromodynamics: validity test",
u"space-time: Calabi-Yau",
u"valence",
u"expansion: semiclassical",
u"grand unified theory: SU(5) x U(1)",
u"B: wave function",
u"charge distribution",
u"pseudoscalar meson: radiative decay",
u"quantum chromodynamics: radiation",
u"meson: molecule",
u"left-right",
u"structure function: small-x",
u"upsilon mesons: hadroproduction",
u"high temperature expansion",
u"charmed meson: electroproduction",
u"curvature: tensor",
u"total cross section: calculated",
u"field theory: renormalizable",
u"scale: saturation",
u"energy eigenstate",
u"neutrino nucleus: coherent interaction",
u"star: stability",
u"Morita equivalence",
u"rho(770)0: photoproduction",
u"polarization: effect",
u"muon: production",
u"sea: Dirac",
u"quantization: nonperturbative",
u"halo: mass",
u"string model: fragmentation",
u"electron: transverse momentum",
u"K- nucleus: nuclear reaction",
u"radiation: energy",
u"WIMP: velocity",
u"photon gluon: fusion",
u"gas: admixture",
u"nuclear matter: asymmetry",
u"muon+: leptonic decay",
u"neutrino: decay modes",
u"computer: performance",
u"field theory: Kaehler",
u"star: rotation",
u"leptoquark: coupling",
u"gauge boson: decay",
u"cosmological model: anisotropy",
u"quark: family",
u"SU(2) x SU(2)",
u"space-time: dimension: 4",
u"dark matter: hidden sector",
u"B0 anti-B0: mass difference",
u"Mach principle",
u"bottom: hadroproduction",
u"gravitation: acceleration",
u"psi(3770)",
u"quantum chromodynamics: light front",
u"oscillation: frequency",
u"K0(L): leptonic decay",
u"diffusion: model",
u"Lambda/b0: branching ratio",
u"fermion: localization",
u"pi- nucleus: inclusive reaction",
u"electron: spectrum",
u"energy: fluctuation",
u"X-ray: irradiation",
u"renormalization group: effect",
u"top: electroproduction",
u"lead: tungsten",
u"atom: interferometer",
u"interference: quantum",
u"D: branching ratio",
u"space-time: Kerr-Newman",
u"mass: negative",
u"interaction: vector",
u"distribution amplitude",
u"hopping",
u"pomeron: coupling",
u"neutralino: decay modes",
u"formation: time",
u"tachyon: stability",
u"lepton: electric moment",
u"quantum chromodynamics: quenching",
u"nucleon: decay",
u"Sigma",
u"form factor: hadronic",
u"strangeness: enhancement",
u"Einstein-Hilbert",
u"new particle: hadroproduction",
u"vacuum state: Theta parameter",
u"helium: hypernucleus",
u"algebra: Frobenius",
u"structure function: polarization",
u"omega(783): hadroproduction",
u"chromomagnetic",
u"Lambda(1520)",
u"electroweak interaction: mixing angle",
u"coupling: pseudoscalar",
u"photon deuteron: exclusive reaction",
u"K0(L): rare decay",
u"quark model: nonrelativistic",
u"semiconductor: optical",
u"CRESST",
u"group: discrete",
u"baryon: width",
u"meson: multiplet",
u"approximation: static",
u"accelerator: proposed",
u"correction: thermal",
u"dissociation",
u"radiation: yield",
u"coupling: spin: orbit",
u"matter: strong interaction",
u"broadening",
u"NOvA",
u"n: irradiation",
u"nucleon nucleon: scattering amplitude",
u"torus: noncommutative",
u"regularization: heat kernel",
u"Lorentz gauge",
u"mass spectrum: Kaluza-Klein",
u"structure function: slope",
u"scattering amplitude: dipole",
u"black hole: orbit",
u"black hole: perturbation",
u"particle: stability",
u"cross section: enhancement",
u"boson: string model",
u"tau: mass",
u"D*(2010): photoproduction",
u"positron: production",
u"Palatini model",
u"universality: violation",
u"muon: energy loss",
u"quark: form factor",
u"Legendre transformation",
u"fermion: current",
u"lepton: family",
u"gravitation: stability",
u"matter: asymmetry",
u"dilepton: mass",
u"statistical analysis: error",
u"transition radiation detector",
u"string: partition function",
u"binary: mass ratio",
u"Lee-Wick model",
u"field equations: soliton",
u"tensor: Killing-Yano",
u"scale: inflation",
u"Urca process",
u"NA61",
u"neutrino: interference",
u"Lambda: pair production",
u"false vacuum: decay",
u"argon: organic compounds",
u"thrust",
u"K*(892): hadronic decay",
u"electron: wave function",
u"right-handed",
u"hypernucleus: decay",
u"lepton deuteron: deep inelastic scattering",
u"quantum electrodynamics: supersymmetry",
u"Penning trap",
u"weak interaction: coupling constant",
u"gravitation: back reaction",
u"time machine",
u"wave function: light front",
u"gravitational radiation detector: sensitivity",
u"gauge field theory: action",
u"particle identification: efficiency",
u"quantum gravity: canonical",
u"resonance: oscillation",
u"MACRO",
u"Yangian",
u"hydrogen: target",
u"meson: radiative decay",
u"mass: accretion",
u"neutrino antineutrino: annihilation",
| |
Notes
-----
Population has rows which are the names of the individuals (e.g. portfolios)
and columns which are the genes (e.g. assets).
The propagation, i.e. population.history, has rows which are the time stamps,
and columns which are the names of the individuals (e.g. portfolios).
Examples
--------
None
"""
# Method of last return
if fitness_method == "Last Return":
fitness_value = [population.history[x][-1] for x in population.history.columns]
# Method combining last return and average volatility
elif fitness_method == "Last Return and Vol":
# Computing fitness from returns,
# taking the last row value of each columns (i.e. each portfolio)
fitness_from_return = [population.history[x][-1] for x in population.history.columns]
# Defining the environment (i.e. market) correlation over a period of time
# (here it does not really matter which one)
covmat = environment.data.loc[current_eval_date : next_eval_date].corr()
# Loop over portfolios
pop = population.data.filter(regex="Asset")
fitness_from_vol = []
for x in population.history.columns:
# Taking the weights for an output portfolio
weights = pop.loc[x]
# Computing fitness from volatility
fitness_from_vol.append(compute_vol(covmat, weights))
# Normalizing
normalized_fitness_from_return = fitness_from_return / sum(fitness_from_return)
normalized_fitness_from_vol = fitness_from_vol / sum(fitness_from_vol)
# Combining the 2 fitnesses
fitness_value = [ lamb * normalized_fitness_from_return[x]
+ (1-lamb) / normalized_fitness_from_vol[x]
for x in range(len(fitness_from_return)) ]
# Method combining average return and average volatility
elif fitness_method == "Avg Return and Vol":
# Computing fitness from returns,
# taking the last row value of each columns (i.e. each portfolio)
fitness_from_return = [ population.history[x].pct_change()[1:].mean()
for x in population.history.columns ]
# Defining the environment (i.e. market) correlation over a period of time
# (here it does not really matter which one)
covmat = environment.data.loc[current_eval_date : next_eval_date].corr()
# Loop over portfolios
pop = population.data.filter(regex="Asset")
fitness_from_vol = []
for x in population.history.columns:
# Taking the weights for an output portfolio
weights = pop.loc[x]
# Computing fitness from volatility
fitness_from_vol.append(compute_vol(weights, covmat))
# Combining the 2 fitnesses
fitness_value = [ lamb * fitness_from_return[x]
+ (1-lamb) / fitness_from_vol[x]
for x in range(len(fitness_from_return)) ]
# Method based on the Sharpe Ratio
# We assume the risk-free rate is 0% to avoid introducing an arbitrary value here.
elif fitness_method == "Sharpe Ratio":
# Computing fitness from returns,
# taking the last row value of each columns (i.e. each portfolio)
fitness_from_return = [ population.history[x].pct_change()[1:].mean()
for x in population.history.columns ]
# Defining the environment correlation over a period of time
# (here it does not really matter which one)
covmat = environment.data.loc[current_eval_date : next_eval_date].corr()
# Loop over portfolios
pop = population.data.filter(regex="Asset")
fitness_from_vol = []
for x in population.history.columns:
# Taking the weights for an output portfolio
weights = pop.loc[x]
# Computing fitness from volatility
fitness_from_vol.append(compute_vol(weights, covmat))
# Combining the 2 fitnesses
fitness_value = [fitness_from_return[x] / fitness_from_vol[x] for x in range(len(fitness_from_return))]
# Otherwise return Exception
else:
raise ValueError("Specified fitness method does not exist.")
return fitness_value
# VISUALIZATION METHODS
@typechecked
def visualize_portfolios_1(market: Market,
                           propagation: pd.DataFrame,
                           evaluation_dates: Union[list, pd.PeriodIndex],
                           dims: (float, float) = (10, 5),
                           xlim: float = None,
                           ylim: float = None,
                           ) -> None:
    """
    Quick visual overview of the market, the individual portfolios,
    and the evaluation dates, drawn on a single set of axes.

    Parameters
    ----------
    market : Market
        Market from which we extract data about genes (i.e. assets).
    propagation : DataFrame
        Propagation of individuals over time.
    evaluation_dates : List of Period dates
        Dates at which we want to evaluate the individuals.
    dims : (float, float)
        (Optional) Dimensions of the plot.
    xlim : float
        (Optional) Range in x.
    ylim : float
        (Optional) Range in y.

    Returns
    -------
    None
        None
    """
    # The Equally-Weighted index of the market serves as the base plot
    index_ew = marketdata.market_EWindex(market)
    axes = index_ew.plot(figsize=dims)
    # Overlay every individual portfolio on the same axes
    for portfolio_name in propagation.columns:
        propagation[portfolio_name].plot(ax=axes)
    # Mark each evaluation date with a dashed vertical line
    for eval_date in evaluation_dates:
        axes.axvline(x=eval_date, color='grey', linestyle='--')
    # Apply the requested axis ranges (None leaves matplotlib defaults)
    axes.set_xlim(xlim)
    axes.set_ylim(ylim)
    return None
@typechecked
def visualize_portfolios_2(market: pd.DataFrame,
                           marketcap: pd.Series,
                           propagation: pd.DataFrame,
                           evaluation_dates: list,
                           dims: (float, float) = (10, 5),
                           xlim: float = None,
                           ylim: float = None,
                           savefile: bool = False,
                           namefile: str="Result.png"
                           ) -> None:
    """
    Quick visual overview of the market (EW and CW indices), the individual
    portfolios, and the evaluation dates, with an option to save the figure.

    Parameters
    ----------
    market : pd.DataFrame
        Market from which we extract data about assets (i.e. genes).
    marketcap : pd.Series
        Market capitalization of the assets.
    propagation : pd.DataFrame
        Propagation of individuals over time.
    evaluation_dates : List of Period dates
        Dates at which we want to evaluate the individuals.
    dims : (float, float)
        (Optional) Dimensions of the plot.
    xlim : float
        (Optional) Range in x.
    ylim : float
        (Optional) Range in y.
    savefile : bool
        Option to save the plot.
    namefile : str
        Name of the file to save in.

    Returns
    -------
    None
        None
    """
    # One figure / axes pair for everything
    fig, axes = plt.subplots(nrows=1, ncols=1)
    # Equally-Weighted and Cap-Weighted indices of the market
    index_ew = marketdata.market_EWindex(market)
    index_cw = marketdata.market_CWindex(market, marketcap)
    # Draw the EW index first so the portfolios are plotted over it
    index_ew.plot(figsize=dims, color='black',
                  linestyle='--', linewidth=1,
                  ax=axes, legend=False)
    # Dashed vertical line at every evaluation date
    for eval_date in evaluation_dates:
        axes.axvline(x=eval_date, color='grey', linestyle='--', linewidth=1)
    # Each individual portfolio
    for portfolio_name in propagation.columns:
        propagation[portfolio_name].plot(ax=axes)
    # Redraw both market indices so they appear on top of the portfolios
    index_ew.plot(figsize=dims, color='black', linestyle='--', linewidth=1, ax=axes)
    index_cw.plot(figsize=dims, color='black', linestyle='-', linewidth=1, ax=axes)
    # Apply the requested axis ranges
    axes.set_xlim(xlim)
    axes.set_ylim(ylim)
    # Optionally save the figure as a png file
    if savefile:
        plt.savefig('./' + namefile)
    return None
@typechecked
def show_allocation_distrib(step: int,
                            saved_gens: pd.DataFrame,
                            eval_dates: list,
                            n_bins: int=50,
                            savefile: bool=False,
                            namefile: str="Allocation_Distribution.png"
                            ) -> None:
    """
    Plots the distribution of saved generations (including elites and individuals)
    for a given step of the loop that ran in `Genetic_Portfolio_Routine`.

    The allocations are averaged over individuals: for each asset we sum the
    allocations across individuals and divide by the number of individuals.

    Parameters
    ----------
    step : int
        Step of the loop.
    saved_gens : DataFrame
        Generations to plot from.
    eval_dates : List of Period dates
        Evaluation dates for display.
    n_bins : int
        Number of bins.
    savefile : bool
        Option to save the plot.
    namefile : str
        Name of the file to save in.

    Returns
    -------
    None
        None
    """
    # Index of the final step of the loop
    last_step = len(saved_gens) - 1
    # Mean allocation per asset at the requested step
    mean_alloc = (saved_gens[step].sum() / saved_gens[step].shape[0]).tolist()
    fig, axis = plt.subplots(nrows=1, ncols=1, figsize=(15,5))
    # Fix the x-range from the final step, assuming allocations are widest there
    # (which may not be true)
    final_alloc = (saved_gens[last_step].sum() / saved_gens[last_step].shape[0]).tolist()
    xmin = min(final_alloc) * 1.2
    xmax = max(final_alloc) * 1.2
    # Draw the histogram with the evaluation date in the title
    plt.hist(x=mean_alloc, bins=n_bins, range=[xmin,xmax])
    plt.title("Histogram of allocations - "
              + eval_dates[step].to_timestamp().strftime("%Y-%m-%d"))
    # Optionally save the figure as a png file
    if savefile:
        plt.savefig('./' + namefile)
    return None
@typechecked
def config_4n(n: (int, int, int, int),
market: Market,
vix: pd.DataFrame,
savefile: bool=False,
namefile: str="VIX_derived_quantities.png"
) -> None:
"""
Plots the evaluation dates, ndays, mutation rate and fitness lambda
as computed from the Volatility Index (VIX) and 4 structure numbers.
Evaluation dates are computed according to the VIX, trying to get more evaluations
when the VIX takes high values.
This function is used to set the configuration of a dynamical creation
of evaluation dates and mutation rate.
Parameters
----------
n : 4-tuple of int
Structure parameters.
market : Market
Market from which we extract data about assets (i.e. genes).
vix : DataFrame
Values of the VIX over time.
savefile : bool
Option to save the plot.
namefile : str
Name of the file to save in.
Returns
-------
None
None
"""
# Checks
assert(len(n)==4)
# Initializations
n1, n2, n3, n4 = n
market_dates = market.data.index.to_timestamp().strftime("%Y-%m-%d").tolist()
save_eval_dates = []
save_mutation_rate = []
save_ndays = []
save_fitness_lambda = []
# Loop
loop = 0
eval_date = market.data.index[0]
next_eval_date = market.data.index[10]
while next_eval_date < market.data.index[-1]:
# Updating
eval_date = next_eval_date
save_eval_dates.append(next_eval_date)
# Computing the number of days to next date
vix_ateval = 1 + (vix[eval_date.to_timestamp().strftime("%Y-%m-%d")]/n1).astype('int')
ndays = n2 - vix_ateval
save_ndays.append(ndays)
if ndays <= 0:
| |
# spanner_orm/condition.py (from repo GavinDuggan/python-spanner-orm)
# python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Used with Model#where and Model#count to help create Spanner queries."""
import abc
import base64
import dataclasses
import datetime
import decimal
import enum
import string
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Type, TypeVar, Union
from spanner_orm import error
from spanner_orm import field
from spanner_orm import foreign_key_relationship
from spanner_orm import index
from spanner_orm import relationship
from google.api_core import datetime_helpers
from google.cloud.spanner_v1.proto import type_pb2
import immutabledict
T = TypeVar('T')


class Segment(enum.Enum):
  """The segment of the SQL query that a Condition belongs to."""
  # auto() assigns 1..5 in declaration order, matching SELECT-statement layout.
  FROM = enum.auto()
  JOIN = enum.auto()
  WHERE = enum.auto()
  ORDER_BY = enum.auto()
  LIMIT = enum.auto()
class Condition(abc.ABC):
  """Base class for specifying conditions in a Spanner query."""

  def __init__(self):
    # Set by bind(); the Model subclass this condition is attached to.
    self.model_class = None  # type: Optional[Type[Any]]
    # Set externally to disambiguate repeated parameter names.
    self.suffix = None  # type: Optional[str]

  def bind(self, model_class: Type[Any]) -> None:
    """Specifies which model instance the condition is being run on."""
    self._validate(model_class)
    self.model_class = model_class

  def key(self, name: str) -> str:
    """Returns the unique parameter name for the given name.

    When a name is used multiple times by different conditions (for instance,
    we name parameters for the column they are being compared against, so
    multiple conditions on the same column causes this), we need to generate
    a unique name to disambiguate between these parameters. We do that by
    appending a suffix that is based on the number of parameters that have
    already been added to the query

    Args:
      name: Name of parameter to make unique
    """
    return f'{name}{self.suffix}' if self.suffix else name

  def _assert_bound(self) -> None:
    # Shared guard: the public accessors below require bind() first.
    if not self.model_class:
      raise error.SpannerError('Condition must be bound before usage')

  def params(self) -> Dict[str, Any]:
    """Returns parameters to be used in the SQL query.

    Returns:
      Dictionary mapping from parameter name to value that should be
      substituted for that parameter in the SQL query
    """
    self._assert_bound()
    return self._params()

  @abc.abstractmethod
  def _params(self) -> Dict[str, Any]:
    raise NotImplementedError

  @abc.abstractmethod
  def segment(self) -> Segment:
    """Returns which segment of the SQL query this condition belongs to."""
    raise NotImplementedError

  def sql(self) -> str:
    """Generates and returns the SQL to be used in the Spanner query."""
    self._assert_bound()
    return self._sql()

  @abc.abstractmethod
  def _sql(self) -> str:
    pass

  def types(self) -> Dict[str, type_pb2.Type]:
    """Returns parameter types to be used in the SQL query.

    Returns:
      Dictionary mapping from parameter name to the type of the value that
      should be substituted for that parameter in the SQL query
    """
    self._assert_bound()
    return self._types()

  @abc.abstractmethod
  def _types(self) -> Dict[str, type_pb2.Type]:
    raise NotImplementedError

  @abc.abstractmethod
  def _validate(self, model_class: Type[Any]) -> None:
    raise NotImplementedError
# Python values whose Cloud Spanner type can be inferred automatically by
# _spanner_type_of_python_object; anything else needs an explicit type.
GuessableParamType = Union[
    bool,  #
    int,  #
    float,  #
    datetime_helpers.DatetimeWithNanoseconds,  #
    datetime.datetime,  #
    datetime.date,  #
    bytes,  #
    str,  #
    decimal.Decimal,  #
    # These types technically include List[None] and Tuple[None, ...], but
    # those can't be guessed.
    List[Optional[bool]],  #
    List[Optional[int]],  #
    List[Optional[float]],  #
    List[Optional[datetime_helpers.DatetimeWithNanoseconds]],  #
    List[Optional[datetime.datetime]],  #
    List[Optional[datetime.date]],  #
    List[Optional[bytes]],  #
    List[Optional[str]],  #
    List[Optional[decimal.Decimal]],  #
    Tuple[Optional[bool], ...],  #
    Tuple[Optional[int], ...],  #
    Tuple[Optional[float], ...],  #
    Tuple[Optional[datetime_helpers.DatetimeWithNanoseconds], ...],  #
    Tuple[Optional[datetime.datetime], ...],  #
    Tuple[Optional[datetime.date], ...],  #
    Tuple[Optional[bytes], ...],  #
    Tuple[Optional[str], ...],  #
    Tuple[Optional[decimal.Decimal], ...],  #
]
def _spanner_type_of_python_object(value: GuessableParamType) -> type_pb2.Type:
  """Returns the Cloud Spanner type of the given object.

  Args:
    value: Object to guess the type of.

  Raises:
    TypeError: If the type cannot be guessed — for None, an array whose
      non-None elements are not all of a single type, or any unsupported
      Python type.
  """
  # See
  # https://github.com/googleapis/python-spanner/blob/master/google/cloud/spanner_v1/proto/type.proto
  # for the Cloud Spanner types, and
  # https://github.com/googleapis/python-spanner/blob/e981adb3157bb06e4cb466ca81d74d85da976754/google/cloud/spanner_v1/_helpers.py#L91-L133
  # for Python types.
  if value is None:
    raise TypeError(
        'Cannot infer type of None, because any SQL type can be NULL.')
  # Exact-type lookup (not isinstance) for the scalar types.
  simple_type_code = {
      bool: type_pb2.BOOL,
      int: type_pb2.INT64,
      float: type_pb2.FLOAT64,
      datetime_helpers.DatetimeWithNanoseconds: type_pb2.TIMESTAMP,
      datetime.datetime: type_pb2.TIMESTAMP,
      datetime.date: type_pb2.DATE,
      bytes: type_pb2.BYTES,
      str: type_pb2.STRING,
      decimal.Decimal: type_pb2.NUMERIC,
  }.get(type(value))
  if simple_type_code is not None:
    return type_pb2.Type(code=simple_type_code)
  elif isinstance(value, (list, tuple)):
    # Guess the element type from the non-None elements, which must all agree.
    element_types = tuple(
        _spanner_type_of_python_object(item)
        for item in value
        if item is not None)
    unique_element_type_count = len({
        # Protos aren't hashable, so use their serializations.
        element_type.SerializeToString(deterministic=True)
        for element_type in element_types
    })
    if unique_element_type_count == 1:
      return type_pb2.Type(
          code=type_pb2.ARRAY,
          array_element_type=element_types[0],
      )
    else:
      raise TypeError(
          f'Array does not have elements of exactly one type: {value!r}')
  else:
    # BUG FIX: this message previously lacked the f-prefix, so it rendered
    # literally as 'Unknown type: {value!r}' instead of showing the value.
    raise TypeError(f'Unknown type: {value!r}')
@dataclasses.dataclass
class Param:
  """Parameter for substitution into a SQL query."""
  # The Python value to substitute (bytes are base64-encoded on the wire).
  value: Any
  # The Cloud Spanner type of the value.
  type: type_pb2.Type

  @classmethod
  def from_value(cls: Type[T], value: GuessableParamType) -> T:
    """Returns a Param with the type guessed from a Python value."""
    guessed_type = _spanner_type_of_python_object(value)
    bytes_type = type_pb2.Type(code=type_pb2.BYTES)
    bytes_array_type = type_pb2.Type(
        code=type_pb2.ARRAY,
        array_element_type=bytes_type,
    )
    # BYTES must be base64-encoded, see
    # https://github.com/googleapis/python-spanner/blob/87789c939990794bfd91f5300bedc449fd74bd7e/google/cloud/spanner_v1/proto/type.proto#L108-L110
    if isinstance(value, bytes) and guessed_type == bytes_type:
      wire_value = base64.b64encode(value).decode()
    elif (isinstance(value, (list, tuple)) and
          guessed_type == bytes_array_type and
          all(isinstance(x, bytes) for x in value if x is not None)):
      wire_value = tuple(
          None if item is None else base64.b64encode(item).decode()
          for item in value)
    else:
      wire_value = value
    return cls(value=wire_value, type=guessed_type)
@dataclasses.dataclass
class Column:
  """Named column; consider using field.Field instead."""
  # Name of the column as it appears in the model's table.
  name: str
# Something that can be substituted into a SQL query.
Substitution = Union[Param, field.Field, Column]
class ArbitraryCondition(Condition):
  """Condition with support for arbitrary SQL."""

  def __init__(
      self,
      sql_template: str,
      substitutions: Mapping[str, Substitution] = immutabledict.immutabledict(),
      *,
      segment: Segment,
  ):
    """Initializer.

    Args:
      sql_template: string.Template-compatible template string for the SQL.
      substitutions: Substitutions to make in sql_template.
      segment: Segment for this Condition.
    """
    super().__init__()
    self._template = string.Template(sql_template)
    self._substitutions = substitutions
    self._segment = segment
    # Eagerly substituting placeholders validates the template itself.
    self._template.substitute({name: '' for name in self._substitutions})

  def segment(self) -> Segment:
    """See base class."""
    return self._segment

  def _validate(self, model_class: Type[Any]) -> None:
    """See base class."""
    # Every Field/Column substitution must refer to a column of model_class.
    for substitution in self._substitutions.values():
      if (isinstance(substitution, field.Field) and
          substitution not in model_class.fields.values()):
        raise error.ValidationError(
            f'Field {substitution.name!r} does not belong to the Model for '
            f'table {model_class.table!r}.')
      if (isinstance(substitution, Column) and
          substitution.name not in model_class.fields):
        raise error.ValidationError(
            f'Column {substitution.name!r} does not exist in the Model for '
            f'table {model_class.table!r}.')

  def _params(self) -> Dict[str, Any]:
    """See base class."""
    result = {}
    for name, substitution in self._substitutions.items():
      if isinstance(substitution, Param):
        result[self.key(name)] = substitution.value
    return result

  def _types(self) -> Dict[str, type_pb2.Type]:
    """See base class."""
    result = {}
    for name, substitution in self._substitutions.items():
      if isinstance(substitution, Param):
        result[self.key(name)] = substitution.type
    return result

  def _sql_for_substitution(self, key: str, substitution: Substitution) -> str:
    # Fields/Columns render as qualified column references; Params as @-params.
    if isinstance(substitution, (field.Field, Column)):
      return f'{self.model_class.column_prefix}.{substitution.name}'
    assert isinstance(substitution, Param)
    return f'@{self.key(key)}'

  def _sql(self) -> str:
    """See base class."""
    rendered = {
        name: self._sql_for_substitution(name, substitution)
        for name, substitution in self._substitutions.items()
    }
    return self._template.substitute(rendered)
class ColumnsEqualCondition(Condition):
  """Used to join records by matching column values."""

  def __init__(self, origin_column: str, destination_model_class: Type[Any],
               destination_column: str):
    super().__init__()
    self.column = origin_column
    self.destination_model_class = destination_model_class
    self.destination_column = destination_column

  def _params(self) -> Dict[str, Any]:
    # Pure column comparison; no query parameters involved.
    return {}

  def segment(self) -> Segment:
    return Segment.WHERE

  def _sql(self) -> str:
    return (f'{self.model_class.table}.{self.column} = '
            f'{self.destination_model_class.table}.{self.destination_column}')

  def _types(self) -> Dict[str, type_pb2.Type]:
    return {}

  def _validate(self, model_class: Type[Any]) -> None:
    # Both columns must exist and have matching type and nullability.
    if self.column not in model_class.fields:
      raise error.ValidationError(
          f'{self.column} is not a column on {model_class.table}')
    origin = model_class.fields[self.column]
    if self.destination_column not in self.destination_model_class.fields:
      raise error.ValidationError(
          f'{self.destination_column} is not a column on '
          f'{self.destination_model_class.table}')
    dest = self.destination_model_class.fields[self.destination_column]
    if (origin.field_type() != dest.field_type() or
        origin.nullable() != dest.nullable()):
      raise error.ValidationError(
          f'Types of {origin.name} and {dest.name} do not match')
  
class ForceIndexCondition(Condition):
  """Used to indicate which index should be used in a Spanner query."""

  def __init__(self, index_or_name: Union[Type[index.Index], str]):
    super().__init__()
    # Accept either an Index object or the bare index name.
    if isinstance(index_or_name, index.Index):
      self.index = index_or_name
      self.name = index_or_name.name
    else:
      self.index = None
      self.name = index_or_name

  def bind(self, model_class: Type[Any]) -> None:
    super().bind(model_class)
    # Resolve the name to the model's Index object once bound.
    self.index = self.model_class.indexes[self.name]

  def _params(self) -> Dict[str, Any]:
    return {}

  def segment(self) -> Segment:
    return Segment.FROM

  def _sql(self) -> str:
    return f'@{{FORCE_INDEX={self.name}}}'

  def _types(self) -> Dict[str, type_pb2.Type]:
    return {}

  def _validate(self, model_class: Type[Any]) -> None:
    if self.name not in model_class.indexes:
      raise error.ValidationError(
          f'{self.name} is not an index on {model_class.table}')
    if self.index and self.index != model_class.indexes[self.name]:
      raise error.ValidationError(
          f'{self.index.name} does not belong to {model_class.table}')
    if model_class.indexes[self.name].primary:
      raise error.ValidationError('Cannot force query using primary index')
class IncludesCondition(Condition):
"""Used to include related model_classs via a relation in a | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# standard modules
import csv
import math
# third party modules
import luigi
import matplotlib; matplotlib.use('Agg')
import matplotlib.pyplot as plt
import dadi
import pickle
import random
# local modules
from alleletraj.const import MUTATION_RATE
from vcf import PolarizeVCF, WholeAutosomeSNPsVCF
from alleletraj import utils
# Tuning knobs for the dadi demographic-model fitting pipeline below.
# number of sequential epochs to test
DADI_MAX_EPOCHS = 5 # TODO increase this, as 5 was the best model for cattle and it would be preferable to overshoot
# how many independent replicates should we run to find the global maximum params (dadi can get stuck in local maxima)
DADI_REPLICATES = 1000
# number of points to use in the grid
DADI_GRID_PTS = 100
# maximum relative log likelihood to not reject the second best model
DADI_MAX_RELATIVE_LL = 0.10
# is spectrum folded or polarised
# (named aliases for the boolean `folded` flag taken by the tasks below)
DADI_FOLDED = True
DADI_UNFOLDED = False
def dadi_n_epoch(params, ns, pts):
    """
    Sequential epoch model for dadi.

    :param params: Population sizes and times of the epochs (e.g. n1, n2... t1, t2)
    :param ns: Number of samples in resulting Spectrum
    :param pts: Number of grid points to use in integration
    :return: The model frequency Spectrum
    """
    # how many epochs does this model have (each epoch has 2 params)
    # BUG FIX: use floor division so `epochs` is always an int -- under
    # Python 3 a plain `/` yields a float, which breaks both the slicing
    # and the range() call below (behaviour is unchanged under Python 2)
    epochs = len(params) // 2
    # nu: Ratio of contemporary to ancient population size
    # t: Time in the past at which size change happened (in units of 2*Na generations)
    nu, t = params[:epochs], params[epochs:]
    # make the grid
    grid = dadi.Numerics.default_grid(pts)
    # one-dimensional phi for a constant-sized population
    phi = dadi.PhiManip.phi_1D(grid)
    for i in range(epochs):
        # integrate a 1-dimensional phi forward
        phi = dadi.Integration.one_pop(phi, grid, t[i], nu[i])
    # compute sample Spectrum from population frequency distribution phi
    fs = dadi.Spectrum.from_phi(phi, ns, [grid])
    return fs
class EasySFSPopFile(utils.DatabaseTask):
    """
    Write the sample and population files for just those samples used to calculate the SFS.

    :type species: str
    :type population: str
    """
    species = luigi.Parameter()
    population = luigi.Parameter()

    def output(self):
        return [luigi.LocalTarget('data/sfs/{}.{}'.format(self.basename, ext))
                for ext in ('pops', 'spl')]

    def run(self):
        pops_target, samples_target = self.output()
        # only modern samples that were flagged for inclusion in the SFS
        sample_names = self.dbc.get_records('samples',
                                            {'population': self.population, 'ancient': 0, 'sfs': 1},
                                            key='name')
        # write the population map and the plain sample list side by side
        with pops_target.open('w') as pops_out, samples_target.open('w') as samples_out:
            for name in sample_names:
                pops_out.write('{}\t{}\n'.format(name, self.population))
                samples_out.write('{}\n'.format(name))
class EasySFS(utils.DatabaseTask):
    """
    Calculate the Site Frequency Spectrum.

    :type species: str
    :type population: str
    :type folded: bool
    """
    species = luigi.Parameter()
    population = luigi.Parameter()
    folded = luigi.BoolParameter()

    resources = {'cpu-cores': 1, 'ram-gb': 96}

    def requires(self):
        yield EasySFSPopFile(self.species, self.population)
        yield WholeAutosomeSNPsVCF(self.species)

    def output(self):
        return [luigi.LocalTarget('data/sfs/{}/dadi/{}.{}'.format(self.basename, self.population, ext))
                for ext in ('sfs', 'log')]

    def run(self):
        # unpack the inputs/outputs
        (pop_file, _), vcf_file = self.input()
        sfs_file, log_file = self.output()
        # the pop file has one line per sample, so its line count is the sample size
        wc_output = utils.run_cmd(['wc', '-l', pop_file.path])
        num_samples = int(wc_output.split()[0])
        # TODO include all populations so we only have to load the VCF once!!
        # NOTE easySFS expects the REF allele to be ancestral, rather than using the INFO/AA field
        # pipe 'yes' into easySFS to get past the interactive prompt which complains about excluded samples
        cmd = "echo 'yes' | easySFS.py -a -f -i {vcf} -p {pops} -o data/sfs/{out} --proj {proj} {fold}".format(
            vcf=vcf_file.path,
            pops=pop_file.path,
            out=self.basename,
            proj=num_samples * 2,  # don't project down
            fold='' if self.folded else '--unfolded')
        log = utils.run_cmd([cmd], shell=True)
        # save the easySFS console output
        with log_file.open('w') as fout:
            fout.write(log)
class DadiEpochOptimizeParams(utils.PipelineTask):
    """
    Optimise the log likelihood of the model parameters for the given SFS.

    Each replicate `n` starts the Nelder-Mead optimiser from a different,
    deterministically seeded random point, because dadi can get stuck in
    local maxima (see DadiEpochMaximumLikelihood, which picks the best run).

    :type species: str
    :type population: str
    :type folded: bool
    :type epoch: int
    :type n: int
    """
    species = luigi.Parameter()
    population = luigi.Parameter()
    folded = luigi.BoolParameter()
    epoch = luigi.IntParameter()
    n = luigi.IntParameter()
    def requires(self):
        return EasySFS(self.species, self.population, self.folded)
    def output(self):
        # trim the n value from the folder name
        folder = self.basename.rpartition('-')[0]
        return [luigi.LocalTarget('data/dadi/{}/{}.{}'.format(folder, self.basename, ext)) for ext in ['pkl', 'log']]
    def run(self):
        # unpack the inputs/outputs
        sfs_file, _ = self.input()
        pkl_file, log_file = self.output()
        # load the frequency spectrum
        fs = dadi.Spectrum.from_file(sfs_file.path)
        # set the upper and lower parameter bounds (0.01 < nu < 100 | 0 < T < 5)
        # layout is [nu1..nuE, t1..tE], matching dadi_n_epoch()
        lower = [.01] * self.epoch + [0] * self.epoch
        upper = [100] * self.epoch + [5] * self.epoch
        # make a deterministic random seed (helps keep everything easily reproducible)
        # e.g. epoch=3, n=42 -> seed 342
        # NOTE(review): seeds collide for pairs like (1, 23) and (12, 3) --
        # presumably harmless since epoch/n are also stored, but worth confirming
        seed = int('{}{}'.format(self.epoch, self.n))
        random.seed(seed)
        # pick random starting values, bounded by the upper and lower parameter limits
        start = [random.uniform(lower[i], upper[i]) for i in range(0, self.epoch * 2)]
        # make the extrapolating version of our demographic model function.
        dadi_n_epoch_extrap = dadi.Numerics.make_extrap_log_func(dadi_n_epoch)
        # make sure the output folder exists
        log_file.makedirs()
        # optimize log(params) to fit model to data using Nelder-Mead algorithm
        p_opt = dadi.Inference.optimize_log_fmin(start, fs, dadi_n_epoch_extrap, lower_bound=lower, upper_bound=upper,
                                                 pts=DADI_GRID_PTS, verbose=50, output_file=log_file.path,
                                                 full_output=True)
        # fit the optimised model
        # p_opt[0] = best-fit params, p_opt[1] = final negated log-likelihood
        model = dadi_n_epoch_extrap(p_opt[0], fs.sample_sizes, DADI_GRID_PTS)
        # calculate theta given the model
        theta = dadi.Inference.optimal_sfs_scaling(model, fs)
        # save the relevant information
        best = {'epoch': self.epoch, 'n': self.n, 'lnL': -p_opt[1], 'params': p_opt[0], 'theta': theta}
        # save the results by pickling them in a file
        with pkl_file.open('w') as fout:
            pickle.dump(best, fout)
class DadiEpochMaximumLikelihood(utils.PipelineTask):
    """
    Find the model run with the maximum log likelihood out of all the replicates.

    Because dadi gets stuck easily in local maxima we run multiple replicates
    and keep only the best one.

    :type species: str
    :type population: str
    :type folded: bool
    :type epoch: int
    """
    species = luigi.Parameter()
    population = luigi.Parameter()
    folded = luigi.BoolParameter()
    epoch = luigi.IntParameter()

    def requires(self):
        yield EasySFS(self.species, self.population, self.folded)
        for n in range(1, DADI_REPLICATES + 1):
            yield DadiEpochOptimizeParams(self.species, self.population, self.folded, self.epoch, n)

    def output(self):
        yield luigi.LocalTarget('data/dadi/{}/{}-maxlnL.pkl'.format(self.basename, self.basename))
        yield luigi.LocalTarget('data/dadi/{}.params'.format(self.basename))
        yield luigi.LocalTarget('data/dadi/{}.pdf'.format(self.basename))

    def run(self):
        # unpack the inputs/outputs
        (sfs_file, _), pkl_files = self.input()[0], self.input()[1:]
        pkl_out, params_file, pdf_file = self.output()
        # gather the pickled results of every replicate run
        replicates = []
        for pkl_file, _ in pkl_files:
            with pkl_file.open('r') as fin:
                replicates.append(pickle.load(fin))
        # keep only the replicate with the highest log likelihood
        best = max(replicates, key=lambda rep: rep['lnL'])
        # load the frequency spectrum
        fs = dadi.Spectrum.from_file(sfs_file.path)
        # make the extrapolating version of our demographic model function
        dadi_n_epoch_extrap = dadi.Numerics.make_extrap_log_func(dadi_n_epoch)
        # fit the best model to the observed spectrum
        model = dadi_n_epoch_extrap(best['params'], fs.sample_sizes, DADI_GRID_PTS)
        # plot the model/data comparison and save it
        fig = plt.figure(1)
        dadi.Plotting.plot_1d_comp_multinom(model, fs, fig_num=1, plot_masked=True)
        fig.savefig(pdf_file.path)
        plt.close(fig)
        # persist the winning replicate, both pickled and as plain text
        with pkl_out.open('w') as fout:
            pickle.dump(best, fout)
        with params_file.open('w') as fout:
            fout.write('{}'.format(best))
class CountChromSites(utils.DatabaseTask):
    """
    Count the number of callable sites in a chromosome.

    :type species: str
    :type population: str
    :type chrom: str
    """
    species = luigi.Parameter()
    population = luigi.Parameter()
    chrom = luigi.Parameter()

    def requires(self):
        yield EasySFSPopFile(self.species, self.population)
        yield PolarizeVCF(self.species, self.chrom)

    def output(self):
        return luigi.LocalTarget('data/sfs/{}.size'.format(self.basename))

    def run(self):
        # unpack the inputs/outputs
        (_, spl_file), vcf_file = self.input()
        size_file = self.output()
        # count the unique sites for our samples, ignoring indels and other non-SNP records
        pipeline = ("bcftools view --samples-file {} --exclude-uncalled "
                    "--exclude-types indels,mnps,bnd,other {} | "
                    "bcftools query --format '%CHROM %POS\\n' | uniq | wc -l").format(spl_file.path, vcf_file.path)
        num_sites = utils.run_cmd([pipeline], shell=True)
        with size_file.open('w') as fout:
            fout.write('{}'.format(num_sites))
class CountCallableSites(utils.DatabaseTask):
    """
    Count the total number of callable sites, so dadi can estimate the ancestral population size from theta.
    :type species: str
    :type population: str
    """
    species = luigi.Parameter()
    population = luigi.Parameter()

    def requires(self):
        # one per-chromosome count for every autosome
        for chrom in self.autosomes:
            yield CountChromSites(self.species, self.population, chrom)

    def output(self):
        return luigi.LocalTarget('data/dadi/{}.L'.format(self.basename))

    def run(self):
        # unpack the params
        chrom_files = self.input()
        size_file = self.output()
        total = 0
        # sum all the chromosome sizes
        for chrom_file in chrom_files:
            with chrom_file.open() as fin:
                total += int(fin.read())
        with size_file.open('w') as fout:
            fout.write('{}'.format(total))
class DadiEpochDemography(utils.PipelineTask):
"""
Convert the best fitting dadi model for a given epoch into a demography file for `selection`.
:type species: str
:type population: str
:type folded: bool
:type epoch: int
"""
species = luigi.Parameter()
population = luigi.Parameter()
folded = luigi.BoolParameter()
epoch = luigi.IntParameter()
def requires(self):
    """Depend on the epoch's best-likelihood fit and the total callable-site count."""
    yield DadiEpochMaximumLikelihood(self.species, self.population, self.folded, self.epoch)
    yield CountCallableSites(self.species, self.population)
def output(self):
    """Forward the upstream pickle, plus new .pop and .nref targets."""
    # the first requirement's pickle target is re-yielded as our own output
    (pkl_file, _, _), _ = self.input()
    yield pkl_file  # pass on the pickle file
    yield luigi.LocalTarget('data/dadi/{}.pop'.format(self.basename))
    yield luigi.LocalTarget('data/dadi/{}.nref'.format(self.basename))
def run(self):
# unpack the inputs/outputs
(pkl_file, _, _), size_file = self.input()
_, pop_file, nref_file = self.output()
# load the best params for the epoch
with pkl_file.open('r') as fin:
best = pickle.load(fin)
# | |
import cv2
import numpy as np
import os
import time
import tkinter as tk
import PIL.Image, PIL.ImageTk
################################################################################
class ImageBox(tk.Canvas):
"""create a frame that displays the desired image with supported effects"""
HISTORY_SIZE = 100
############################################################################
def __init__(self, parent, *images, imgMax=225, padding=0, bg="#000000",
             **effects):
    """Build the canvas, load any initial images, and apply initial effects.

    :param parent:  containing tk widget
    :param images:  file paths of images to load immediately
    :param imgMax:  maximum canvas size; a scalar means a square (x, y)
    :param padding: inner padding; a scalar means equal x and y padding
    :param bg:      background colour as a '#rrggbb' hex string
    :param effects: effect keyword args forwarded to update()/startEffect()
    """
    self.idx = 0      # index of the currently selected image
    self.images = []  # list of (absolute path, cv2 image) tuples
    if isinstance(padding, (int, float)):  # convert param into square dimensional parameters
        padding = [padding] * 2
    self.paddingX, self.paddingY = padding
    if isinstance(imgMax, (int, float)):  # convert param into square dimensional parameters
        imgMax = [imgMax] * 2
    maxX, maxY = imgMax
    self.windowX = 1  # remember the previous window size to know if a resize event occurred
    self.windowY = 1
    self.bgColor = bg
    super(ImageBox, self).__init__(parent, width=maxX, height=maxY, bg=bg)
    self.pack()
    self._updateHistory = []  # timestamps of recent update() calls (drives fps)
    self.addImgpath(*images)
    self.stopEffects()  # initialise all fade/rotate state attributes
    try: self.update(**effects)  # ensure that an image is drawn initially, even when no subsequent update() calls are made
    except IndexError: pass  # can't display an image if no images are provided yet
############################################################################
def __len__(self):
    """Number of images currently managed by this ImageBox."""
    return len(self.images)
############################################################################
def __str__(self): return self.__repr__()  # str() mirrors repr()
def __repr__(self):
    """Human-readable summary: image count (or single image name) plus fps."""
    if len(self) > 1:
        imgStr = "%d images" % (len(self))
    elif self.images:
        imgStr = self.images[0][0]  # first element of the (name, img) tuple
    else:
        imgStr = None
    # BUG FIX: was `fpsStr = fpsStr = ""` -- a redundant chained assignment
    if self.fps:
        fpsStr = " %.1f fps" % (self.fps)
    else:
        fpsStr = ""
    return "<%s %s%s>" % (self.__class__.__name__, imgStr, fpsStr)
############################################################################
@property
def fps(self):
    """Rate of update() calls per second over the recorded history.

    Returns 0 when there is no history or the timestamps span zero time.
    """
    if not self._updateHistory: return 0
    start = self._updateHistory[0]
    end = self._updateHistory[-1]
    try: return len(self._updateHistory) / (end - start)
    # narrowed from a bare `except:`; only the zero-span case is expected
    except ZeroDivisionError: return 0
############################################################################
@property
def img(self):
    """Return the current image with all active effects applied."""
    try:
        images = self.resize(self.idx)
    except Exception:  # narrowed from a bare `except:`
        # BUG FIX: error message typo ("noimages" -> "no images")
        raise IndexError("There are no images currently loaded in %s"%(self))
    imgName, cvImg = images[0]
    if self._imgPct < 1.0: # apply fade to the image whenever the image effect state is darkened at all
        x = [
            int(self.bgColor[1:3], 16),
            int(self.bgColor[3:5], 16),
            int(self.bgColor[5:7], 16),
        ]
        factor = sum(x) / len(x)
        # TODO -- fade to match the background color, not just black
        cvImg = (cvImg * self._imgPct).astype(np.uint8)
    return cvImg
############################################################################
@property
def isFading(self):
    """Truthy while a fade-in or fade-out is active (returns the active
    duration rather than a strict bool)."""
    return self._fadeInDur or self._fadeOutDur
############################################################################
@property
def isRotating(self):
    """True when an automatic image-rotation effect is active."""
    return bool(self._rotateDur)
############################################################################
def addImgpath(self, *imageFilepaths):
    """Add images to the managed images within this ImageBox.

    A path that was loaded previously is reloaded and updated in place;
    new paths are appended to the managed list.
    """
    for name in imageFilepaths:
        absname = os.path.abspath(name)
        imgTuple = (absname, cv2.imread(absname))
        # Previously this used raise/except as control flow to skip the
        # append; a for/else does the same thing without the bare except.
        for i, (imgName, img) in enumerate(self.images):
            if absname == imgName:  # this same image was loaded previously
                self.images[i] = imgTuple  # update previously loaded image with the new image
                break  # updated in place, so do not append
        else:
            self.images.append(imgTuple)
############################################################################
def advanceImage(self):
    """Select the next managed image, wrapping back to the first at the end."""
    nextIdx = self.idx + 1
    self.idx = nextIdx if nextIdx < len(self) else 0
############################################################################
def removeImgpath(self, *imageFilepaths):
    """Remove images from this ImageBox's managed images.

    Paths that are not currently managed are ignored.
    """
    for name in imageFilepaths:
        absname = os.path.abspath(name)
        delIdx = None
        for i, (imgName, img) in enumerate(self.images):
            if absname == imgName:  # this same image was loaded previously
                delIdx = i
                break
        if delIdx is not None:  # only succeeds if img is found in managed images list
            del self.images[delIdx]
            if delIdx < self.idx:
                self.idx -= 1  # when a previous image is removed, index is decremented to remain on the current image
            # NOTE(review): removing the last image while idx points at it can
            # leave idx == len(self.images); confirm callers tolerate that
############################################################################
def resize(self, *idx):
    """Ensure all managed images are sized appropriately to fill the """\
    """window as best as possible.

    With explicit indices, only those images are resized and returned;
    otherwise all images are resized, but only when the window size has
    changed since the last call.
    """
    winX = self.winfo_width()
    winY = self.winfo_height()
    if idx: imgIter = [self.images[i] for i in idx]  # select only the images that were specified
    elif self.windowX == winX and self.windowY == winY: return self.images  # nothing worth doing
    else: imgIter = self.images  # select all images
    ret = []
    for imgName, img in imgIter:
        # NOTE(review): numpy image shape order is (rows, cols, channels),
        # i.e. (height, width); here shape[0] is scaled against winX and
        # passed as the width to cv2.resize (which expects (width, height)).
        # Confirm the axes are intended.
        x, y, dim = img.shape
        scaleFactor = min(max(1, winX - self.paddingX) / x,
                          max(1, winY - self.paddingY) / y)  # select the factor that takes the smallest scaled distance from image edge to window edge
        newX = max(1, int(x * scaleFactor))
        newY = max(1, int(y * scaleFactor))
        #print("resize %s %dx%d * >%.2f -> %dx%d"%(imgName, x, y, scaleFactor, newX, newY))
        newImg = cv2.resize(img, (newX, newY))
        ret.append((imgName, newImg))
    self.windowX = winX  # retain for future resize comparison as well as center anchoring calculation
    self.windowY = winY
    return ret
############################################################################
def startEffect(self,
                fadein = None,     # seconds until the image is fully faded in
                #fadeinout = None, # seconds until the image is fully faded in and out
                fadeout = None,    # seconds until the image is fully faded out
                #fadeoutin = None, # seconds until the image is fully faded out and in
                fadeCycle = None,  # seconds spent on each fade in and out cycle
                fadeDelay = 0.0,   # seconds waiting before effect begins
                rotate = None,     # seconds until the next image is automatically selected (repeats)
                now = None,        # if timing is provided from elsewhere
                **kwargs,          # unhandled effects
                ):
    """Force the active image to also apply specified effects.

    :raises ValueError: when unrecognised effect keywords are supplied
    """
    if not now: now = time.time()
    if fadeCycle:
        self._fadeCycle = fadeCycle
        if self._fadeInDur:  # adjust fade-in duration
            elapsed = now - self._fadeStart
            effectPct = elapsed / self._fadeInDur  # the amount of effect that has completed
            self._fadeInDur = (1 - effectPct) * fadeCycle
            self._fadeStartPct = self._imgPct
            # BUG FIX: was `self.fadeStart = now` (missing underscore), which
            # created a stray attribute instead of restarting the fade timer,
            # unlike the matching fade-out branch below
            self._fadeStart = now
        elif self._fadeOutDur:  # adjust fade-out duration
            elapsed = now - self._fadeStart
            effectPct = elapsed / self._fadeOutDur  # the amount of the effect that has completed
            self._fadeOutDur = (1 - effectPct) * fadeCycle
            self._fadeStartPct = self._imgPct
            self._fadeStart = now
        elif not (fadein or fadeout):
            fadeout = float(fadeCycle)  # ensure fadein or fadeout begin
        #elif fadeoutin: pass # TODO
        #elif fadeinout: pass # TODO
    if fadeDelay:
        self._fadeDelay = fadeDelay
    if fadein or fadeout:
        if fadein:
            if not self._fadeOutDur:  # fade in starting point is the current fade percentage else fade in from black
                self._imgPct = 0.0  # reset to black image
            self._fadeInDur = fadein
            self._fadeOutDur = 0.0  # ensure any fadeout is stopped
        else:
            if not self._fadeInDur:  # fade starting point is the current fade percentage else fade out from full image
                self._imgPct = 1.0  # reset to full image
            self._fadeInDur = 0.0  # ensure any fadein is stopped
            self._fadeOutDur = fadeout
        self._fadeStartPct = self._imgPct
        self._fadeStart = now + self._fadeDelay
    if rotate:
        self._rotateStart = now
        self._rotateDur = rotate
    if kwargs:
        msg = ["received unhandled effects key/values"]
        msg += [" %s : %s"%(k, v) for k, v in kwargs.items()]
        msg = os.linesep.join(msg)
        raise ValueError(msg)
############################################################################
def stopEffects(self):
    """Immediately stop all effects (fade and rotation)."""
    self.stopFade()
    self.stopRotate()
############################################################################
def stopFade(self):
    """Immediately stop all fading with the image in its initial state."""
    self._fadeInDur = 0.0     # number of seconds to fade the image in
    self._fadeOutDur = 0.0    # number of seconds to fade the image out
    self._fadeStartPct = 0.0  # the amount of fade present when the current effect began
    self._fadeStart = 0.0     # when the current effect operation began
    self._fadeCycle = 0.0     # cycle length; non-zero keeps fading in/out repeating
    self._fadeDelay = 0.0     # the amount of time to wait before applying the fade effect
    self._imgPct = 1.0        # the remaining percent of image after fade
############################################################################
def stopRotate(self):
    """Immediately stop all image rotation, resting on the current step."""
    self._rotateDur = 0.0    # how long each image should be presented
    self._rotateStart = 0.0  # when the current rotation interval began (startEffect sets it to `now`)
############################################################################
def update(self, **newEffects):
"""refresh the displayed image, including all desired effects"""
now = time.time()
self._updateHistory.append(now)
self._updateHistory = self._updateHistory[:self.HISTORY_SIZE] # only allow a maximum HISTORY_SIZE number of entries into the history
#print("updating %d%%"%(int(round(100*self._imgPct))))
if newEffects:
self.startEffect(**newEffects)
if self._fadeInDur: # update the fade-in effect
elapsed = now - self._fadeStart
if elapsed > 0: # any delay has already been met
effectPct = elapsed / self._fadeInDur # the amount of the effect that has completed
valProgressed = (1.0 - self._fadeStartPct) * effectPct # the amount of percentage that is now covered
self._imgPct = min(1.0, self._fadeStartPct + valProgressed) # calculate the next value in this effect's progress
if effectPct | |
from collections import namedtuple
from autograd import value_and_grad, vector_jacobian_product
from autograd.extend import primitive, defvjp
import autograd.numpy as np
import autograd.numpy.random as npr
import autograd.scipy.stats.multivariate_normal as mvn
import autograd.scipy.stats.t as t_dist
from autograd.scipy.linalg import sqrtm
from scipy.linalg import eigvalsh
from paragami import (PatternDict,
NumericVectorPattern,
PSDSymmetricMatrixPattern,
FlattenFunctionInput)
from functools import partial
import tqdm
import scipy.stats as stats
from ._distributions import multivariate_t_logpdf
from .functions import compute_R_hat, compute_R_hat_adaptive_numpy, compute_R_hat_halfway, stochastic_iterate_averaging
from .functions import flat_to_triang, triang_to_flat
# Public API of this module.
__all__ = [
    'mean_field_gaussian_variational_family',
    'mean_field_t_variational_family',
    't_variational_family',
    'black_box_klvi',
    'black_box_klvi_pd',
    'black_box_klvi_pd2',
    'black_box_chivi',
    'make_stan_log_density',
    'adagrad_optimize',
    'rmsprop_IA_optimize_with_rhat',
    'adam_IA_optimize_with_rhat'
]

# Bundle of closures describing one variational family q:
#   sample(var_param, n_samples[, seed]) -> draws from q
#   entropy(var_param)                   -> entropy of q (constants may be dropped)
#   logdensity(x, var_param)             -> log q(x)
#   mean_and_cov(var_param)              -> mean and covariance of q
#   pth_moment(p, var_param)             -> p-th central moment, p in {2, 4}
#   var_param_dim                        -> length of the flat parameter vector
VariationalFamily = namedtuple('VariationalFamily',
                               ['sample', 'entropy',
                                'logdensity', 'mean_and_cov',
                                'pth_moment', 'var_param_dim'])
def mean_field_gaussian_variational_family(dim):
    """Mean-field (diagonal) Gaussian variational family of dimension `dim`.

    The variational parameter is the concatenation of the mean and the
    per-coordinate log standard deviations (length 2*dim).
    """
    rs = npr.RandomState(0)
    def unpack_params(var_param):
        mean, log_std = var_param[:dim], var_param[dim:]
        return mean, log_std
    def sample(var_param, n_samples, seed=None):
        """Draw n_samples from q; an explicit seed gives a reproducible stream."""
        my_rs = rs if seed is None else npr.RandomState(seed)
        mean, log_std = unpack_params(var_param)
        return my_rs.randn(n_samples, dim) * np.exp(log_std) + mean
    def entropy(var_param):
        """Exact entropy of the diagonal Gaussian."""
        mean, log_std = unpack_params(var_param)
        return 0.5 * dim * (1.0 + np.log(2*np.pi)) + np.sum(log_std)
    def logdensity(x, var_param):
        mean, log_std = unpack_params(var_param)
        return mvn.logpdf(x, mean, np.diag(np.exp(2*log_std)))
    def mean_and_cov(var_param):
        mean, log_std = unpack_params(var_param)
        return mean, np.diag(np.exp(2*log_std))
    def pth_moment(p, var_param):
        """E||x - mean||^p for p in {2, 4}."""
        if p not in [2, 4]:
            raise ValueError('only p = 2 or 4 supported')
        _, log_std = unpack_params(var_param)
        # renamed from `vars`, which shadowed the builtin
        variances = np.exp(2*log_std)
        if p == 2:
            return np.sum(variances)
        else:  # p == 4
            return 2*np.sum(variances**2) + np.sum(variances)**2
    return VariationalFamily(sample, entropy, logdensity,
                             mean_and_cov, pth_moment, 2*dim)
def full_rank_gaussian_variational_family(dim):
    """Full-rank Gaussian family; var_param = (mean, flattened Cholesky factor)."""
    rs = npr.RandomState(0)
    def beta_to_L(beta):
        # map the flat parameter vector to a lower-triangular Cholesky factor
        # (debug `print(beta.shape)` removed)
        L = flat_to_triang(beta)
        L = L[0]
        return L
    def L_to_beta(L):
        return triang_to_flat(L)
    def unpack_params(var_param):
        mean, beta = var_param[:dim], var_param[dim:]
        return mean, beta
    def sample(var_param, n_samples, seed=None):
        my_rs = rs if seed is None else npr.RandomState(seed)
        mean, beta = unpack_params(var_param)
        L = beta_to_L(beta)
        return np.dot(my_rs.randn(n_samples, dim), L) + mean
    def entropy(var_param):
        mean, beta = unpack_params(var_param)
        L = beta_to_L(beta[:,np.newaxis])
        return np.sum(np.log(np.diag(L))) + 0.5*dim* (1 + np.log(2 * np.pi))
    def logdensity(x, var_param):
        mean, beta = unpack_params(var_param)
        L = beta_to_L(beta[:,np.newaxis])
        Sigma = L@L.T
        return mvn.logpdf(x, mean, Sigma)
    def mean_and_cov(var_param):
        mean, beta = unpack_params(var_param)
        L = beta_to_L(beta[:,np.newaxis])
        Sigma = L@L.T
        # NOTE(review): returns only the diagonal of Sigma, unlike the full
        # matrix the name suggests -- confirm downstream expectations
        return mean, np.diag(Sigma)
    def pth_moment(p, var_param):
        # BUG FIX: this member was missing, so the VariationalFamily call
        # below passed five arguments for six fields and raised a TypeError
        # the moment this family was constructed.
        if p not in [2, 4]:
            raise ValueError('only p = 2 or 4 supported')
        _, beta = unpack_params(var_param)
        L = beta_to_L(beta[:,np.newaxis])
        Sigma = L@L.T
        if p == 2:
            return np.trace(Sigma)  # E||x - mu||^2 = tr(Sigma)
        else:  # p == 4: 2*tr(Sigma^2) + tr(Sigma)^2
            return 2*np.sum(Sigma*Sigma) + np.trace(Sigma)**2
    return VariationalFamily(sample, entropy, logdensity, mean_and_cov,
                             pth_moment, dim*(dim+3)//2)
def mean_field_t_variational_family(dim, df):
    """Mean-field Student-t variational family with `dim` coordinates and
    fixed degrees of freedom `df` (> 2 so the covariance exists)."""
    if df <= 2:
        raise ValueError('df must be greater than 2')
    rng = npr.RandomState(0)
    def _split(var_param):
        # first `dim` entries are the location, the rest the log scales
        return var_param[:dim], var_param[dim:]
    def sample(var_param, n_samples, seed=None):
        sampler = rng if seed is None else npr.RandomState(seed)
        loc, log_scale = _split(var_param)
        return loc + np.exp(log_scale)*sampler.standard_t(df, size=(n_samples, dim))
    def entropy(var_param):
        # terms that depend only on df are constant and omitted
        loc, log_scale = _split(var_param)
        return np.sum(log_scale)
    def logdensity(x, var_param):
        loc, log_scale = _split(var_param)
        if x.ndim == 1:
            x = x[np.newaxis,:]
        return np.sum(t_dist.logpdf(x, df, loc, np.exp(log_scale)), axis=-1)
    def mean_and_cov(var_param):
        loc, log_scale = _split(var_param)
        return loc, df / (df - 2) * np.diag(np.exp(2*log_scale))
    def pth_moment(p, var_param):
        if p not in [2,4]:
            raise ValueError('only p = 2 or 4 supported')
        if df <= p:
            raise ValueError('df must be greater than p')
        _, log_scale = _split(var_param)
        scales = np.exp(log_scale)
        c = df / (df - 2)
        if p == 2:
            return c*np.sum(scales**2)
        else: # p == 4
            return c**2*(2*(df-1)/(df-4)*np.sum(scales**4) + np.sum(scales**2)**2)
    return VariationalFamily(sample, entropy, logdensity,
                             mean_and_cov, pth_moment, 2*dim)
def _get_mu_sigma_pattern(dim):
    """Paragami pattern for a {'mu', 'Sigma'} parameter dictionary of size `dim`."""
    pattern = PatternDict(free_default=True)
    pattern['mu'] = NumericVectorPattern(length=dim)
    pattern['Sigma'] = PSDSymmetricMatrixPattern(size=dim)
    return pattern
def t_variational_family(dim, df):
    """Full-rank multivariate Student-t variational family with fixed `df`.

    The variational parameter is the paragami free flattening of the
    dictionary {'mu': location vector, 'Sigma': PSD scale matrix}.
    """
    if df <= 2:
        raise ValueError('df must be greater than 2')
    rs = npr.RandomState(0)
    ms_pattern = _get_mu_sigma_pattern(dim)
    # log density evaluated on the folded (mu, Sigma) dictionary
    logdensity = FlattenFunctionInput(
        lambda x, ms_dict: multivariate_t_logpdf(x, ms_dict['mu'], ms_dict['Sigma'], df),
        patterns=ms_pattern, free=True, argnums=1)
    def sample(var_param, n_samples, seed=None):
        my_rs = rs if seed is None else npr.RandomState(seed)
        # multivariate t as a Gaussian scale mixture: z / sqrt(chi2/df)
        s = np.sqrt(my_rs.chisquare(df, n_samples) / df)
        param_dict = ms_pattern.fold(var_param)
        z = my_rs.randn(n_samples, dim)
        sqrtSigma = sqrtm(param_dict['Sigma'])
        return param_dict['mu'] + np.dot(z, sqrtSigma)/s[:,np.newaxis]
    def entropy(var_param):
        # ignore terms that depend only on df
        param_dict = ms_pattern.fold(var_param)
        return .5*np.log(np.linalg.det(param_dict['Sigma']))
    def mean_and_cov(var_param):
        param_dict = ms_pattern.fold(var_param)
        return param_dict['mu'], df / (df - 2.) * param_dict['Sigma']
    def pth_moment(p, var_param):
        # p-th central moments are finite only when p < df
        if p not in [2,4]:
            raise ValueError('only p = 2 or 4 supported')
        if df <= p:
            raise ValueError('df must be greater than p')
        param_dict = ms_pattern.fold(var_param)
        sq_scales = np.linalg.eigvalsh(param_dict['Sigma'])
        c = df / (df - 2)
        if p == 2:
            return c*np.sum(sq_scales)
        else: # p == 4
            return c**2*(2*(df-1)/(df-4)*np.sum(sq_scales**2) + np.sum(sq_scales)**2)
    return VariationalFamily(sample, entropy, logdensity, mean_and_cov,
                             pth_moment, ms_pattern.flat_length(True))
def black_box_klvi(var_family, logdensity, n_samples):
    """Build a KLVI objective: returns a function mapping var_param to
    (negative ELBO estimate, gradient) via autograd."""
    def variational_objective(var_param):
        """Stochastic estimate of the negative variational lower bound."""
        draws = var_family.sample(var_param, n_samples)
        elbo = var_family.entropy(var_param) + np.mean(logdensity(draws))
        return -elbo
    return value_and_grad(variational_objective)
def black_box_chivi(alpha, var_family, logdensity, n_samples):
    """Build a CHIVI-style objective/gradient function.

    Returns a function mapping var_param -> (objective value, gradient),
    estimated from importance weights p/q over n_samples draws.
    """
    def compute_log_weights(var_param, seed):
        """Log importance weights log p(x) - log q(x) for a fixed seed."""
        samples = var_family.sample(var_param, n_samples, seed)
        log_weights = logdensity(samples) - var_family.logdensity(samples, var_param)
        return log_weights
    log_weights_vjp = vector_jacobian_product(compute_log_weights)
    def objective_grad_and_log_norm(var_param):
        # fresh seed per call; the same seed is reused inside the VJP so the
        # gradient matches the sampled weights
        # NOTE(review): np.random.randint(2**32) can exceed the platform int
        # bound on some systems (e.g. Windows int32) -- confirm it doesn't raise
        seed = npr.randint(2**32)
        log_weights = compute_log_weights(var_param, seed)
        # subtract the max before exponentiating for numerical stability
        log_norm = np.max(log_weights)
        scaled_values = np.exp(log_weights - log_norm)**alpha
        obj_value = np.log(np.mean(scaled_values))/alpha + log_norm
        obj_grad = alpha*log_weights_vjp(var_param, seed, scaled_values) / scaled_values.size
        return (obj_value, obj_grad)
    return objective_grad_and_log_norm
def black_box_klvi_pd(var_family, logdensity, n_samples):
    """KLVI objective using the Monte Carlo log-density difference instead of
    the closed-form entropy term; returns an autograd value-and-grad function."""
    def variational_objective(var_param):
        """Stochastic estimate of the negative variational lower bound."""
        draws = var_family.sample(var_param, n_samples)
        bound = np.mean(logdensity(draws)) - np.mean(var_family.logdensity(draws, var_param))
        return -bound
    return value_and_grad(variational_objective)
def black_box_klvi_pd2(var_family, logdensity, n_samples):
    """KLVI objective variant; a formulation which avoids path derivatives.

    NOTE(review): log q is bound to the current var_param via functools.partial
    and evaluated inside a nested function; confirm this actually detaches the
    path-derivative contribution under autograd as intended.
    """
    def variational_objective(var_param):
        """Provides a stochastic estimate of the variational lower bound."""
        samples = var_family.sample(var_param, n_samples)
        # bind var_param into log q before evaluating on the samples
        a = partial(var_family.logdensity, var_param=var_param)
        def nested_fn(samples):
            lower_bound = np.mean(logdensity(samples)) - np.mean(a(samples))
            return -lower_bound
        b = nested_fn(samples)
        return b
    objective_and_grad = value_and_grad(variational_objective)
    return objective_and_grad
def _vectorize_if_needed(f, a, axis=-1):
if a.ndim > 1:
return np.apply_along_axis(f, axis, a)
else:
return f(a)
def _ensure_2d(a):
while a.ndim < 2:
a = a[:,np.newaxis]
return a
def make_stan_log_density(fitobj):
    """Wrap a Stan fit object's log_prob/grad_log_prob pair as an autograd
    primitive with a custom vector-Jacobian product."""
    @primitive
    def log_density(x):
        return _vectorize_if_needed(fitobj.log_prob, x)
    def log_density_vjp(ans, x):
        # chain rule: upstream cotangent g (broadcast to 2-d) times Stan's gradient
        return lambda g: _ensure_2d(g) * _vectorize_if_needed(fitobj.grad_log_prob, x)
    defvjp(log_density, log_density_vjp)
    return log_density
def learning_rate_schedule(n_iters, learning_rate, learning_rate_end):
    """Yield one learning rate per iteration.

    With a final rate: constant for the first quarter, a/(b + i) decay over
    the middle half, then constant at learning_rate_end for the last quarter.
    With learning_rate_end=None (the default used by adagrad_optimize): a
    constant learning_rate throughout.

    :raises ValueError: on a non-positive rate, or a final rate >= the initial
    """
    if learning_rate <= 0:
        raise ValueError('learning rate must be positive')
    if learning_rate_end is not None:
        if learning_rate <= learning_rate_end:
            raise ValueError('initial learning rate must be greater than final learning rate')
        # Decay constants are only defined (and only needed) when a final rate
        # is given; computing them unconditionally would raise a TypeError for
        # learning_rate_end=None.
        b = n_iters*learning_rate_end/(2*(learning_rate - learning_rate_end))
        a = learning_rate*b
    # constant learning rate for first quarter, then decay like a/(b + i)
    # for middle half, then constant for last quarter
    start_decrease_at = n_iters//4
    end_decrease_at = 3*n_iters//4
    for i in range(n_iters):
        if learning_rate_end is None or i < start_decrease_at:
            yield learning_rate
        elif i < end_decrease_at:
            yield a / (b + i - start_decrease_at + 1)
        else:
            yield learning_rate_end
def adagrad_optimize(n_iters, objective_and_grad, init_param,
                     has_log_norm=False, window=10, learning_rate=.01,
                     epsilon=.1, learning_rate_end=None):
    """Windowed-AdaGrad optimisation of a stochastic objective.

    :param n_iters:            number of iterations to run
    :param objective_and_grad: callable returning (value, grad), or
                               (value, grad, log_norm) when has_log_norm
    :param init_param:         initial parameter vector (copied, not mutated)
    :param has_log_norm:       whether the objective also returns a log norm
    :param window:             number of recent gradients kept for the scaling
    :param learning_rate:      initial learning rate for the schedule
    :param epsilon:            damping term added inside the square root
    :param learning_rate_end:  optional final learning rate for the schedule
    :returns: (mean of the last-quarter iterates, iterate history,
               objective-value history, log-norm history)
    """
    local_grad_history = []
    local_log_norm_history = []
    value_history = []
    log_norm_history = []
    variational_param = init_param.copy()
    variational_param_history = []
    with tqdm.trange(n_iters) as progress:
        try:
            schedule = learning_rate_schedule(n_iters, learning_rate, learning_rate_end)
            for i, curr_learning_rate in zip(progress, schedule):
                # NOTE(review): prev_variational_param is assigned but never read
                prev_variational_param = variational_param
                if has_log_norm:
                    obj_val, obj_grad, log_norm = objective_and_grad(variational_param)
                else:
                    obj_val, obj_grad = objective_and_grad(variational_param)
                    log_norm = 0
                value_history.append(obj_val)
                local_grad_history.append(obj_grad)
                local_log_norm_history.append(log_norm)
                log_norm_history.append(log_norm)
                # keep only the `window` most recent gradients / log norms
                if len(local_grad_history) > window:
                    local_grad_history.pop(0)
                    local_log_norm_history.pop(0)
                # rescale windowed gradients relative to the smallest log norm
                grad_scale = np.exp(np.min(local_log_norm_history) - np.array(local_log_norm_history))
                scaled_grads = grad_scale[:,np.newaxis]*np.array(local_grad_history)
                accum_sum = np.sum(scaled_grads**2, axis=0)
                # AdaGrad-style update using the windowed accumulator
                variational_param = variational_param - curr_learning_rate*obj_grad/np.sqrt(epsilon + accum_sum)
                # only the last quarter of iterates is kept for averaging
                if i >= 3*n_iters // 4:
                    variational_param_history.append(variational_param.copy())
                if i % 10 == 0:
                    avg_loss = np.mean(value_history[max(0, i - 1000):i + 1])
                    progress.set_description(
                        'Average Loss = {:,.5g}'.format(avg_loss))
        except (KeyboardInterrupt, StopIteration) as e: # pragma: no cover
            # do not print log on the same line
            progress.close()
        finally:
            progress.close()
    variational_param_history = np.array(variational_param_history)
    # iterate averaging over the retained tail of the trajectory
    smoothed_opt_param = np.mean(variational_param_history, axis=0)
    return (smoothed_opt_param, variational_param_history,
            np.array(value_history), np.array(log_norm_history))
def rmsprop_IA_optimize_with_rhat(n_iters, objective_and_grad, init_param,K,
has_log_norm=False, window=500, learning_rate=.01,
epsilon=.000001, rhat_window=500, averaging=True, n_optimisers=1,
r_mean_threshold=1.15, r_sigma_threshold=1.20,learning_rate_end=None):
local_grad_history = []
local_log_norm_history = []
value_history = []
log_norm_history = []
variational_param = init_param.copy()
variational_param_history = []
averaged_variational_param_history = []
start_avg_iter = n_iters // 1.3
sum_grad_norm = 0.
alpha = 0.9
scaled_sum_grad_norm = 0.
variational_param_history_list = []
averaged_variational_param_history_list = []
variational_param_list = []
averaged_variational_param_list = []
#window_size=500
for o in range(n_optimisers):
variational_param_history = []
np.random.seed(seed=o)
if o >= 1:
variational_param = init_param + stats.norm.rvs(size=len(init_param))*(o+1)*0.2
#variational_param = init_param
#print(variational_param)
with tqdm.trange(n_iters) as progress:
try:
schedule = learning_rate_schedule(n_iters, learning_rate, learning_rate_end)
for i, curr_learning_rate in zip(progress, schedule):
prev_variational_param = variational_param
if has_log_norm:
obj_val, obj_grad, log_norm = objective_and_grad(variational_param)
else:
obj_val, obj_grad = objective_and_grad(variational_param)
log_norm = 0
value_history.append(obj_val)
local_grad_history.append(obj_grad)
local_log_norm_history.append(log_norm)
log_norm_history.append(log_norm)
if len(local_grad_history) > window:
local_grad_history.pop(0)
local_log_norm_history.pop(0)
if has_log_norm:
grad_norm = np.exp(log_norm)
else:
grad_norm = np.sum(obj_grad ** 2, axis=0)
if i == 0:
# sum_grad_squared=obj_grad**2
sum_grad_squared = grad_norm
else:
# sum_grad_squared = sum_grad_squared*alpha + (1.-alpha)*obj_grad**2
| |
<reponame>amitkr2410/JETSCAPE-analysis
"""
macro for plotting analyzed jetscape events
"""
# This script plots histograms created in the analysis of Jetscape events
#
# Author: <NAME> (<EMAIL>)
# General
import os
import sys
import yaml
import argparse
# Data analysis and plotting
import ROOT
import numpy as np
import pptx # pip install python-pptx
# Base class
sys.path.append('.')
from jetscape_analysis.base import common_base
from plot import plot_results_STAT_utils
# Prevent ROOT from stealing focus when plotting
ROOT.gROOT.SetBatch(True)
################################################################
class PlotResults(common_base.CommonBase):
# ---------------------------------------------------------------
# Constructor
# ---------------------------------------------------------------
def __init__(self, config_file='', input_file='', pp_ref_file='', **kwargs):
    """Set up plotting style, open input ROOT files, and read the config.

    :param config_file: YAML analysis configuration
    :param input_file:  ROOT file of analyzed histograms (its directory
                        becomes the output directory)
    :param pp_ref_file: ROOT file with pp reference results (used for RAA
                        when the input is an AA system)
    """
    super(PlotResults, self).__init__(**kwargs)
    self.output_dir = os.path.dirname(input_file)
    self.plot_utils = plot_results_STAT_utils.PlotUtils()
    self.plot_utils.setOptions()
    ROOT.gROOT.ForceStyle()
    self.input_file = ROOT.TFile(input_file, 'READ')
    # drawing style for data points and JETSCAPE curves
    self.data_color = ROOT.kGray+3
    self.data_marker = 21
    self.jetscape_color = [ROOT.kViolet-8, ROOT.kViolet-8, ROOT.kRed-7, ROOT.kTeal-8, ROOT.kCyan-2, ROOT.kGreen-6, ROOT.kAzure-4, ROOT.kOrange+6, ROOT.kBlue-10]
    self.jetscape_fillstyle = [1001, 3144, 1001, 3144]
    self.jetscape_alpha = [0.7, 0.7, 0.7, 0.7]
    self.jetscape_marker = 20
    self.marker_size = 1.5
    self.line_width = 2
    self.line_style = 1
    self.file_format = '.pdf'
    # Check whether pp or AA (inferred from the input filename)
    if 'PbPb' in input_file or 'AuAu' in input_file:
        self.is_AA = True
        # NOTE(review): observable_centrality_list is only initialised in the
        # AA branch -- confirm no pp code path reads it
        self.observable_centrality_list = []
    else:
        self.is_AA = False
    # If AA, load the pp reference results so that we can construct RAA
    if self.is_AA:
        self.pp_ref_file = ROOT.TFile(pp_ref_file, 'READ')
    # Read config file
    with open(config_file, 'r') as stream:
        self.config = yaml.safe_load(stream)
    self.sqrts = self.config['sqrt_s']
    self.power = self.config['power']
    self.pt_ref = self.config['pt_ref']
    # If AA, set different options for hole subtraction treatment
    self.jet_collection_labels_AA = self.config['jet_collection_labels'] + ['shower_recoil_unsubtracted']
    self.jet_collection_label_pp = ''
    if self.is_AA:
        self.jet_collection_labels = self.jet_collection_labels_AA
    else:
        self.jet_collection_labels = [self.jet_collection_label_pp]
    # We will write final results after all scalings, along with data, to file
    self.output_dict = {}
    print(self)
#-------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
def plot_results(self):
    """Plot all observable groups present in the config, write output
    objects, and optionally build a pptx summary (png mode only)."""
    self.plot_hadron_observables(observable_type='hadron')
    self.plot_hadron_correlation_observables(observable_type='hadron_correlations')
    self.plot_jet_observables(observable_type='inclusive_chjet')
    # these groups are optional in the config
    if 'inclusive_jet' in self.config:
        self.plot_jet_observables(observable_type='inclusive_jet')
    if 'semi_inclusive_chjet' in self.config:
        self.plot_semi_inclusive_chjet_observables(observable_type='semi_inclusive_chjet')
    if 'dijet' in self.config:
        self.plot_jet_observables(observable_type='dijet')
    self.plot_event_qa()
    self.write_output_objects()
    # Generate pptx for convenience
    if self.file_format == '.png':
        self.generate_pptx()
#-------------------------------------------------------------------------------------------
# Plot hadron observables
#-------------------------------------------------------------------------------------------
def plot_hadron_observables(self, observable_type=''):
    """Plot each configured hadron observable for every centrality bin.

    :param observable_type: top-level config key (e.g. 'hadron')
    """
    print()
    print(f'Plot {observable_type} observables...')
    for observable, block in self.config[observable_type].items():
        # Skip observables without reference data. This check does not
        # depend on centrality, so it is hoisted out of the inner loop.
        if 'hepdata' not in block and 'custom_data' not in block:
            continue
        for centrality_index, centrality in enumerate(block['centrality']):
            # Initialize observable configuration
            self.suffix = ''
            self.init_observable(observable_type, observable, block, centrality, centrality_index)
            # Plot observable
            self.plot_observable(observable_type, observable, centrality)
#-------------------------------------------------------------------------------------------
# Plot hadron correlation observables
#-------------------------------------------------------------------------------------------
def plot_hadron_correlation_observables(self, observable_type=''):
    """Plot each configured hadron-correlation observable per centrality bin.

    Currently only the STAR dihadron observable is handled explicitly.
    """
    print()
    print(f'Plot {observable_type} observables...')
    for observable, block in self.config[observable_type].items():
        for centrality_index, centrality in enumerate(block['centrality']):
            # skip observables with no reference data
            if 'hepdata' not in block and 'custom_data' not in block:
                continue
            # STAR dihadron
            if observable == 'dihadron_star':
                # Initialize observable configuration
                pt_trigger_ranges = block["pt_trig"]
                pt_associated_ranges = block["pt_assoc"]
                # Loop over trigger and associated ranges
                for pt_trig_range in pt_trigger_ranges:
                    for pt_trig_min, pt_trig_max in pt_trig_range:
                        for pt_assoc_range in pt_associated_ranges:
                            for pt_assoc_min, pt_assoc_max in pt_assoc_range:
                                self.suffix = f"pt_trig_{pt_trig_min:g}_{pt_trig_max:g}_pt_assoc_{pt_assoc_min:g}_{pt_assoc_max:g}"
                                # Do I want to use the pt suffix for the associated? What if the bins change??
                                # Maybe that's worth taking as the cost of doing it by hand? Or I could just map it here,
                                # since it's the only place that matters
                                # TODO: Try this... (will need to update the name elsewhere)
                                # TODO: We don't need this - the suffix is passed through to the data...
                                # NOTE(review): pt_suffix_map is assigned but never used
                                pt_suffix_map = {(4, 6): 0, (6, -1): 1}
                                self.init_observable(observable_type, observable, block, centrality, centrality_index)
                                # Histogram observable
                                self.plot_observable(observable_type, observable, centrality)
#-------------------------------------------------------------------------------------------
# Histogram inclusive jet observables
#-------------------------------------------------------------------------------------------
def plot_jet_observables(self, observable_type=''):
    """Plot inclusive jet observables for all configured R, pt bins and
    (optionally) SoftDrop grooming settings.

    :param observable_type: key into self.config selecting the observable group
    """
    print()
    print(f'Plot {observable_type} observables...')
    for observable, block in self.config[observable_type].items():
        for centrality_index, centrality in enumerate(block['centrality']):
            for self.jet_R in block['jet_R']:
                # Optional: Loop through pt bins
                for pt_bin in range(len(block['pt']) - 1):
                    # Only label the pt bin when there is more than one
                    pt_suffix = f'_pt{pt_bin}' if len(block['pt']) > 2 else ''
                    # Optional: subobservable (e.g. angularity kappa values)
                    subobservable_label_list = ['']
                    if 'kappa' in block:
                        subobservable_label_list = [f'_k{kappa}' for kappa in block['kappa']]
                    for subobservable_label in subobservable_label_list:
                        # Shape-type observables are self-normalized
                        self_normalize = any(
                            x in observable
                            for x in ['mass', 'g', 'ptd', 'charge', 'mg', 'zg', 'tg', 'xj'])
                        if 'SoftDrop' in block:
                            for grooming_setting in block['SoftDrop']:
                                # ALICE zg/tg: each R is only published for one centrality
                                if observable in ('zg_alice', 'tg_alice'):
                                    if np.isclose(self.jet_R, 0.4) and centrality_index == 0:
                                        continue
                                    if np.isclose(self.jet_R, 0.2) and centrality_index == 1:
                                        continue
                                print(f'    grooming_setting = {grooming_setting}')
                                zcut = grooming_setting['zcut']
                                beta = grooming_setting['beta']
                                self.suffix = f'_R{self.jet_R}_zcut{zcut}_beta{beta}{subobservable_label}'
                                self._plot_single_jet_config(observable_type, observable, block,
                                                             centrality, centrality_index,
                                                             pt_suffix, self_normalize)
                        else:
                            self.suffix = f'_R{self.jet_R}{subobservable_label}'
                            self._plot_single_jet_config(observable_type, observable, block,
                                                         centrality, centrality_index,
                                                         pt_suffix, self_normalize)

def _plot_single_jet_config(self, observable_type, observable, block,
                            centrality, centrality_index, pt_suffix, self_normalize):
    """Helper: init and plot one jet-observable configuration; no-op when the
    block carries no data ('hepdata'/'custom_data') to compare against."""
    if 'hepdata' not in block and 'custom_data' not in block:
        return
    # Initialize observable configuration
    self.init_observable(observable_type, observable, block, centrality, centrality_index,
                         pt_suffix=pt_suffix, self_normalize=self_normalize)
    # Plot observable
    self.plot_observable(observable_type, observable, centrality, pt_suffix)
#-------------------------------------------------------------------------------------------
# Histogram semi-inclusive jet observables
#-------------------------------------------------------------------------------------------
def plot_semi_inclusive_chjet_observables(self, observable_type=''):
    """Plot semi-inclusive charged-jet observables for each centrality and jet R."""
    print()
    print(f'Plot {observable_type} observables...')
    for observable, block in self.config[observable_type].items():
        for centrality_index, centrality in enumerate(block['centrality']):
            for self.jet_R in block['jet_R']:
                self.suffix = f'_R{self.jet_R}'
                # Nothing to compare against without data
                if 'hepdata' not in block and 'custom_data' not in block:
                    continue
                # Shape observables are self-normalized
                self_normalize = any(key in observable for key in ['nsubjettiness'])
                # Initialize observable configuration
                self.init_observable(observable_type, observable, block, centrality,
                                     centrality_index, self_normalize=self_normalize)
                # Plot observable
                self.plot_observable(observable_type, observable, centrality)
#-------------------------------------------------------------------------------------------
# Initialize a single observable's config
#-------------------------------------------------------------------------------------------
def init_observable(self, observable_type, observable, block, centrality, centrality_index, pt_suffix='', self_normalize=False):
    """Initialize a single observable's configuration and distributions.

    Fills self.observable_settings with the experimental data distribution,
    the JETSCAPE distribution, and (pp only) their ratio.
    """
    # Fresh dict containing all info relevant to this observable
    self.observable_settings = {}
    # Load common settings into class members
    self.init_common_settings(observable, block)
    # ----------------------------------------------------------
    # Data distribution: HEPData takes precedence over custom YAML
    if 'hepdata' in block:
        data = self.plot_utils.tgraph_from_hepdata(block, self.is_AA, self.sqrts, observable_type,
                                                   observable, centrality_index,
                                                   suffix=self.suffix, pt_suffix=pt_suffix)
    elif 'custom_data' in block:
        data = self.plot_utils.tgraph_from_yaml(block, self.is_AA, self.sqrts, observable_type,
                                                observable, centrality_index,
                                                suffix=self.suffix, pt_suffix=pt_suffix)
    else:
        data = None
    self.observable_settings['data_distribution'] = data
    # ----------------------------------------------------------
    # JETSCAPE distribution
    self.initialize_jetscape_distribution(observable_type, observable, centrality,
                                          pt_suffix=pt_suffix, self_normalize=self_normalize)
    # ----------------------------------------------------------
    # pp case: form the JETSCAPE/data ratio (not defined for 2D histograms)
    if not self.is_AA:
        jetscape = self.observable_settings['jetscape_distribution']
        if data and jetscape and not jetscape.InheritsFrom(ROOT.TH2.Class()) and not self.skip_pp_ratio:
            self.observable_settings['ratio'] = self.plot_utils.divide_histogram_by_tgraph(jetscape, data)
        else:
            self.observable_settings['ratio'] = None
#-------------------------------------------------------------------------------------------
# Initialize from settings from config file into class members
#-------------------------------------------------------------------------------------------
def init_common_settings(self, observable, block):
    """Copy per-observable settings from the config block into class members.

    Optional keys only set their attribute when present; display-range keys
    fall back to defaults.
    """
    self.xtitle = block['xtitle']
    # Optional kinematic cuts -- only set when configured
    if 'eta_cut' in block:
        self.eta_cut = block['eta_cut']
    if 'y_cut' in block:
        self.y_cut = block['y_cut']
    if 'pt' in block:
        self.pt = block['pt']
    if 'eta_cut_R' in block:
        # eta cut shrinks with jet R so the full jet is contained
        self.eta_R = block['eta_cut_R']
        self.eta_cut = np.round(self.eta_R - self.jet_R, decimals=1)
    if 'c_ref' in block:
        # reference factor matched to the current jet R
        self.c_ref = block['c_ref'][block['jet_R'].index(self.jet_R)]
    if 'low_trigger_range' in block:
        self.low_trigger_range = block['low_trigger_range']
    if 'high_trigger_range' in block:
        self.high_trigger_range = block['high_trigger_range']
    if 'trigger_range' in block:
        self.trigger_range = block['trigger_range']
    self.logy = block.get('logy', False)
    # Display ranges/titles differ between AA and pp
    if self.is_AA:
        if 'ytitle_AA' in block:
            self.ytitle = block['ytitle_AA']
        if 'y_min_AA' in block:
            self.y_min = float(block['y_min_AA'])
            self.y_max = float(block['y_max_AA'])
        else:
            self.y_min, self.y_max = 0., 1.
    else:
        self.ytitle = block.get('ytitle_pp', '')
        if 'y_min_pp' in block:
            self.y_min = float(block['y_min_pp'])
            self.y_max = float(block['y_max_pp'])
        else:
            self.y_min, self.y_max = 0., 1.99
        if 'y_ratio_min' in block:
            self.y_ratio_min = block['y_ratio_min']
            self.y_ratio_max = block['y_ratio_max']
        else:
            self.y_ratio_min, self.y_ratio_max = 0., 1.99
    self.skip_pp = block.get('skip_pp', False)
    self.skip_pp_ratio = block.get('skip_pp_ratio', False)
    self.skip_AA_ratio = block.get('skip_AA_ratio', False)
    self.scale_by = block.get('scale_by', None)
    # Flag to plot hole histogram (for hadron histograms only)
    if self.is_AA:
        self.subtract_holes = observable in ['pt_ch_alice', 'pt_pi_alice', 'pt_pi0_alice', 'pt_ch_cms',
                                             'pt_ch_atlas', 'pt_pi0_phenix', 'pt_ch_star']
    else:
        self.subtract_holes = False
#-------------------------------------------------------------------------------------------
# Initialize JETSCAPE distribution into self.observable_settings
#-------------------------------------------------------------------------------------------
def initialize_jetscape_distribution(self, observable_type, observable, centrality, pt_suffix='', self_normalize=False):
#-------------------------------------------------------------
# AA
if self.is_AA:
# Add centrality bin to list, if needed
if centrality not in self.observable_centrality_list:
self.observable_centrality_list.append(centrality)
#-------------------------------------------------------------
# For hadron observables, we retrieve and subtract the hole histograms
if self.subtract_holes:
hole_labels = ['', '_holes']
for hole_label in hole_labels:
# Get histogram, and add to self.observable_settings
self.get_histogram(observable_type, observable, centrality, collection_label=hole_label, pt_suffix=pt_suffix)
# Normalization
# Note: If we divide by the sum of weights (corresponding to n_events) and multiply by the
# pt-hat cross-section, then JETSCAPE distribution gives cross-section: dsigma/dx (in mb)
self.scale_histogram(observable_type, observable, centrality,
collection_label=hole_label, pt_suffix=pt_suffix, self_normalize=self_normalize)
# Subtract the holes (and save unsubtracted histogram)
if self.observable_settings[f'jetscape_distribution']:
self.observable_settings['jetscape_distribution_unsubtracted'] = self.observable_settings[f'jetscape_distribution'].Clone()
self.observable_settings['jetscape_distribution_unsubtracted'].SetName('{}_unsubtracted'.format(self.observable_settings[f'jetscape_distribution'].GetName()))
self.observable_settings['jetscape_distribution'].Add(self.observable_settings['jetscape_distribution_holes'], -1)
# Perform any additional manipulations on scaled histograms
self.post_process_histogram(observable)
#-------------------------------------------------------------
# For jet histograms, loop through all available hole subtraction variations, and initialize histogram
else:
for jet_collection_label in self.jet_collection_labels:
| |
# <gh_stars>1-10
# Copyright (c) 2006-2009 The Trustees of Indiana University.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# - Neither the Indiana University nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from corepy.spre.spe import MachineInstruction
from corepy.arch.x86.isa.x86_fields import *
#from corepy.arch.x86.lib.memory import MemoryReference
import corepy.arch.x86.types.registers as regs
# Utility functions for use by operand functions
# w8() just makes sure n is an unsigned byte; the others do this implicitly.
# little endian specific!!
# TODO - use this where it should be used
def w8(n):
    """Return n as a one-element list holding its low byte (unsigned, 0-255)."""
    return [n & 0xFF]
def w16(n):
    """Return n as two bytes in little-endian order."""
    return [(n >> shift) & 0xFF for shift in (0, 8)]
def w32(n):
    """Return n as four bytes in little-endian order.

    Fix: the original used the Python-2-only long literal ``0xFF000000l``,
    which is a SyntaxError on Python 3; plain ints are unbounded in both.
    """
    return [(n >> shift) & 0xFF for shift in (0, 8, 16, 24)]
# ------------------------------
# Type Instances
# ------------------------------

# Instances for common operand types, shared by the instruction signatures
# below. Each instance pairs a name with its allowed value range or register
# class.

# Constants
one_t = x86ConstantOperand("one", 1)  # matches the literal constant 1

# Immediates -- ranges appear to be half-open [lo, hi); imm*_t spans both
# signed and unsigned encodings, simm*_t only the signed range.
imm8_t = Imm8("imm8", (-128, 256))
simm8_t = Imm8("simm8", (-128, 128))
#uimm8_t = Imm8("uimm8", (0, 256))
imm16_t = Imm16("imm16", (-32768, 65536))
#uimm16_t = Imm16("uimm16", (0, 65536))
imm32_t = Imm32("imm32", (-2147483648, 4294967296))
simm32_t = Imm32("simm32", (-2147483648, 2147483648))

# Memory operands, keyed by operand width in bits (None = any width).
mem_t = x86MemoryOperand("mem", None)
mem8_t = x86MemoryOperand("mem8", 8)
mem16_t = x86MemoryOperand("mem16", 16)
mem32_t = x86MemoryOperand("mem32", 32)
mem64_t = x86MemoryOperand("mem64", 64)
mem80_t = x86MemoryOperand("mem80", 80)
mem128_t = x86MemoryOperand("mem128", 128)
# NOTE(review): 228 is an unusual width -- possibly the x87 environment area;
# confirm against the instructions that reference it.
mem228_t = x86MemoryOperand("mem228", 228)
#mem512_t = x86MemoryOperand("mem512", 512)
mem752_t = x86MemoryOperand("mem752", 752)
mem4096_t = x86MemoryOperand("mem4096", 4096)

# Registers, by register class
reg8_t = x86RegisterOperand("reg8", regs.GPRegister8)
reg16_t = x86RegisterOperand("reg16", regs.GPRegister16)
reg32_t = x86RegisterOperand("reg32", regs.GPRegister32)
regst_t = x86RegisterOperand("regst", regs.FPRegister)
mmx_t = x86RegisterOperand("mmx", regs.MMXRegister)
xmm_t = x86RegisterOperand("xmm", regs.XMMRegister)

# Fixed Registers -- operands that only match one specific register
al_t = FixedRegisterOperand("al", regs.GPRegister8, 0)
ax_t = FixedRegisterOperand("ax", regs.GPRegister16, 0)
cl_t = FixedRegisterOperand("cl", regs.GPRegister8, 1)
dx_t = FixedRegisterOperand("dx", regs.GPRegister16, 2)
eax_t = FixedRegisterOperand("eax", regs.GPRegister32, 0)
st0_t = FixedRegisterOperand("st0", regs.FPRegister, 0)

# Relative offsets (labels and raw relative displacements)
lbl8off_t = x86LabelOperand("lbl8off", (-128, 128))
lbl16off_t = x86LabelOperand("lbl16off", (-65536, 65536))
lbl32off_t = x86LabelOperand("lbl32off", (-2147483648, 2147483648))
rel8off_t = Rel8off("rel8off", (-128, 128))
rel16off_t = Rel16off("rel16off", (-32768, 32768))
rel32off_t = Rel32off("rel32off", (-2147483648, 2147483648))

# Prefix bytes
prefix = []  # NOTE(review): module-level list, appears unused in this chunk
lock_p = x86PrefixOperand("lock", 0xF0)  # LOCK prefix
addr_p = x86PrefixOperand("addr", 0x67)  # address-size override prefix
# ------------------------------
# x86 Machine Instructions
# ------------------------------
# TODO - 64bit size memrefs
#def common_memref(opcode, ref, modrm):
# if ref.addr != None: # addr (not valid for call??)
# if ref.size == 16:
# return [0x66] + opcode + [0x05 | modrm] + w32(ref.addr)
# if ref.size == 32:
# return opcode + [0x05 | modrm] + w32(ref.addr)
# elif isinstance(ref.reg, regs.GPRegister32):
# if ref.disp != None: # [base + disp]
# if isinstance(ref.index, regs.GPRegister32): # [base+disp+index*scale]
# sib = ref.scale | (ref.index.reg << 3) | ref.reg.reg
# if imm8_t.fits(ref.disp):
# return opcode + [0x44 | modrm, sib, ref.disp]
# elif imm32_t.fits(ref.disp):
# return opcode + [0x84 | modrm, sib] + w32(ref.disp)
# elif ref.index == None: # [base + disp]
# if ref.reg == regs.esp:
# if imm8_t.fits(ref.disp): # [esp + disp]
# return opcode + [0x44 | modrm, 0x24, ref.disp]
# elif imm32_t.fits(ref.disp):
# return opcode + [0x80 | modrm, 0x24] + w32(ref.disp)
# elif imm8_t.fits(ref.disp):
# return opcode + [0x40 | modrm | ref.reg.reg, ref.disp]
# elif imm32_t.fits(ref.disp):
# return opcode + [0x80 | modrm | ref.reg.reg] + w32(ref.disp)
# elif ref.disp == None:
# if ref.reg == regs.ebp:
# return opcode + [0x45 | modrm, 0x00] # [ebp]
# elif ref.reg == regs.esp:
# return opcode + [0x04 | modrm, 0x24] # [esp]
# return opcode + [modrm | ref.reg.reg] # [base]
# Encode the modRM byte (plus optional SIB byte and displacement) for a
# 32-bit-addressing memory reference `ref`, returning the full byte list
# `opcode` + modRM/SIB/disp. `modrm` carries the reg/opcode-digit bits to OR
# into the modRM byte; `ref` supplies base/index registers, scale and disp.
def common_memref_modrm(opcode, ref, modrm):
    if ref.disp != None and ref.disp != 0: # [base + disp]
        if ref.index != None: # [base+index*scale+disp]
            if ref.base != None:
                # SIB = scale | index | base; mod chooses disp8 vs disp32
                sib = ref.scale_sib | (ref.index.reg << 3) | ref.base.reg
                if simm8_t.fits(ref.disp):
                    return opcode + [0x44 | modrm, sib] + w8(ref.disp)
                elif simm32_t.fits(ref.disp):
                    return opcode + [0x84 | modrm, sib] + w32(ref.disp)
                # NOTE(review): falls through (returns None) if disp fits neither
            else: # [base + index*scale]
                # "displacement only" addressing mode
                # base field 5 with mod=00 means disp32, no base register
                sib = ref.scale_sib | (ref.index.reg << 3) | 5
                return opcode + [0x04 | modrm, sib] + w32(ref.disp)
        elif ref.index == None: # [base + disp]
            if ref.base == regs.esp:
                # esp as base always requires a SIB byte (0x24)
                if simm8_t.fits(ref.disp): # [esp + disp]
                    return opcode + [0x44 | modrm, 0x24] + w8(ref.disp)
                elif simm32_t.fits(ref.disp):
                    return opcode + [0x80 | modrm, 0x24] + w32(ref.disp)
                else:
                    raise AttributeError("invalid/unsupported addressing mode")
            elif simm8_t.fits(ref.disp):
                return opcode + [0x40 | modrm | ref.base.reg] + w8(ref.disp)
            elif simm32_t.fits(ref.disp):
                return opcode + [0x80 | modrm | ref.base.reg] + w32(ref.disp)
            else:
                raise AttributeError("invalid/unsupported addressing mode")
        else:
            raise AttributeError("invalid/unsupported addressing mode")
    elif ref.index != None:
        # No (or zero) displacement, with an index register
        sib = ref.scale_sib | (ref.index.reg << 3) | ref.base.reg
        if ref.base == regs.ebp:
            # ebp base cannot use mod=00; encode as disp8=0
            return opcode + [0x44 | modrm, sib, 0x00] # [ebp, index]
        return opcode + [0x04 | modrm, sib]
    elif ref.index == None:
        # No displacement, no index: plain [base]
        if ref.base == regs.ebp:
            return opcode + [0x45 | modrm, 0x00] # [rbp], [r13]
        elif ref.base == regs.esp:
            return opcode + [0x04 | modrm, 0x24] # [rsp], [r12]
        return opcode + [modrm | ref.base.reg] # [base]
    else:
        # Unreachable (index is either None or not), kept for safety
        raise AttributeError("invalid/unsupported addressing mode")
# Encode a memory operand: absolute addresses get the disp32-only form,
# otherwise delegate to the modRM/SIB encoder (with a 0x67 address-size
# override prefix for 16-bit addressing).
def common_memref(opcode, ref, modrm):
    if ref.addr != None:  # Absolute address
        return opcode + [0x05 | modrm] + w32(ref.addr)
    if ref.addr_size == 32:  # 32bit modRM address
        return common_memref_modrm(opcode, ref, modrm)
    if ref.addr_size == 16:  # 16bit modRM address
        return [0x67] + common_memref_modrm(opcode, ref, modrm)
    raise AttributeError("invalid/unsupported addressing mode")
class al_dx(MachineInstruction):
    """Form with fixed operands (AL, DX); renders as the bare opcode bytes."""
    signature = (al_t, dx_t)
    opt_kw = ()

    @staticmethod
    def render(params, operands):
        return params['opcode']
class al_imm8(MachineInstruction):
    """Form (AL, imm8): opcode followed by the 8-bit immediate."""
    signature = (al_t, imm8_t)
    opt_kw = ()

    @staticmethod
    def render(params, operands):
        return params['opcode'] + w8(operands['imm8'])
class ax(MachineInstruction):
    """Form with the single fixed operand AX; renders as the bare opcode bytes."""
    signature = (ax_t,)
    opt_kw = ()

    @staticmethod
    def render(params, operands):
        return params['opcode']
class ax_dx(MachineInstruction):
    """Form with fixed operands (AX, DX); renders as the bare opcode bytes."""
    signature = (ax_t, dx_t)
    opt_kw = ()

    @staticmethod
    def render(params, operands):
        return params['opcode']
class ax_imm16(MachineInstruction):
    """Form (AX, imm16): opcode followed by the little-endian 16-bit immediate."""
    signature = (ax_t, imm16_t)
    opt_kw = ()

    @staticmethod
    def render(params, operands):
        return params['opcode'] + w16(operands['imm16'])
class ax_imm8(MachineInstruction):
    """Form (AX, imm8): opcode followed by the 8-bit immediate."""
    signature = (ax_t, imm8_t)
    opt_kw = ()

    @staticmethod
    def render(params, operands):
        return params['opcode'] + w8(operands['imm8'])
class ax_reg16(MachineInstruction):
    """Form (AX, reg16): the register number is added into the last opcode byte."""
    signature = (ax_t, reg16_t)
    opt_kw = ()

    @staticmethod
    def render(params, operands):
        encoded = list(params['opcode'])
        encoded[-1] += operands['reg16'].reg
        return encoded
class dx_al(MachineInstruction):
    """Form with fixed operands (DX, AL); renders as the bare opcode bytes."""
    signature = (dx_t, al_t)
    opt_kw = ()

    @staticmethod
    def render(params, operands):
        return params['opcode']
class dx_ax(MachineInstruction):
    """Form with fixed operands (DX, AX); renders as the bare opcode bytes."""
    signature = (dx_t, ax_t)
    opt_kw = ()

    @staticmethod
    def render(params, operands):
        return params['opcode']
class dx_eax(MachineInstruction):
    """Form with fixed operands (DX, EAX); renders as the bare opcode bytes."""
    signature = (dx_t, eax_t)
    opt_kw = ()

    @staticmethod
    def render(params, operands):
        return params['opcode']
class eax_dx(MachineInstruction):
    """Form with fixed operands (EAX, DX); renders as the bare opcode bytes."""
    signature = (eax_t, dx_t)
    opt_kw = ()

    @staticmethod
    def render(params, operands):
        return params['opcode']
class eax_imm32(MachineInstruction):
    """Form (EAX, imm32): opcode followed by the little-endian 32-bit immediate."""
    signature = (eax_t, imm32_t)
    opt_kw = ()

    @staticmethod
    def render(params, operands):
        return params['opcode'] + w32(operands['imm32'])
class eax_imm8(MachineInstruction):
    """Form (EAX, imm8): opcode followed by the 8-bit immediate."""
    signature = (eax_t, imm8_t)
    opt_kw = ()

    @staticmethod
    def render(params, operands):
        return params['opcode'] + w8(operands['imm8'])
class eax_reg32(MachineInstruction):
    """Form (EAX, reg32): the register number is added into the last opcode byte."""
    signature = (eax_t, reg32_t)
    opt_kw = ()

    @staticmethod
    def render(params, operands):
        encoded = list(params['opcode'])
        encoded[-1] += operands['reg32'].reg
        return encoded
class imm16(MachineInstruction):
    """Form with a single imm16 operand: opcode followed by the 16-bit immediate."""
    signature = (imm16_t,)
    opt_kw = ()

    @staticmethod
    def render(params, operands):
        return params['opcode'] + w16(operands['imm16'])
class imm16_imm8(MachineInstruction):
signature = (imm16_t, imm8_t)
opt_kw = ()
def _render(params, operands):
return params['opcode'] + w16(operands['imm16']) | |
# <gh_stars>10-100
import json
import logging
from rdr_service.lib_fhir.fhirclient_1_0_6.models import observation as fhir_observation
from rdr_service.lib_fhir.fhirclient_1_0_6.models.fhirabstractbase import FHIRValidationError
from sqlalchemy.orm import subqueryload
from sqlalchemy.orm.attributes import flag_modified
from werkzeug.exceptions import BadRequest
from rdr_service import clock
from rdr_service.api_util import parse_date
from rdr_service.concepts import Concept
from rdr_service.dao.base_dao import UpdatableDao
from rdr_service.dao.participant_dao import ParticipantDao, raise_if_withdrawn
from rdr_service.dao.participant_summary_dao import ParticipantSummaryDao
from rdr_service.dao.site_dao import SiteDao
from rdr_service.model.log_position import LogPosition
from rdr_service.model.measurements import Measurement, PhysicalMeasurements
from rdr_service.participant_enums import PhysicalMeasurementsStatus
# Extension / terminology URLs used in physical-measurement FHIR documents.
_AMENDMENT_URL = "http://terminology.pmi-ops.org/StructureDefinition/amends"
_OBSERVATION_RESOURCE_TYPE = "Observation"
_COMPOSITION_RESOURCE_TYPE = "Composition"
_CREATED_LOC_EXTENSION = "http://terminology.pmi-ops.org/StructureDefinition/authored-location"
_FINALIZED_LOC_EXTENSION = "http://terminology.pmi-ops.org/StructureDefinition/finalized-location"
_PM_SYSTEM_PREFIX = "http://terminology.pmi-ops.org/CodeSystem/"
_AUTHORING_STEP = "http://terminology.pmi-ops.org/StructureDefinition/authoring-step"
# Authoring-step status values.
_CREATED_STATUS = "created"
_FINALIZED_STATUS = "finalized"
# Reference prefixes used inside FHIR resource references.
_LOCATION_PREFIX = "Location/"
_AUTHOR_PREFIX = "Practitioner/"
_QUALIFIED_BY_RELATED_TYPE = "qualified-by"
# Extensions this DAO understands; any other extension is logged and ignored.
_ALL_EXTENSIONS = set([_AMENDMENT_URL, _CREATED_LOC_EXTENSION, _FINALIZED_LOC_EXTENSION])
_BYTE_LIMIT = 65535  # max chars for a notes string (64 KiB - 1)
class PhysicalMeasurementsDao(UpdatableDao):
def __init__(self):
    """Create a DAO for PhysicalMeasurements rows, ordered by logPositionId last.

    Modernized: zero-argument ``super()`` -- the file already uses Python-3-only
    f-strings, so the verbose two-argument form is unnecessary.
    """
    super().__init__(PhysicalMeasurements, order_by_ending=["logPositionId"])
def get_id(self, obj):
    """Return the primary key of a PhysicalMeasurements row."""
    return obj.physicalMeasurementsId
def get_with_session(self, session, obj_id, **kwargs):
    """Fetch a row by id, validating its participant reference when found."""
    record = super(PhysicalMeasurementsDao, self).get_with_session(session, obj_id, **kwargs)
    if record:
        ParticipantDao().validate_participant_reference(session, record)
    return record
def get_with_children(self, physical_measurements_id, for_update=False):
    """Open a fresh session and fetch measurements with child rows eagerly loaded."""
    with self.session() as session:
        return self.get_with_children_with_session(
            session, physical_measurements_id, for_update=for_update)
def get_with_children_with_session(self, session, physical_measurements_id, for_update=False):
    """Query within an existing session, eagerly loading sub-measurements and
    qualifiers; optionally locks the row with SELECT ... FOR UPDATE."""
    query = session.query(PhysicalMeasurements).options(
        subqueryload(PhysicalMeasurements.measurements).subqueryload(Measurement.measurements),
        subqueryload(PhysicalMeasurements.measurements).subqueryload(Measurement.qualifiers),
    )
    if for_update:
        query = query.with_for_update()
    return query.get(physical_measurements_id)
def get_measuremnets_for_participant(self, pid):
    """Return all PhysicalMeasurements rows for the given participant.

    NOTE(review): the method name misspells 'measurements'; it is kept as-is
    for backward compatibility with existing callers.
    """
    with self.session() as session:
        return (
            session.query(PhysicalMeasurements)
            .filter(PhysicalMeasurements.participantId == pid)
            .all()
        )
def get_date_from_pm_resource(self, pid, pm_id):
    """Retrieve a specific measurement and fetch the date from its payload,
    which corresponds to the 'finalized date'.

    :param pid: participant id
    :param pm_id: physical measurement id
    :returns: date from the measurement payload's resource (UTC)
    """
    with self.session() as session:
        record = (
            session.query(PhysicalMeasurements)
            .filter(PhysicalMeasurements.participantId == pid)
            .filter(PhysicalMeasurements.physicalMeasurementsId == pm_id)
            .first()
        )
        _doc, composition = self.load_record_fhir_doc(record)
        return parse_date(composition['date'])
@staticmethod
def handle_measurement(measurement_map, m):
    """Populate measurement_map with information extracted from measurement m
    and all of its descendants (sub-measurements and qualifiers).

    Fix: value presence is tested with ``is not None`` for decimals so that a
    legitimate 0.0 reading still registers the 'decimal' type and updates the
    tracked min/max (plain truthiness silently dropped zero values).
    """
    code_concept = Concept(m.codeSystem, m.codeValue)
    measurement_data = measurement_map.get(code_concept)
    if not measurement_data:
        # First time we see this concept: start an empty aggregate entry.
        measurement_data = {
            "bodySites": set(),
            "types": set(),
            "units": set(),
            "codes": set(),
            "submeasurements": set(),
            "qualifiers": set(),
        }
        measurement_map[code_concept] = measurement_data
    if m.bodySiteCodeSystem:
        measurement_data["bodySites"].add(Concept(m.bodySiteCodeSystem, m.bodySiteCodeValue))
    if m.valueString:
        if len(m.valueString) > _BYTE_LIMIT:
            raise BadRequest("Notes field exceeds limit.")
        measurement_data["types"].add("string")
    if m.valueDecimal is not None:  # was `if m.valueDecimal:` -- dropped 0.0
        measurement_data["types"].add("decimal")
        min_decimal = measurement_data.get("min")
        max_decimal = measurement_data.get("max")
        if min_decimal is None or min_decimal > m.valueDecimal:
            measurement_data["min"] = m.valueDecimal
        if max_decimal is None or max_decimal < m.valueDecimal:
            measurement_data["max"] = m.valueDecimal
    if m.valueUnit:
        measurement_data["units"].add(m.valueUnit)
    if m.valueCodeSystem:
        measurement_data["codes"].add(Concept(m.valueCodeSystem, m.valueCodeValue))
    if m.valueDateTime:
        measurement_data["types"].add("date")
    # Recurse into sub-measurements, recording the parent/child relation.
    for sm in m.measurements:
        measurement_data["submeasurements"].add(Concept(sm.codeSystem, sm.codeValue))
        PhysicalMeasurementsDao.handle_measurement(measurement_map, sm)
    for q in m.qualifiers:
        measurement_data["qualifiers"].add(Concept(q.codeSystem, q.codeValue))
def get_distinct_measurements(self):
    """Returns metadata about all the distinct physical measurements in use for participants."""
    measurement_map = {}
    with self.session() as session:
        # Stream rows in batches to bound memory use.
        for pms in session.query(PhysicalMeasurements).yield_per(100):
            try:
                fhir_doc, _composition = self.load_record_fhir_doc(pms)
                parsed_pms = PhysicalMeasurementsDao.from_client_json(fhir_doc, pms.participantId)
                for measurement in parsed_pms.measurements:
                    PhysicalMeasurementsDao.handle_measurement(measurement_map, measurement)
            except FHIRValidationError as e:
                logging.error(f"Could not parse measurements as FHIR: {pms.resource}; exception = {e}")
    return measurement_map
@staticmethod
def concept_json(concept):
    """Serialize a Concept to its JSON-ready {system, code} dict."""
    return dict(system=concept.system, code=concept.code)
@staticmethod
def get_measurements_json(concept, measurement_data, m_map):
    """Serialize one measurement's aggregated metadata -- and, recursively,
    its submeasurements -- to a JSON-ready dict.

    Idiom fix: replaced ``list(<generator>)`` wrappers with list
    comprehensions (flake8-comprehensions C400).
    """
    cj = PhysicalMeasurementsDao.concept_json
    result = {
        "code": cj(concept),
        "bodySites": [cj(body_concept) for body_concept in measurement_data["bodySites"]],
        "types": list(measurement_data["types"]),
        "units": list(measurement_data["units"]),
    }
    # NOTE(review): truthiness test means a min/max of 0 is omitted from the
    # output -- preserved as-is; confirm whether that is intended.
    if measurement_data.get("min"):
        result["min"] = measurement_data["min"]
    if measurement_data.get("max"):
        result["max"] = measurement_data["max"]
    result["valueCodes"] = [cj(code_concept) for code_concept in measurement_data["codes"]]
    result["qualifiers"] = [
        cj(qualifier_concept) for qualifier_concept in measurement_data["qualifiers"]
    ]
    result["submeasurements"] = [
        PhysicalMeasurementsDao.get_measurements_json(sm, m_map[sm], m_map)
        for sm in measurement_data["submeasurements"]
    ]
    return result
def get_distinct_measurements_json(self):
    """Returns metadata about all the distinct physical measurements in use for
    participants, in a JSON format that can be used to generate fake physical
    measurement data later.

    Fixes: dropped the unnecessary ``list(...)`` materialization around
    ``.items()`` (nothing mutates the map while iterating; Perflint PERF101)
    and the unused ``concept`` loop variable in the first pass.
    """
    measurement_map = self.get_distinct_measurements()
    # First pass: collect every concept that appears as a submeasurement,
    # so those are emitted only nested under their parents.
    submeasurements = set()
    for measurement_data in measurement_map.values():
        submeasurements.update(measurement_data["submeasurements"])
    return [
        PhysicalMeasurementsDao.get_measurements_json(concept, measurement_data, measurement_map)
        for concept, measurement_data in measurement_map.items()
        if concept not in submeasurements
    ]
def _initialize_query(self, session, query_def):
    """Build the base query, validating the participant when one is filtered on."""
    participant_id = next(
        (f.value for f in query_def.field_filters if f.field_name == "participantId"),
        None,
    )
    # Sync queries don't specify a participant ID, and can return measurements
    # for participants who have subsequently withdrawn; for all requests that
    # do specify a participant ID, make sure the participant exists and is not
    # withdrawn.
    if participant_id:
        ParticipantDao().validate_participant_id(session, participant_id)
    return super(PhysicalMeasurementsDao, self)._initialize_query(session, query_def)
@staticmethod
def _measurements_as_dict(measurements):
    """Serialize a PhysicalMeasurements row, dropping per-insert fields
    (ids, created timestamp, log position) so two submissions can be compared
    for duplicate detection."""
    as_dict = measurements.asdict()
    for key in ("physicalMeasurementsId", "created", "logPositionId"):
        del as_dict[key]
    if as_dict["resource"].get("id"):
        del as_dict["resource"]["id"]
    return as_dict
@staticmethod
def set_measurement_ids(physical_measurements):
    """Assign the parent id and sequential measurement ids to every measurement
    (and one level of sub-measurements) in document order."""
    pm_id = physical_measurements.physicalMeasurementsId
    # Flatten: each top-level measurement followed by its sub-measurements,
    # matching the original numbering order.
    ordered = []
    for top_level in physical_measurements.measurements:
        ordered.append(top_level)
        ordered.extend(top_level.measurements)
    for count, measurement in enumerate(ordered):
        measurement.physicalMeasurementsId = pm_id
        measurement.measurementId = PhysicalMeasurementsDao.make_measurement_id(pm_id, count)
def insert_with_session(self, session, obj):
    """Insert a PhysicalMeasurements row: parses the FHIR payload, applies
    amendment extensions, updates the participant summary, deduplicates
    identical submissions, and writes the assigned id back into the resource.
    Returns the stored (or pre-existing duplicate) row."""
    is_amendment = False
    obj.logPosition = LogPosition()
    obj.final = True
    obj.created = clock.CLOCK.now()
    # The resource may arrive as an already-parsed dict or a JSON string.
    resource_json = obj.resource if isinstance(obj.resource, dict) else json.loads(obj.resource)
    finalized_date = resource_json["entry"][0]["resource"].get("date")
    if finalized_date:
        obj.finalized = parse_date(finalized_date)
    # Scan the composition's extensions; an amendment extension rewrites the
    # amended record and stops further extension processing.
    for extension in resource_json["entry"][0]["resource"].get("extension", []):
        url = extension.get("url")
        if url not in _ALL_EXTENSIONS:
            logging.info(
                f"Ignoring unsupported extension for PhysicalMeasurements: {url}. \
Expected one of: {_ALL_EXTENSIONS}"
            )
            continue
        if url == _AMENDMENT_URL:
            self._update_amended(obj, extension, url, session)
            is_amendment = True
            break
    participant_summary = self._update_participant_summary(session, obj, is_amendment)
    existing_measurements = (
        session.query(PhysicalMeasurements).filter(PhysicalMeasurements.participantId == obj.participantId).all()
    )
    if existing_measurements:
        new_dict = self._measurements_as_dict(obj)
        for measurements in existing_measurements:
            if self._measurements_as_dict(measurements) == new_dict:
                # If there are already measurements that look exactly like this, return them
                # without inserting new measurements.
                return measurements
    PhysicalMeasurementsDao.set_measurement_ids(obj)
    inserted_obj = super(PhysicalMeasurementsDao, self).insert_with_session(session, obj)
    if not is_amendment:  # Amendments aren't expected to have site ID extensions.
        if participant_summary.biospecimenCollectedSiteId is None:
            ParticipantDao().add_missing_hpo_from_site(
                session, inserted_obj.participantId, inserted_obj.finalizedSiteId
            )
    # Flush to assign an ID to the measurements, as the client doesn't provide one.
    session.flush()
    # Update the resource to contain the ID.
    resource_json["id"] = str(obj.physicalMeasurementsId)
    obj = self.store_record_fhir_doc(obj, resource_json)
    return obj
    def _update_participant_summary(self, session, obj, is_amendment=False):
        """Sync the participant summary with this measurement's status.

        Adjusts distinct-visit counts and the physical-measurements status/time/site
        fields on the summary, then merges it into the session.

        :param session: active SQLAlchemy session
        :param obj: the PhysicalMeasurements being inserted
        :param is_amendment: True when obj amends an earlier measurement; amendments
            never change the distinct-visit count
        :raises BadRequest: when the participant is missing, unconsented, or has no id
        :return: the (merged) ParticipantSummary
        """
        participant_id = obj.participantId
        if participant_id is None:
            raise BadRequest("participantId is required")
        participant_summary_dao = ParticipantSummaryDao()
        # Row-locked read so concurrent submissions can't race on the summary.
        participant = ParticipantDao().get_for_update(session, participant_id)
        if not participant:
            raise BadRequest(f"Can't submit physical measurements for unknown participant {participant_id}")
        participant_summary = participant.participantSummary
        if not participant_summary:
            raise BadRequest(f"Can't submit physical measurements for participant {participant_id} without consent")
        raise_if_withdrawn(participant_summary)
        participant_summary.lastModified = clock.CLOCK.now()
        is_distinct_visit = participant_summary_dao.calculate_distinct_visits(
            participant_id, obj.finalized, obj.physicalMeasurementsId
        )
        # A cancelled measurement that was a distinct visit takes that visit back.
        if (
            obj.status
            and obj.status == PhysicalMeasurementsStatus.CANCELLED
            and is_distinct_visit
            and not is_amendment
        ):
            participant_summary.numberDistinctVisits -= 1
        # These fields set on measurement that is cancelled and doesn't have a previous good measurement
        if (
            obj.status
            and obj.status == PhysicalMeasurementsStatus.CANCELLED
            and not self.has_uncancelled_pm(session, participant)
        ):
            participant_summary.physicalMeasurementsStatus = PhysicalMeasurementsStatus.CANCELLED
            participant_summary.physicalMeasurementsTime = None
            participant_summary.physicalMeasurementsFinalizedTime = None
            participant_summary.physicalMeasurementsFinalizedSiteId = None
        # These fields set on any measurement not cancelled
        elif obj.status != PhysicalMeasurementsStatus.CANCELLED:
            # new PM or if a PM was restored, it is complete again.
            participant_summary.physicalMeasurementsStatus = PhysicalMeasurementsStatus.COMPLETED
            participant_summary.physicalMeasurementsTime = obj.created
            participant_summary.physicalMeasurementsFinalizedTime = obj.finalized
            participant_summary.physicalMeasurementsCreatedSiteId = obj.createdSiteId
            participant_summary.physicalMeasurementsFinalizedSiteId = obj.finalizedSiteId
            if is_distinct_visit and not is_amendment:
                participant_summary.numberDistinctVisits += 1
        # Cancelled, but an earlier good measurement survives: fall back to its fields.
        elif (
            obj.status
            and obj.status == PhysicalMeasurementsStatus.CANCELLED
            and self.has_uncancelled_pm(session, participant)
        ):
            get_latest_pm = self.get_latest_pm(session, participant)
            participant_summary.physicalMeasurementsFinalizedTime = get_latest_pm.finalized
            participant_summary.physicalMeasurementsTime = get_latest_pm.created
            participant_summary.physicalMeasurementsCreatedSiteId = get_latest_pm.createdSiteId
            participant_summary.physicalMeasurementsFinalizedSiteId = get_latest_pm.finalizedSiteId
        participant_summary_dao.update_enrollment_status(participant_summary)
        session.merge(participant_summary)
        return participant_summary
def get_latest_pm(self, session, participant):
return (
session.query(PhysicalMeasurements)
.filter_by(participantId=participant.participantId)
.filter(PhysicalMeasurements.finalized != None)
.order_by(PhysicalMeasurements.finalized.desc())
.first()
)
def has_uncancelled_pm(self, session, participant):
"""return True if participant has at least one physical measurement that is not cancelled"""
query = (
session.query(PhysicalMeasurements.status)
.filter_by(participantId=participant.participantId)
.filter(PhysicalMeasurements.finalized != None)
.all()
)
valid_pm = False
for pm in query:
if pm.status != PhysicalMeasurementsStatus.CANCELLED:
valid_pm = True
return valid_pm
def insert(self, obj):
if obj.physicalMeasurementsId:
return super(PhysicalMeasurementsDao, self).insert(obj)
return self._insert_with_random_id(obj, ["physicalMeasurementsId"])
def _update_amended(self, obj, extension, url, session):
"""Finds the measurements that are being amended; sets the resource status to 'amended',
the 'final' flag to False, and sets the new measurements' amendedMeasurementsId field to
its ID."""
value_ref = extension.get("valueReference")
if value_ref is None:
raise BadRequest(f"No valueReference in extension {url}.")
ref = value_ref.get("reference")
if ref is None:
raise BadRequest(f"No reference in extension {url}.")
type_name, ref_id = ref.split("/")
if type_name != "PhysicalMeasurements":
raise BadRequest(f"Bad reference type in extension {url}: {ref}.")
try:
amended_measurement_id = int(ref_id)
except ValueError:
raise BadRequest(f"Invalid ref id: {ref_id}")
amended_measurement = self.get_with_session(session, amended_measurement_id)
if amended_measurement is None:
raise BadRequest(f"Amendment references unknown PhysicalMeasurement {ref_id}.")
amended_resource_json, composition = self.load_record_fhir_doc(amended_measurement)
composition["status"] = "amended"
amended_measurement.final = False
amended_measurement = self.store_record_fhir_doc(amended_measurement, amended_resource_json)
session.merge(amended_measurement)
obj.amendedMeasurementsId = amended_measurement_id
def update_with_patch(self, id_, session, resource):
record = self.get_with_children_with_session(session, id_, for_update=True)
return self._do_update_with_patch(session, record, resource)
def patch(self, id_, resource, p_id):
# pylint: disable=unused-argument
with self.session() as session:
# resource = request.get_json(force=True)
order = self.update_with_patch(id_, | |
= ros_msg_.id
pb_msg_.parent = ros_msg_.parent
pb_msg.groups.append(pb_msg_)
yield pb_msg
rospy.sleep(0.01)
class usb_cam_image_raw_compressedDepthServicer(ros_grpc.usb_cam_image_raw_compressedDepthServicer):
    """gRPC bridge for /usb_cam/image_raw/compressedDepth (sensor_msgs/CompressedImage).

    Publish forwards one protobuf message to the ROS topic; Subscribe streams
    messages from the ROS topic back to the gRPC client until the RPC ends.
    """

    def __init__(self):
        self.pub = None  # created lazily on the first Publish call
        self.Msg = roslib.message.get_message_class('sensor_msgs/CompressedImage')

    def Publish(self, pb_msg, context):
        """Convert one protobuf CompressedImage to ROS and publish it."""
        if self.pub is None:  # `is None` instead of `== None` (PEP 8 E711)
            self.pub = rospy.Publisher('/usb_cam/image_raw/compressedDepth', self.Msg, queue_size=10)
        ros_msg = self.Msg()
        ros_msg.header.seq = pb_msg.header.seq
        ros_msg.header.stamp.secs = pb_msg.header.stamp.secs
        ros_msg.header.stamp.nsecs = pb_msg.header.stamp.nsecs
        ros_msg.header.frame_id = pb_msg.header.frame_id
        ros_msg.format = pb_msg.format
        ros_msg.data = pb_msg.data
        self.pub.publish(ros_msg)
        return ros_pb.Empty()

    def Subscribe(self, request, context):
        """Yield topic messages as protobuf; polling stops when the RPC is done."""
        c = {'unsubscribed': False}
        ros_messages = []
        def callback(ros_msg):
            ros_messages.append(ros_msg)
        subscription = rospy.Subscriber('/usb_cam/image_raw/compressedDepth', self.Msg, callback)
        def on_rpc_done():
            # Flip the flag first so the polling loop below exits promptly.
            c['unsubscribed'] = True
            print("Attempting to regain servicer thread...", c)
            subscription.unregister()
        context.add_callback(on_rpc_done)
        while not c['unsubscribed']:
            while ros_messages:
                ros_msg = ros_messages.pop(0)
                pb_msg = ros_pb.sensor_msgs.CompressedImage()
                pb_msg.header.seq = ros_msg.header.seq
                pb_msg.header.stamp.secs = ros_msg.header.stamp.secs
                pb_msg.header.stamp.nsecs = ros_msg.header.stamp.nsecs
                pb_msg.header.frame_id = ros_msg.header.frame_id
                pb_msg.format = ros_msg.format
                pb_msg.data = ros_msg.data
                yield pb_msg
            rospy.sleep(0.01)
class usb_cam_image_raw_compressedDepth_parameter_descriptionsServicer(ros_grpc.usb_cam_image_raw_compressedDepth_parameter_descriptionsServicer):
    """gRPC bridge for /usb_cam/image_raw/compressedDepth/parameter_descriptions
    (dynamic_reconfigure/ConfigDescription).

    Publish forwards protobuf messages to ROS; Subscribe streams ROS messages
    back as protobuf. The repetitive per-type copy loops of the generated code
    are collapsed into the two table-driven helpers below.
    """

    # (field name, message type name) pairs for the simple name/value parameter
    # lists shared by the max/min/dflt Config sub-messages.
    _PARAM_FIELDS = (
        ('bools', 'BoolParameter'),
        ('ints', 'IntParameter'),
        ('strs', 'StrParameter'),
        ('doubles', 'DoubleParameter'),
    )

    def __init__(self):
        self.pub = None  # created lazily on the first Publish call
        self.Msg = roslib.message.get_message_class('dynamic_reconfigure/ConfigDescription')

    @classmethod
    def _fill_ros_config(cls, pb_cfg, ros_cfg):
        # Copy one protobuf dynamic_reconfigure.Config into a ROS Config in the
        # original field order: bools, ints, strs, doubles, then groups.
        for field, type_name in cls._PARAM_FIELDS:
            param_cls = roslib.message.get_message_class('dynamic_reconfigure/' + type_name)
            for pb_param in getattr(pb_cfg, field):
                ros_param = param_cls()
                ros_param.name = pb_param.name
                ros_param.value = pb_param.value
                getattr(ros_cfg, field).append(ros_param)
        group_cls = roslib.message.get_message_class('dynamic_reconfigure/GroupState')
        for pb_group in pb_cfg.groups:
            ros_group = group_cls()
            ros_group.name = pb_group.name
            ros_group.state = pb_group.state
            ros_group.id = pb_group.id
            ros_group.parent = pb_group.parent
            ros_cfg.groups.append(ros_group)

    @classmethod
    def _fill_pb_config(cls, ros_cfg, pb_cfg):
        # Copy one ROS dynamic_reconfigure/Config into a protobuf Config in the
        # original field order: bools, ints, strs, doubles, then groups.
        for field, type_name in cls._PARAM_FIELDS:
            pb_cls = getattr(ros_pb.dynamic_reconfigure, type_name)
            for ros_param in getattr(ros_cfg, field):
                pb_param = pb_cls()
                pb_param.name = ros_param.name
                pb_param.value = ros_param.value
                getattr(pb_cfg, field).append(pb_param)
        for ros_group in ros_cfg.groups:
            pb_group = ros_pb.dynamic_reconfigure.GroupState()
            pb_group.name = ros_group.name
            pb_group.state = ros_group.state
            pb_group.id = ros_group.id
            pb_group.parent = ros_group.parent
            pb_cfg.groups.append(pb_group)

    def Publish(self, pb_msg, context):
        """Convert one protobuf ConfigDescription to ROS and publish it."""
        if self.pub is None:  # `is None` instead of `== None` (PEP 8 E711)
            self.pub = rospy.Publisher('/usb_cam/image_raw/compressedDepth/parameter_descriptions', self.Msg, queue_size=10)
        ros_msg = self.Msg()
        group_cls = roslib.message.get_message_class('dynamic_reconfigure/Group')
        param_cls = roslib.message.get_message_class('dynamic_reconfigure/ParamDescription')
        # Top-level description groups carry full parameter descriptions.
        for pb_group in pb_msg.groups:
            ros_group = group_cls()
            ros_group.name = pb_group.name
            ros_group.type = pb_group.type
            for pb_param in pb_group.parameters:
                ros_param = param_cls()
                ros_param.name = pb_param.name
                ros_param.type = pb_param.type
                ros_param.level = pb_param.level
                ros_param.description = pb_param.description
                ros_param.edit_method = pb_param.edit_method
                ros_group.parameters.append(ros_param)
            ros_group.parent = pb_group.parent
            ros_group.id = pb_group.id
            ros_msg.groups.append(ros_group)
        # max/min/dflt are plain Config sub-messages.
        for pb_cfg, ros_cfg in ((pb_msg.max, ros_msg.max), (pb_msg.min, ros_msg.min), (pb_msg.dflt, ros_msg.dflt)):
            self._fill_ros_config(pb_cfg, ros_cfg)
        self.pub.publish(ros_msg)
        return ros_pb.Empty()

    def Subscribe(self, request, context):
        """Yield topic messages as protobuf; polling stops when the RPC is done."""
        c = {'unsubscribed': False}
        ros_messages = []
        def callback(ros_msg):
            ros_messages.append(ros_msg)
        subscription = rospy.Subscriber('/usb_cam/image_raw/compressedDepth/parameter_descriptions', self.Msg, callback)
        def on_rpc_done():
            # Flip the flag first so the polling loop below exits promptly.
            c['unsubscribed'] = True
            print("Attempting to regain servicer thread...", c)
            subscription.unregister()
        context.add_callback(on_rpc_done)
        while not c['unsubscribed']:
            while ros_messages:
                ros_msg = ros_messages.pop(0)
                pb_msg = ros_pb.dynamic_reconfigure.ConfigDescription()
                for ros_group in ros_msg.groups:
                    pb_group = ros_pb.dynamic_reconfigure.Group()
                    pb_group.name = ros_group.name
                    pb_group.type = ros_group.type
                    for ros_param in ros_group.parameters:
                        pb_param = ros_pb.dynamic_reconfigure.ParamDescription()
                        pb_param.name = ros_param.name
                        pb_param.type = ros_param.type
                        pb_param.level = ros_param.level
                        pb_param.description = ros_param.description
                        pb_param.edit_method = ros_param.edit_method
                        pb_group.parameters.append(pb_param)
                    pb_group.parent = ros_group.parent
                    pb_group.id = ros_group.id
                    pb_msg.groups.append(pb_group)
                for ros_cfg, pb_cfg in ((ros_msg.max, pb_msg.max), (ros_msg.min, pb_msg.min), (ros_msg.dflt, pb_msg.dflt)):
                    self._fill_pb_config(ros_cfg, pb_cfg)
                yield pb_msg
            rospy.sleep(0.01)
class usb_cam_image_raw_compressedDepth_parameter_updatesServicer(ros_grpc.usb_cam_image_raw_compressedDepth_parameter_updatesServicer):
    """gRPC bridge for /usb_cam/image_raw/compressedDepth/parameter_updates
    (dynamic_reconfigure/Config).

    Publish forwards protobuf messages to ROS; Subscribe streams ROS messages
    back as protobuf. The per-type copy loops are table-driven.
    """

    # (field name, message type name) pairs for the name/value parameter lists.
    _PARAM_FIELDS = (
        ('bools', 'BoolParameter'),
        ('ints', 'IntParameter'),
        ('strs', 'StrParameter'),
        ('doubles', 'DoubleParameter'),
    )

    def __init__(self):
        self.pub = None  # created lazily on the first Publish call
        self.Msg = roslib.message.get_message_class('dynamic_reconfigure/Config')

    def Publish(self, pb_msg, context):
        """Convert one protobuf Config to ROS and publish it."""
        if self.pub is None:  # `is None` instead of `== None` (PEP 8 E711)
            self.pub = rospy.Publisher('/usb_cam/image_raw/compressedDepth/parameter_updates', self.Msg, queue_size=10)
        ros_msg = self.Msg()
        # Field order matches the generated original: bools, ints, strs, doubles, groups.
        for field, type_name in self._PARAM_FIELDS:
            param_cls = roslib.message.get_message_class('dynamic_reconfigure/' + type_name)
            for pb_param in getattr(pb_msg, field):
                ros_param = param_cls()
                ros_param.name = pb_param.name
                ros_param.value = pb_param.value
                getattr(ros_msg, field).append(ros_param)
        group_cls = roslib.message.get_message_class('dynamic_reconfigure/GroupState')
        for pb_group in pb_msg.groups:
            ros_group = group_cls()
            ros_group.name = pb_group.name
            ros_group.state = pb_group.state
            ros_group.id = pb_group.id
            ros_group.parent = pb_group.parent
            ros_msg.groups.append(ros_group)
        self.pub.publish(ros_msg)
        return ros_pb.Empty()

    def Subscribe(self, request, context):
        """Yield topic messages as protobuf; polling stops when the RPC is done."""
        c = {'unsubscribed': False}
        ros_messages = []
        def callback(ros_msg):
            ros_messages.append(ros_msg)
        subscription = rospy.Subscriber('/usb_cam/image_raw/compressedDepth/parameter_updates', self.Msg, callback)
        def on_rpc_done():
            # Flip the flag first so the polling loop below exits promptly.
            c['unsubscribed'] = True
            print("Attempting to regain servicer thread...", c)
            subscription.unregister()
        context.add_callback(on_rpc_done)
        while not c['unsubscribed']:
            while ros_messages:
                ros_msg = ros_messages.pop(0)
                pb_msg = ros_pb.dynamic_reconfigure.Config()
                for field, type_name in self._PARAM_FIELDS:
                    pb_cls = getattr(ros_pb.dynamic_reconfigure, type_name)
                    for ros_param in getattr(ros_msg, field):
                        pb_param = pb_cls()
                        pb_param.name = ros_param.name
                        pb_param.value = ros_param.value
                        getattr(pb_msg, field).append(pb_param)
                for ros_group in ros_msg.groups:
                    pb_group = ros_pb.dynamic_reconfigure.GroupState()
                    pb_group.name = ros_group.name
                    pb_group.state = ros_group.state
                    pb_group.id = ros_group.id
                    pb_group.parent = ros_group.parent
                    pb_msg.groups.append(pb_group)
                yield pb_msg
            rospy.sleep(0.01)
class usb_cam_image_raw_theoraServicer(ros_grpc.usb_cam_image_raw_theoraServicer):
    """gRPC bridge for /usb_cam/image_raw/theora (theora_image_transport/Packet).

    Publish forwards one protobuf packet to the ROS topic; Subscribe streams
    packets from the ROS topic back to the gRPC client until the RPC ends.
    """

    def __init__(self):
        self.pub = None  # created lazily on the first Publish call
        self.Msg = roslib.message.get_message_class('theora_image_transport/Packet')

    def Publish(self, pb_msg, context):
        """Convert one protobuf Packet to ROS and publish it."""
        if self.pub is None:  # `is None` instead of `== None` (PEP 8 E711)
            self.pub = rospy.Publisher('/usb_cam/image_raw/theora', self.Msg, queue_size=10)
        ros_msg = self.Msg()
        ros_msg.header.seq = pb_msg.header.seq
        ros_msg.header.stamp.secs = pb_msg.header.stamp.secs
        ros_msg.header.stamp.nsecs = pb_msg.header.stamp.nsecs
        ros_msg.header.frame_id = pb_msg.header.frame_id
        ros_msg.data = pb_msg.data
        ros_msg.b_o_s = pb_msg.b_o_s
        ros_msg.e_o_s = pb_msg.e_o_s
        ros_msg.granulepos = pb_msg.granulepos
        ros_msg.packetno = pb_msg.packetno
        self.pub.publish(ros_msg)
        return ros_pb.Empty()

    def Subscribe(self, request, context):
        """Yield topic packets as protobuf; polling stops when the RPC is done."""
        c = {'unsubscribed': False}
        ros_messages = []
        def callback(ros_msg):
            ros_messages.append(ros_msg)
        subscription = rospy.Subscriber('/usb_cam/image_raw/theora', self.Msg, callback)
        def on_rpc_done():
            # Flip the flag first so the polling loop below exits promptly.
            c['unsubscribed'] = True
            print("Attempting to regain servicer thread...", c)
            subscription.unregister()
        context.add_callback(on_rpc_done)
        while not c['unsubscribed']:
            while ros_messages:
                ros_msg = ros_messages.pop(0)
                pb_msg = ros_pb.theora_image_transport.Packet()
                pb_msg.header.seq = ros_msg.header.seq
                pb_msg.header.stamp.secs = ros_msg.header.stamp.secs
                pb_msg.header.stamp.nsecs = ros_msg.header.stamp.nsecs
                pb_msg.header.frame_id = ros_msg.header.frame_id
                pb_msg.data = ros_msg.data
                pb_msg.b_o_s = ros_msg.b_o_s
                pb_msg.e_o_s = ros_msg.e_o_s
                pb_msg.granulepos = ros_msg.granulepos
                pb_msg.packetno = ros_msg.packetno
                yield pb_msg
            rospy.sleep(0.01)
class usb_cam_image_raw_theora_parameter_descriptionsServicer(ros_grpc.usb_cam_image_raw_theora_parameter_descriptionsServicer):
def __init__(self):
self.pub = None
self.Msg = roslib.message.get_message_class('dynamic_reconfigure/ConfigDescription')
def Publish(self, pb_msg, context):
if self.pub == None:
self.pub = rospy.Publisher('/usb_cam/image_raw/theora/parameter_descriptions', self.Msg, queue_size=10)
ros_msg = self.Msg()
for pb_msg_ in pb_msg.groups:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/Group')()
ros_msg_.name = pb_msg_.name
ros_msg_.type = pb_msg_.type
for pb_msg__ in pb_msg_.parameters:
ros_msg__ = roslib.message.get_message_class('dynamic_reconfigure/ParamDescription')()
ros_msg__.name = pb_msg__.name
ros_msg__.type = pb_msg__.type
ros_msg__.level = pb_msg__.level
ros_msg__.description = pb_msg__.description
ros_msg__.edit_method = pb_msg__.edit_method
ros_msg_.parameters.append(ros_msg__)
ros_msg_.parent = pb_msg_.parent
ros_msg_.id = pb_msg_.id
ros_msg.groups.append(ros_msg_)
for pb_msg_ in pb_msg.max.bools:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/BoolParameter')()
ros_msg_.name = pb_msg_.name
ros_msg_.value = pb_msg_.value
ros_msg.max.bools.append(ros_msg_)
for pb_msg_ in pb_msg.max.ints:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/IntParameter')()
ros_msg_.name = pb_msg_.name
ros_msg_.value = pb_msg_.value
ros_msg.max.ints.append(ros_msg_)
for pb_msg_ in pb_msg.max.strs:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/StrParameter')()
ros_msg_.name = pb_msg_.name
ros_msg_.value = pb_msg_.value
ros_msg.max.strs.append(ros_msg_)
for pb_msg_ in pb_msg.max.doubles:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/DoubleParameter')()
ros_msg_.name = pb_msg_.name
ros_msg_.value = pb_msg_.value
ros_msg.max.doubles.append(ros_msg_)
for pb_msg_ in pb_msg.max.groups:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/GroupState')()
ros_msg_.name = pb_msg_.name
ros_msg_.state = pb_msg_.state
ros_msg_.id = pb_msg_.id
ros_msg_.parent = pb_msg_.parent
ros_msg.max.groups.append(ros_msg_)
for pb_msg_ in pb_msg.min.bools:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/BoolParameter')()
ros_msg_.name = pb_msg_.name
ros_msg_.value = pb_msg_.value
ros_msg.min.bools.append(ros_msg_)
for pb_msg_ in pb_msg.min.ints:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/IntParameter')()
ros_msg_.name = pb_msg_.name
ros_msg_.value = pb_msg_.value
ros_msg.min.ints.append(ros_msg_)
for pb_msg_ in pb_msg.min.strs:
ros_msg_ = roslib.message.get_message_class('dynamic_reconfigure/StrParameter')()
ros_msg_.name = pb_msg_.name
ros_msg_.value = pb_msg_.value
ros_msg.min.strs.append(ros_msg_)
for pb_msg_ in pb_msg.min.doubles:
ros_msg_ | |
new_var, key=ME_TAG, role=ME_TAG)
self.graph.add_edge(new_var, n, key=ME_TAG+"-of", role=ME_TAG+"-of")
used_nodes = [TOP_NODE]
discontinuous=[]
top_align, throwup_discon = fix_helper((None,""),TOP_NODE)
if len(throwup_discon) > 0:
for (r_h, h, r_d, d) in throwup_discon:
logger.info("there should no throwup_discon finally: {}".format((r_h, self.graph.node[h], r_d, self.graph.node[d])))
self.fixed_graph = self.graph.copy()
self.fixed = []
#logger.info("fix discontinous_tokens: {}".format(discontinuous))
# may not be the root, should the lowest common ancestor of its left token and its right token.
for (p_r_h, h, p_r_d, d, p_r_parent, parent) in discontinuous:
p_h_align = self.fixed_graph.node[p_r_h[0]]['align'] if p_r_h[0] else None
p_r_align = self.fixed_graph.node[p_r_d[0]]['align'] if p_r_d[0] else None
p_parent_align = self.fixed_graph.node[p_r_parent[0]]['align'] if p_r_parent[0] else None
self.fixed_graph.remove_edge(h, d)
self.fixed_graph.remove_edge(d, h)
self.fixed_graph.add_edge(parent, d, key=p_r_d[1], role=p_r_d[1])
self.fixed_graph.add_edge(d, parent, key=p_r_d[1]+"-of", role=p_r_d[1]+"-of")
self.fixed.append((((p_h_align, p_r_h[1]),self.fixed_graph.node[h]['align']),((p_r_align, p_r_d[1]), self.fixed_graph.node[d]['align']), ((p_parent_align, p_r_parent[1]), self.fixed_graph.node[parent]['align'])))
#logger.error("Do edge changes for discontinuous:{}".format((p_r_h, self.fixed_graph.node[h]['align'], p_r_d, self.fixed_graph.node[d]['align'], p_r_parent, self.fixed_graph.node[parent]['align'])))
#logger.error("after fix graph: for {}\n fixed_grpah_nodes:{}\n fixed_graph_edges:{}\n".format(self.id, self.fixed_graph.nodes.data(), self.fixed_graph.edges.data()))
@staticmethod
def get_max_con_list(sorted_list):
len_c = len(sorted_list)
max_con_size = 0
max_con_index = 0
continuous = []
con_list = []
for i in range(len_c):
continuous.append(sorted_list[i])
if sorted_list[i] + 1 not in sorted_list:
con_list.append(copy.deepcopy(continuous))
# use right most continuous
if len(continuous) >= max_con_size:
max_con_index = len(con_list) - 1
max_con_size = len(continuous)
continuous = []
return con_list, max_con_index
#
# def toPTBTree(self, tokens, tok_anchors, pos_tokens):
# def helper(start, end):
# children = []
# current_end = start
# while current_end < end:
# current_start = current_end+1
# max_length = -1
# max_node = None
# max_start = 0
# max_end = -1
# for node in self.graph.nodes:
# cur_node = self.graph[node]
# if 'used' not in cur_node and 'anchors' in cur_node:
# label = cur_node["value"].get_label()
# anchors = self.graph[node]['anchors']
# if anchors and anchors[0]['from'] == start:
# # find one starting from the start value
# cur_len = anchors[0]['to'] - start
# if cur_len > max_length and :
# max_length = cur_len
# max_node = node
# max_start = anchors[0]['from']
# max_end = anchors[0]['to']
# else:
# continue
# else:
# continue
# if max_node:
# # find a max continus node from start
# self.graph[max_node]['used'] = True
# # adding to into children
# subchildren = helper(max_start, max_end)
# children.append(InternalTreebankNode(self.graph[max_node]['value'].get_label() , sub_children))
#
# return children
#
# # assuming the last token_acnhor is the last one
# if toke_anchors:
# helper(-1, tok_anchors[-1][-1]["to"])
def get_gold(self):
"""
for a EDSGraph, return all the gold concept and roles.
"""
cons = []
roles = []
for n, d in self.graph.nodes(True):
# add gold concepts into a list
if "gold" in d:
v = d["value"]
cons.append(v)
for h, d, _, rel in self.graph.edges(keys=True,data=True):
# add roles into a list, every role is [h, d, r]
r = rel["role"]
# during learning, only predict the cannonical edge labels, its inversed version is only for connectivity of DiGraph
if self.cannonical(r):
assert "gold" in self.graph.node[h] and "gold" in self.graph.node[d]
h = self.graph.node[h]["value"]
d = self.graph.node[d]["value"]
roles.append([h,d,r])
if self.root:
root = self.graph.node[self.root]["value"]
# todo: add a special Node for EDS
roles.append([EDSUniversal.TOP_EDSUniversal(),root,':top'])
# WARN: here roles may not contains any top relations
return cons,roles
    def __getitem__(self, item):
        # Delegate indexing to the node-attribute dict of the underlying graph,
        # so graph_obj[node_var] returns that node's attribute dict.
        return self.graph.node[item]
#check whether the relation is in the cannonical direction
# ARG0, ARG1, ... ARGn as core rel
# BV also as core rel.
# compound and mwe are special relation, which are usually happened in consecutive tokens.
# Now also make the model to learn this.
def cannonical(self,r):
# now all rel are core, and all forward are cannonical
return ("-of" not in r and self.is_core(r))
@staticmethod
def is_core(r):
"""
for EDS, now treat all edge as core rel
"""
return ("-of" not in r)
    @staticmethod
    def is_inversed_edge(role):
        # NOTE(review): dead code — this definition is immediately shadowed by the
        # duplicate is_inversed_edge defined right below it in the class body, so
        # this version is never the one callers reach. Safe to remove once confirmed.
        return role.endswith("-of")
@staticmethod
def is_inversed_edge(edge):
if edge.endswith("-of"):
return True
else:
return False
@staticmethod
def get_inversed_edge(edge):
if edge.endswith("-of"):
inverse = edge[:-3]
else:
inverse = edge + "-of"
return inverse
@staticmethod
def get_normalizede_edge(edge):
if is_inversed_edge(edge):
return get_inversed_edge(edge)
else:
return edge
    def getRoles(self,node,index_dict,rel_index,relyed = None):
        """
        Get all the canonical roles attached to a node.

        node : node variable
        index_dict : dict(key=node, value=intIndex); index order over the
            recategorized nodes
        rel_index : dict(key=node, value=intIndex); index order over the gold nodes
        relyed : optional node whose recategorized index should be used in place
            of node's own
        return [[nodeValue, nodeReCateIndex], [[rel, depGoldIndex], ...]]
        """
        # (amruniversal,index,[[role,rel_index]])
        # Diagnostic prints: flag nodes missing from the expected index dicts.
        if relyed and relyed not in index_dict:
            print ("rely",node,relyed,self.graph.node[relyed]["value"],index_dict,self._anno)
        elif relyed is None and node not in index_dict: print (self.graph.node[node]["value"])
        # get only the original node index, this index is recategorised index
        index = index_dict[node] if relyed is None else index_dict[relyed]
        out = []
        # if self.graph.node[node]["value"].le != "name":
        # self.graph[node] returns all the adj node in a dict(key=neighbor, value=attributes)
        for n2 in self.graph[node]:
            # the role from n2 to node (multigraph: possibly several keyed edges)
            for key, edge_data in self.graph[node][n2].items():
                r = edge_data["role"]
                # only canonical (forward, core) edges are collected
                if self.cannonical(r):
                    if n2 not in rel_index:
                        print(self._anno)
                    # out is [rel_role, gold_dep_node_id]
                    out.append([r,rel_index[n2]])
        return [[self.graph.node[node]["value"],index], out]
def link(self,o_node,n_node,rel):
"""
link o_node(original_node) and n_node(new_node), orginal-of is an array, storing all the recategorzied new nodes. orignal-of or has-original are about the relation ship between the original node and its recategorized node.
The primary node will have attributes "original-of", if rely is true, then it will also have a "rely" attribute.
"""
self.graph.node[o_node].setdefault("original-of",[]).append( n_node ) # for storing order of replacement
if n_node:
# make the opposite relation, has-original
self.graph.node[n_node]["has-original"] = o_node
if rel: self.rely(o_node,n_node)
def add_companion_tag_node(self, parent, cat, tag, index):
var = EDSVar(cat+str(index))
# for new ndoe, we only add tag part, it is united length
uni = EDSUniversal("", "", "","",tag)
self.graph.add_node(var, value=uni, align=[index])
# for true now, we add the has original relation
self.link(parent, var, rel=True)
def rely(self,o_node,n_node):
"""
set rely relation, it is an attribute of original_node, value is the new node
"""
# if o_nodei(original_node) already rely on some node, then don't set it
if "rely" in self.graph.node[o_node]:
return
# set the n_node as the rely for o_node
self.graph.node[o_node].setdefault("rely",n_node)
#return data for training concept identification or relation identification
def node_value(self, keys=["value"], all=False):
def concept_concept():
"""
out: all nodes after recategorizing
index_dict, [key:node, value: index], index is the order for transduce the node in the AMR graph.
"""
# out is an array, [[subnode1, subnode-attr], [subnode2, subnode]]
out = []
# index the order id of a node.
index = 0
# save a node2index dict
index_dict ={}
for n, d in self.graph.nodes(True):
# https://networkx.github.io/documentation/networkx-2.1/reference/classes/generated/networkx.Graph.nodes.html
# n is the node, d is the data with all attributes
# graph.nodes(True), means return entire node attribute dict
# if it has recategorized new nodes, iterate its recategorizeed nodes, only add the combination nodes, not the original node
if "original-of" in d:
comps = d["original-of"]
for comp in comps:
if comp is None:
continue
comp_d = self.graph.node[comp]
# output a (node, value1, value2)
# by default key is value, which is AMRUniversal Node of that node.
out.append([comp] + [comp_d[k] for k in keys])
index_dict[comp] = index
index += 1
elif not ("has-original" in d or "rely" in d):
# TODO: all node in EDS is the original node, without categorizing
# not a recategorized node, just use that node itself.
out.append([n] + [d[k] for k in keys])
index_dict[n] = index
index += 1
# out is an array, [[subnode1, subnode-attr], [subnode2, subnode2_attr]]
# index_dict is dict(key=node, value = intIndex)
return out,index_dict
def rel_concept():
"""
return the gold node and its node2index dict
"""
index = 0
rel_index ={}
# rel_out is in shape like [[n, d]]
rel_out = []
# If True, return entire node attribute dict as (n, ddict).
# n is node Varible, d is all the dict attributes, index is nodes index.
for n, d in self.graph.nodes(True):
if "gold" in d:
rel_out.append([n,d])
rel_index[n] = index
index += 1
# rel_out is in shape like [[var, Node]]
# rel_index is dict(key = var, value= Int index)
return rel_out,rel_index
# out: all the nodes after recategorization
# index_dict, [key:node, value: index], index is the order for transduce the node in the AMR graph.
out,index_dict = concept_concept()
if all:
# all means all attributes
# rel_out: all the gold concepts
# rel_index: a different index from gold node transduce order.
rel_out, rel_index = rel_concept()
for i, n_d in enumerate( rel_out):
n,d = n_d
# rely means n is a original node
if "rely" in d:
# | |
sports|
"""
survey = self.md_to_pyxform_survey(md, {'name': 'sports'})
data = [{"Sport": "Basketball", "sport": "Soccer",
'_submission_time': '2016-11-21T03:43:43.000-08:00'}]
export_builder = ExportBuilder()
export_builder.set_survey(survey)
temp_zip_file = NamedTemporaryFile(suffix='.zip')
export_builder.to_zipped_sav(temp_zip_file.name, data)
temp_zip_file.seek(0)
temp_dir = tempfile.mkdtemp()
zip_file = zipfile.ZipFile(temp_zip_file.name, "r")
zip_file.extractall(temp_dir)
zip_file.close()
temp_zip_file.close()
# check that the children's file (which has the unicode header) exists
self.assertTrue(
os.path.exists(
os.path.join(temp_dir, "sports.sav")))
# check file's contents
with SavReader(os.path.join(temp_dir, "sports.sav"),
returnHeader=True) as reader:
rows = [r for r in reader]
# Check that columns are present
self.assertIn(b"Sport", rows[0])
# Check for sport in first 5 characters
# because rows contains 'sport@d4b6'
self.assertIn(b"sport", [x[0:5] for x in rows[0]])
def test_xls_export_works_with_unicode(self):
    """Exporting a survey with unicode, quoted choice names to XLSX works."""
    survey = create_survey_from_xls(_logger_fixture_path(
        'childrens_survey_unicode.xls'))
    builder = ExportBuilder()
    builder.set_survey(survey)
    xlsx_file = NamedTemporaryFile(suffix='.xlsx')
    builder.to_xls_export(xlsx_file.name, self.data_utf8)
    xlsx_file.seek(0)
    # check that values for red's and blue's are set to true
    workbook = load_workbook(xlsx_file.name)
    sheet = workbook["children.info"]
    data = {header.value: cell.value for header, cell in sheet.columns}
    self.assertTrue(data["children.info/fav_colors/red's"])
    self.assertTrue(data["children.info/fav_colors/blue's"])
    self.assertFalse(data["children.info/fav_colors/pink's"])
    xlsx_file.close()
def test_xls_export_with_hxl_adds_extra_row(self):
    """A form with an `instance::hxl` column gets an extra HXL header row."""
    # hxl_example.xlsx contains `instance::hxl` column whose value is #age
    xlsform_path = os.path.join(
        settings.PROJECT_ROOT, "apps", "main", "tests", "fixtures",
        "hxl_test", "hxl_example.xlsx")
    survey = create_survey_from_xls(xlsform_path)
    builder = ExportBuilder()
    builder.INCLUDE_HXL = True
    builder.set_survey(survey)
    xlsx_file = NamedTemporaryFile(suffix='.xlsx')
    matches = [item_value for item_key, item_value in survey.items()
               if item_key == 'children']
    survey_elements = matches[0]
    columns_with_hxl = builder.INCLUDE_HXL and get_columns_with_hxl(
        survey_elements
    )
    builder.to_xls_export(
        xlsx_file.name, self.data_utf8,
        columns_with_hxl=columns_with_hxl)
    xlsx_file.seek(0)
    workbook = load_workbook(xlsx_file.name)
    sheet = workbook["hxl_example"]
    self.assertTrue(sheet)
    # the first row holds xform fieldnames, so the HXL tags are in row two
    all_rows = list(sheet.rows)
    hxl_row = [cell.value for cell in all_rows[1]]
    self.assertIn('#age', hxl_row)
def test_export_with_image_attachments(self):
    """
    Test that the url for images is displayed correctly in exports
    """
    md = """
| survey | | | |
| | type | name | label |
| | image | image1 | Photo |
"""
    self._create_user_and_login()
    self.xform = self._publish_markdown(md, self.user)
    # submission XML that references the attachment by filename
    xml_string = """
<data id="{}">
<meta>
<instanceID>uuid:UJ6jSMAJ1Jz4EszdgHy8n852AsKaqBPO5</instanceID>
</meta>
<image1>1300221157303.jpg</image1>
</data>
""".format(self.xform.id_string)
    file_path = "{}/apps/logger/tests/Health_2011_03_13."\
        "xml_2011-03-15_20-30-28/1300221157303"\
        ".jpg".format(settings.PROJECT_ROOT)
    media_file = django_file(path=file_path,
                             field_name="image1",
                             content_type="image/jpeg")
    # store the submission together with its image attachment
    create_instance(self.user.username,
                    BytesIO(xml_string.strip().encode('utf-8')),
                    media_files=[media_file])
    xdata = query_data(self.xform)
    survey = self.md_to_pyxform_survey(md, {'name': 'exp'})
    export_builder = ExportBuilder()
    export_builder.set_survey(survey)
    temp_xls_file = NamedTemporaryFile(suffix='.xlsx')
    export_builder.to_xls_export(temp_xls_file, xdata)
    temp_xls_file.seek(0)
    wb = load_workbook(temp_xls_file)
    children_sheet = wb["exp"]
    self.assertTrue(children_sheet)
    # rows[0] holds the headers, rows[1] the first (only) data row
    rows = [row for row in children_sheet.rows]
    row = [a.value for a in rows[1]]
    attachment_id = xdata[0]['_attachments'][0]['id']
    # the export should carry the full API download URL for the image
    attachment_url = 'http://example.com/api/v1/files/{}?filename=bob/attachments/{}_{}/1300221157303.jpg'.format(attachment_id, self.xform.id, self.xform.id_string)  # noqa
    self.assertIn(attachment_url, row)
    temp_xls_file.close()
def test_generation_of_multi_selects_works(self):
    """set_survey collects each select-multiple question's choice xpaths."""
    survey = self._create_childrens_survey()
    export_builder = ExportBuilder()
    export_builder.set_survey(survey)
    # expected mapping: section name -> multi-select xpath -> choice xpaths
    expected_select_multiples =\
        {
            'children':
            {
                'children/fav_colors':
                [
                    'children/fav_colors/red', 'children/fav_colors/blue',
                    'children/fav_colors/pink'
                ],
                'children/ice.creams':
                [
                    'children/ice.creams/vanilla',
                    'children/ice.creams/strawberry',
                    'children/ice.creams/chocolate'
                ]
            }
        }
    select_multiples = export_builder.select_multiples
    self.assertTrue('children' in select_multiples)
    self.assertTrue('children/fav_colors' in select_multiples['children'])
    self.assertTrue('children/ice.creams' in select_multiples['children'])
    # the builder stores choice dicts; compare just their xpaths
    self.assertEqual(
        sorted([
            choice['xpath'] for choice in
            select_multiples['children']['children/fav_colors']]),
        sorted(
            expected_select_multiples['children']['children/fav_colors']))
    self.assertEqual(
        sorted([choice['xpath'] for choice in
                select_multiples['children']['children/ice.creams']]),
        sorted(
            expected_select_multiples['children']['children/ice.creams']))
def test_split_select_multiples_works(self):
    """
    Test split_select_multiples works as expected.
    """
    # choices of a single select-multiple question
    choices = {
        'children/fav_colors': [
            {'xpath': 'children/fav_colors/red', 'label': 'fav_colors/Red'},
            {'xpath': 'children/fav_colors/blue', 'label': 'fav_colors/Blue'},
            {'xpath': 'children/fav_colors/pink', 'label': 'fav_colors/Pink'},
        ]
    }
    # a row where two of the three choices were selected
    answered_row = {
        'children/name': 'Mike',
        'children/age': 5,
        'children/fav_colors': 'red blue'
    }
    actual = ExportBuilder.split_select_multiples(answered_row, choices)
    self.assertEqual(actual, {
        'children/name': 'Mike',
        'children/age': 5,
        'children/fav_colors': 'red blue',
        'children/fav_colors/red': True,
        'children/fav_colors/blue': True,
        'children/fav_colors/pink': False
    })
    # an unanswered question yields None for every choice column
    unanswered_row = {
        'children/name': 'Mike',
        'children/age': 5,
    }
    actual = ExportBuilder.split_select_multiples(unanswered_row, choices)
    self.assertEqual(actual, {
        'children/name': 'Mike',
        'children/age': 5,
        'children/fav_colors/red': None,
        'children/fav_colors/blue': None,
        'children/fav_colors/pink': None
    })
def test_split_select_mutliples_works_with_int_value_in_row(self):
    """An integer answer to a select-multiple is split without crashing."""
    choices = {
        'children/fav_number': [
            {'xpath': 'children/fav_number/1'},
            {'xpath': 'children/fav_number/2'},
            {'xpath': 'children/fav_number/3'},
        ]
    }
    row = {'children/fav_number': 1}
    actual = ExportBuilder.split_select_multiples(row, choices)
    self.assertTrue(actual)
    # only the chosen value maps to True; the raw column is preserved
    self.assertEqual(actual, {
        'children/fav_number': 1,
        'children/fav_number/1': True,
        'children/fav_number/2': False,
        'children/fav_number/3': False
    })
def test_split_select_multiples_works_when_data_is_blank(self):
    """A blank select-multiple answer yields None for every choice column."""
    choices = {
        'children/fav_colors': [
            {'xpath': 'children/fav_colors/red', 'label': 'fav_colors/Red'},
            {'xpath': 'children/fav_colors/blue', 'label': 'fav_colors/Blue'},
            {'xpath': 'children/fav_colors/pink', 'label': 'fav_colors/Pink'},
        ]
    }
    row = {
        'children/name': 'Mike',
        'children/age': 5,
        'children/fav_colors': ''
    }
    actual = ExportBuilder.split_select_multiples(row, choices)
    self.assertEqual(actual, {
        'children/name': 'Mike',
        'children/age': 5,
        'children/fav_colors': '',
        'children/fav_colors/red': None,
        'children/fav_colors/blue': None,
        'children/fav_colors/pink': None
    })
def test_generation_of_gps_fields_works(self):
    """set_survey derives the four split gps component fields."""
    survey = self._create_childrens_survey()
    builder = ExportBuilder()
    builder.set_survey(survey)
    expected = {
        'childrens_survey': {
            'geo/geolocation': [
                'geo/_geolocation_latitude',
                'geo/_geolocation_longitude',
                'geo/_geolocation_altitude',
                'geo/_geolocation_precision'
            ]
        }
    }
    gps_fields = builder.gps_fields
    self.assertTrue('childrens_survey' in gps_fields)
    self.assertEqual(
        sorted(gps_fields['childrens_survey']),
        sorted(expected['childrens_survey']))
def test_split_gps_components_works(self):
    """A gps value is split into lat/lng/alt/precision columns."""
    gps_fields = {
        'geo/geolocation': [
            'geo/_geolocation_latitude', 'geo/_geolocation_longitude',
            'geo/_geolocation_altitude', 'geo/_geolocation_precision'
        ]
    }
    row = {'geo/geolocation': '1.0 36.1 2000 20'}
    actual = ExportBuilder.split_gps_components(row, gps_fields)
    # the raw column is preserved alongside the four components
    self.assertEqual(actual, {
        'geo/geolocation': '1.0 36.1 2000 20',
        'geo/_geolocation_latitude': '1.0',
        'geo/_geolocation_longitude': '36.1',
        'geo/_geolocation_altitude': '2000',
        'geo/_geolocation_precision': '20'
    })
def test_split_gps_components_works_when_gps_data_is_blank(self):
    """A blank gps value produces no extra component columns."""
    gps_fields = {
        'geo/geolocation': [
            'geo/_geolocation_latitude', 'geo/_geolocation_longitude',
            'geo/_geolocation_altitude', 'geo/_geolocation_precision'
        ]
    }
    row = {'geo/geolocation': ''}
    actual = ExportBuilder.split_gps_components(row, gps_fields)
    # the row comes back unchanged: no lat/lng/alt/precision keys added
    self.assertEqual(actual, {'geo/geolocation': ''})
def test_generation_of_mongo_encoded_fields_works(self):
    """set_survey records the mongo-encoded aliases of dotted field names."""
    survey = self._create_childrens_survey()
    builder = ExportBuilder()
    builder.set_survey(survey)
    # dots are illegal in mongo keys, so dotted names get encoded aliases
    expected = {
        'childrens_survey': {
            'tel/tel.office': 'tel/{0}'.format(
                _encode_for_mongo('tel.office')),
            'tel/tel.mobile': 'tel/{0}'.format(
                _encode_for_mongo('tel.mobile')),
        }
    }
    encoded_fields = builder.encoded_fields
    self.assertTrue('childrens_survey' in encoded_fields)
    self.assertEqual(
        encoded_fields['childrens_survey'],
        expected['childrens_survey'])
def test_decode_fields_names_encoded_for_mongo(self):
    """Mongo-encoded field names are decoded back to their dotted form."""
    encoded_fields = {
        'tel/tel.office': 'tel/{0}'.format(_encode_for_mongo('tel.office'))
    }
    row = {
        'name': 'Abe',
        'age': 35,
        'tel/{0}'.format(_encode_for_mongo('tel.office')): '123-456-789'
    }
    decoded = ExportBuilder.decode_mongo_encoded_fields(row, encoded_fields)
    # the encoded key is replaced by the original dotted key
    self.assertEqual(decoded, {
        'name': 'Abe',
        'age': 35,
        'tel/tel.office': '123-456-789'
    })
def test_generate_field_title(self):
    """format_field_title joins group and field names with the delimiter."""
    # publishing the survey also populates self.dd
    self._create_childrens_survey()
    title = ExportBuilder.format_field_title(
        "children/age", ".", data_dictionary=self.dd)
    self.assertEqual(title, "children.age")
def test_delimiter_replacement_works_existing_fields(self):
    """With GROUP_DELIMITER '.', titles of existing fields use dots."""
    survey = self._create_childrens_survey()
    builder = ExportBuilder()
    builder.GROUP_DELIMITER = "."
    builder.set_survey(survey)
    # the first element of the children section is children/name
    children_section = builder.section_by_name('children')
    self.assertEqual(
        children_section['elements'][0]['title'],
        'children.name')
def test_delimiter_replacement_works_generated_multi_select_fields(self):
    """With GROUP_DELIMITER '.', generated multi-select titles use dots."""
    survey = self._create_childrens_survey()
    builder = ExportBuilder()
    builder.GROUP_DELIMITER = "."
    builder.set_survey(survey)
    target_xpath = 'children/fav_colors/red'
    section = builder.section_by_name('children')
    match = [element for element in section['elements']
             if element['xpath'] == target_xpath][0]
    self.assertEqual('children.fav_colors.red', match['title'])
def test_delimiter_replacement_works_for_generated_gps_fields(self):
    """With GROUP_DELIMITER '.', generated gps component titles use dots."""
    survey = self._create_childrens_survey()
    builder = ExportBuilder()
    builder.GROUP_DELIMITER = "."
    builder.set_survey(survey)
    target_xpath = 'geo/_geolocation_latitude'
    section = builder.section_by_name('childrens_survey')
    match = [element for element in section['elements']
             if element['xpath'] == target_xpath][0]
    self.assertEqual('geo._geolocation_latitude', match['title'])
def test_to_xls_export_works(self):
    """Exported workbook has one sheet per (nested) repeat with full headers."""
    survey = self._create_childrens_survey()
    export_builder = ExportBuilder()
    export_builder.set_survey(survey)
    xls_file = NamedTemporaryFile(suffix='.xls')
    filename = xls_file.name
    export_builder.to_xls_export(filename, self.data)
    xls_file.seek(0)
    wb = xlrd.open_workbook(filename)
    # check that we have childrens_survey, children, children_cartoons
    # and children_cartoons_characters sheets
    expected_sheet_names = ['childrens_survey', 'children',
                            'children_cartoons',
                            'children_cartoons_characters']
    self.assertEqual(wb.sheet_names(), expected_sheet_names)
    # check header columns
    main_sheet = wb.sheet_by_name('childrens_survey')
    expected_column_headers = [
        'name', 'age', 'geo/geolocation', 'geo/_geolocation_latitude',
        'geo/_geolocation_longitude', 'geo/_geolocation_altitude',
        'geo/_geolocation_precision', 'tel/tel.office',
        'tel/tel.mobile', '_id', 'meta/instanceID', '_uuid',
        '_submission_time', '_index', '_parent_index',
        '_parent_table_name', '_tags', '_notes', '_version',
        '_duration', '_submitted_by']
    column_headers = main_sheet.row_values(0)
    self.assertEqual(sorted(column_headers),
                     sorted(expected_column_headers))
    # the repeat sheet carries both the raw multi-select column and one
    # column per choice
    childrens_sheet = wb.sheet_by_name('children')
    expected_column_headers = [
        'children/name', 'children/age', 'children/fav_colors',
        'children/fav_colors/red', 'children/fav_colors/blue',
        'children/fav_colors/pink', 'children/ice.creams',
        'children/ice.creams/vanilla', 'children/ice.creams/strawberry',
        'children/ice.creams/chocolate', '_id', '_uuid',
        '_submission_time', '_index', '_parent_index',
        '_parent_table_name', '_tags', '_notes', '_version',
        '_duration', '_submitted_by']
    column_headers = childrens_sheet.row_values(0)
    self.assertEqual(sorted(column_headers),
                     sorted(expected_column_headers))
    cartoons_sheet = wb.sheet_by_name('children_cartoons')
    expected_column_headers = [
        'children/cartoons/name', 'children/cartoons/why', '_id',
        '_uuid', '_submission_time', '_index', '_parent_index',
        '_parent_table_name', '_tags', '_notes', '_version',
        '_duration', '_submitted_by']
    column_headers = cartoons_sheet.row_values(0)
    self.assertEqual(sorted(column_headers),
                     sorted(expected_column_headers))
    # doubly-nested repeat gets its own sheet too
    characters_sheet = wb.sheet_by_name('children_cartoons_characters')
    expected_column_headers = [
        'children/cartoons/characters/name',
        'children/cartoons/characters/good_or_evil', '_id', '_uuid',
        '_submission_time', '_index', '_parent_index',
        '_parent_table_name', '_tags', '_notes', '_version',
        '_duration', '_submitted_by']
    column_headers = characters_sheet.row_values(0)
    self.assertEqual(sorted(column_headers),
                     sorted(expected_column_headers))
    xls_file.close()
def test_to_xls_export_respects_custom_field_delimiter(self):
    """With the dot delimiter, exported headers use '.' instead of '/'."""
    survey = self._create_childrens_survey()
    export_builder = ExportBuilder()
    export_builder.GROUP_DELIMITER = ExportBuilder.GROUP_DELIMITER_DOT
    export_builder.set_survey(survey)
    xls_file = NamedTemporaryFile(suffix='.xls')
    filename = xls_file.name
    export_builder.to_xls_export(filename, self.data)
    xls_file.seek(0)
    wb = xlrd.open_workbook(filename)
    # check header columns
    main_sheet = wb.sheet_by_name('childrens_survey')
    # note: field names that themselves contain dots (tel.office) end up
    # with two dots once the group delimiter is applied
    expected_column_headers = [
        'name', 'age', 'geo.geolocation', 'geo._geolocation_latitude',
        'geo._geolocation_longitude', 'geo._geolocation_altitude',
        'geo._geolocation_precision', 'tel.tel.office',
        'tel.tel.mobile', '_id', 'meta.instanceID', '_uuid',
        '_submission_time', '_index', '_parent_index',
        '_parent_table_name', '_tags', '_notes', '_version',
        '_duration', '_submitted_by']
    column_headers = main_sheet.row_values(0)
    self.assertEqual(sorted(column_headers),
                     sorted(expected_column_headers))
    xls_file.close()
def test_get_valid_sheet_name_catches_duplicates(self):
    """A sheet name that already exists gets a numeric suffix."""
    existing = {'childrens_survey': "Worksheet"}
    generated = ExportBuilder.get_valid_sheet_name(
        "childrens_survey", existing)
    self.assertEqual(generated, "childrens_survey1")
def test_get_valid_sheet_name_catches_long_names(self):
    """An over-long sheet name is truncated to the 31-character limit."""
    generated = ExportBuilder.get_valid_sheet_name(
        "childrens_survey_with_a_very_long_name", [])
    self.assertEqual(generated, "childrens_survey_with_a_very_lo")
def test_get_valid_sheet_name_catches_long_duplicate_names(self):
    """A truncated name that collides is re-suffixed within the limit."""
    existing_titles = ['childrens_survey_with_a_very_lo']
    generated = ExportBuilder.get_valid_sheet_name(
        "childrens_survey_with_a_very_long_name", existing_titles)
    self.assertEqual(generated, "childrens_survey_with_a_very_l1")
def test_to_xls_export_generates_valid_sheet_names(self):
    """Long repeat names are truncated and de-duplicated in sheet names."""
    survey = create_survey_from_xls(_logger_fixture_path(
        'childrens_survey_with_a_very_long_name.xls'))
    export_builder = ExportBuilder()
    export_builder.set_survey(survey)
    xls_file = NamedTemporaryFile(suffix='.xls')
    filename = xls_file.name
    export_builder.to_xls_export(filename, self.data)
    xls_file.seek(0)
    wb = xlrd.open_workbook(filename)
    # check that we have childrens_survey, children, children_cartoons
    # and children_cartoons_characters sheets
    expected_sheet_names = ['childrens_survey_with_a_very_lo',
                            'childrens_survey_with_a_very_l1',
                            'childrens_survey_with_a_very_l2',
                            'childrens_survey_with_a_very_l3']
    self.assertEqual(wb.sheet_names(), expected_sheet_names)
    xls_file.close()
def test_child_record_parent_table_is_updated_when_sheet_is_renamed(self):
survey = create_survey_from_xls(_logger_fixture_path(
'childrens_survey_with_a_very_long_name.xls'))
export_builder = ExportBuilder()
export_builder.set_survey(survey)
xls_file = NamedTemporaryFile(suffix='.xlsx')
filename = xls_file.name
export_builder.to_xls_export(filename, self.long_survey_data)
xls_file.seek(0)
wb | |
(51, 'K'): 10,
(51, 'L'): 10,
(51, 'M'): 10,
(51, 'N'): 10,
(51, 'O'): 10,
(51, 'P'): 10,
(51, 'Q'): 10,
(51, 'R'): 10,
(51, 'S'): 10,
(51, 'T'): 10,
(51, 'U'): 10,
(51, 'V'): 10,
(51, 'W'): 10,
(51, 'X'): 10,
(51, 'Y'): 10,
(51, 'Z'): 10,
(51, '_'): 10,
(51, 'a'): 10,
(51, 'b'): 10,
(51, 'c'): 10,
(51, 'd'): 10,
(51, 'e'): 52,
(51, 'f'): 10,
(51, 'g'): 10,
(51, 'h'): 10,
(51, 'i'): 10,
(51, 'j'): 10,
(51, 'k'): 10,
(51, 'l'): 10,
(51, 'm'): 10,
(51, 'n'): 10,
(51, 'o'): 10,
(51, 'p'): 10,
(51, 'q'): 10,
(51, 'r'): 10,
(51, 's'): 10,
(51, 't'): 10,
(51, 'u'): 10,
(51, 'v'): 10,
(51, 'w'): 10,
(51, 'x'): 10,
(51, 'y'): 10,
(51, 'z'): 10,
(52, '0'): 10,
(52, '1'): 10,
(52, '2'): 10,
(52, '3'): 10,
(52, '4'): 10,
(52, '5'): 10,
(52, '6'): 10,
(52, '7'): 10,
(52, '8'): 10,
(52, '9'): 10,
(52, 'A'): 10,
(52, 'B'): 10,
(52, 'C'): 10,
(52, 'D'): 10,
(52, 'E'): 10,
(52, 'F'): 10,
(52, 'G'): 10,
(52, 'H'): 10,
(52, 'I'): 10,
(52, 'J'): 10,
(52, 'K'): 10,
(52, 'L'): 10,
(52, 'M'): 10,
(52, 'N'): 10,
(52, 'O'): 10,
(52, 'P'): 10,
(52, 'Q'): 10,
(52, 'R'): 10,
(52, 'S'): 10,
(52, 'T'): 10,
(52, 'U'): 10,
(52, 'V'): 10,
(52, 'W'): 10,
(52, 'X'): 10,
(52, 'Y'): 10,
(52, 'Z'): 10,
(52, '_'): 10,
(52, 'a'): 10,
(52, 'b'): 10,
(52, 'c'): 10,
(52, 'd'): 10,
(52, 'e'): 10,
(52, 'f'): 10,
(52, 'g'): 10,
(52, 'h'): 10,
(52, 'i'): 10,
(52, 'j'): 10,
(52, 'k'): 10,
(52, 'l'): 10,
(52, 'm'): 10,
(52, 'n'): 10,
(52, 'o'): 10,
(52, 'p'): 10,
(52, 'q'): 10,
(52, 'r'): 10,
(52, 's'): 10,
(52, 't'): 10,
(52, 'u'): 10,
(52, 'v'): 10,
(52, 'w'): 10,
(52, 'x'): 10,
(52, 'y'): 10,
(52, 'z'): 10,
(53, '0'): 10,
(53, '1'): 10,
(53, '2'): 10,
(53, '3'): 10,
(53, '4'): 10,
(53, '5'): 10,
(53, '6'): 10,
(53, '7'): 10,
(53, '8'): 10,
(53, '9'): 10,
(53, 'A'): 10,
(53, 'B'): 10,
(53, 'C'): 10,
(53, 'D'): 10,
(53, 'E'): 10,
(53, 'F'): 10,
(53, 'G'): 10,
(53, 'H'): 10,
(53, 'I'): 10,
(53, 'J'): 10,
(53, 'K'): 10,
(53, 'L'): 10,
(53, 'M'): 10,
(53, 'N'): 10,
(53, 'O'): 10,
(53, 'P'): 10,
(53, 'Q'): 10,
(53, 'R'): 10,
(53, 'S'): 10,
(53, 'T'): 10,
(53, 'U'): 10,
(53, 'V'): 10,
(53, 'W'): 10,
(53, 'X'): 10,
(53, 'Y'): 10,
(53, 'Z'): 10,
(53, '_'): 10,
(53, 'a'): 10,
(53, 'b'): 10,
(53, 'c'): 10,
(53, 'd'): 10,
(53, 'e'): 10,
(53, 'f'): 10,
(53, 'g'): 10,
(53, 'h'): 10,
(53, 'i'): 10,
(53, 'j'): 10,
(53, 'k'): 10,
(53, 'l'): 10,
(53, 'm'): 10,
(53, 'n'): 10,
(53, 'o'): 10,
(53, 'p'): 10,
(53, 'q'): 10,
(53, 'r'): 10,
(53, 's'): 10,
(53, 't'): 10,
(53, 'u'): 10,
(53, 'v'): 10,
(53, 'w'): 10,
(53, 'x'): 10,
(53, 'y'): 10,
(53, 'z'): 10,
(54, '='): 63,
(56, '.'): 62,
(57, '='): 61,
(59, '='): 60,
(64, '>'): 66,
(67, '0'): 10,
(67, '1'): 10,
(67, '2'): 10,
(67, '3'): 10,
(67, '4'): 10,
(67, '5'): 10,
(67, '6'): 10,
(67, '7'): 10,
(67, '8'): 10,
(67, '9'): 10,
(67, 'A'): 10,
(67, 'B'): 10,
(67, 'C'): 10,
(67, 'D'): 10,
(67, 'E'): 10,
(67, 'F'): 10,
(67, 'G'): 10,
(67, 'H'): 10,
(67, 'I'): 10,
(67, 'J'): 10,
(67, 'K'): 10,
(67, 'L'): 10,
(67, 'M'): 10,
(67, 'N'): 10,
(67, 'O'): 10,
(67, 'P'): 10,
(67, 'Q'): 10,
(67, 'R'): 10,
(67, 'S'): 10,
(67, 'T'): 10,
(67, 'U'): 10,
(67, 'V'): 10,
(67, 'W'): 10,
(67, 'X'): 10,
(67, 'Y'): 10,
(67, 'Z'): 10,
(67, '_'): 10,
(67, 'a'): 10,
(67, 'b'): 10,
(67, 'c'): 10,
(67, 'd'): 10,
(67, 'e'): 10,
(67, 'f'): 10,
(67, 'g'): 10,
(67, 'h'): 10,
(67, 'i'): 10,
(67, 'j'): 10,
(67, 'k'): 10,
(67, 'l'): 10,
(67, 'm'): 68,
(67, 'n'): 10,
(67, 'o'): 10,
(67, 'p'): 10,
(67, 'q'): 10,
(67, 'r'): 10,
(67, 's'): 10,
(67, 't'): 10,
(67, 'u'): 10,
(67, 'v'): 10,
(67, 'w'): 10,
(67, 'x'): 10,
(67, 'y'): 10,
(67, 'z'): 10,
(68, '0'): 10,
(68, '1'): 10,
(68, '2'): 10,
(68, '3'): 10,
(68, '4'): 10,
(68, '5'): 10,
(68, '6'): 10,
(68, '7'): 10,
(68, '8'): 10,
(68, '9'): 10,
(68, 'A'): 10,
(68, 'B'): 10,
(68, 'C'): 10,
(68, 'D'): 10,
(68, 'E'): 10,
(68, 'F'): 10,
(68, 'G'): 10,
(68, 'H'): 10,
(68, 'I'): 10,
(68, 'J'): 10,
(68, 'K'): 10,
(68, 'L'): 10,
(68, 'M'): 10,
(68, 'N'): 10,
(68, 'O'): 10,
(68, 'P'): 10,
(68, 'Q'): 10,
(68, 'R'): 10,
(68, 'S'): 10,
(68, 'T'): 10,
(68, 'U'): 10,
(68, 'V'): 10,
(68, 'W'): 10,
(68, 'X'): 10,
(68, 'Y'): 10,
(68, 'Z'): 10,
(68, '_'): 10,
(68, 'a'): 10,
(68, 'b'): 10,
(68, 'c'): 10,
(68, 'd'): 10,
(68, 'e'): 10,
(68, 'f'): 10,
(68, 'g'): 10,
(68, 'h'): 10,
(68, 'i'): 10,
(68, 'j'): 10,
(68, 'k'): 10,
(68, 'l'): 10,
(68, 'm'): 10,
(68, 'n'): 10,
(68, 'o'): 10,
(68, 'p'): 10,
(68, 'q'): 10,
(68, 'r'): 10,
(68, 's'): 10,
(68, 't'): 10,
(68, 'u'): 10,
(68, 'v'): 10,
(68, 'w'): 10,
(68, 'x'): 10,
(68, 'y'): 10,
(68, 'z'): 10,
(69, '0'): 10,
(69, '1'): 10,
(69, '2'): 10,
(69, '3'): 10,
(69, '4'): 10,
(69, '5'): 10,
(69, '6'): 10,
(69, '7'): 10,
(69, '8'): 10,
(69, '9'): 10,
(69, 'A'): 10,
(69, 'B'): 10,
(69, 'C'): 10,
(69, 'D'): 10,
(69, 'E'): 10,
(69, 'F'): 10,
(69, 'G'): 10,
(69, 'H'): 10,
(69, 'I'): 10,
(69, 'J'): 10,
(69, 'K'): 10,
(69, 'L'): 10,
(69, 'M'): 10,
(69, 'N'): 10,
(69, 'O'): 10,
(69, 'P'): 10,
(69, 'Q'): 10,
(69, 'R'): 10,
(69, 'S'): 10,
(69, 'T'): 10,
(69, 'U'): 10,
(69, 'V'): 10,
(69, 'W'): 10,
(69, 'X'): 10,
(69, 'Y'): 10,
(69, 'Z'): 10,
(69, '_'): 10,
(69, 'a'): 10,
(69, 'b'): 10,
(69, 'c'): 10,
(69, 'd'): 10,
(69, 'e'): 10,
(69, 'f'): 10,
(69, 'g'): 10,
(69, 'h'): 10,
(69, 'i'): 10,
(69, 'j'): 10,
(69, 'k'): 10,
(69, 'l'): 10,
(69, 'm'): 10,
(69, 'n'): 10,
(69, 'o'): 70,
(69, 'p'): 10,
(69, 'q'): 10,
(69, 'r'): 10,
(69, 's'): 10,
(69, 't'): 10,
(69, 'u'): 10,
(69, 'v'): 10,
(69, 'w'): 10,
(69, 'x'): 10,
(69, 'y'): 10,
(69, 'z'): 10,
(70, '0'): 10,
(70, '1'): 10,
(70, '2'): 10,
(70, '3'): 10,
(70, '4'): 10,
(70, '5'): 10,
(70, '6'): 10,
(70, '7'): 10,
(70, '8'): 10,
(70, '9'): 10,
(70, 'A'): 10,
(70, 'B'): 10,
(70, 'C'): 10,
(70, 'D'): 10,
(70, 'E'): 10,
(70, 'F'): 10,
(70, 'G'): 10,
(70, 'H'): 10,
(70, 'I'): 10,
(70, 'J'): 10,
(70, 'K'): 10,
(70, 'L'): 10,
(70, 'M'): 10,
(70, 'N'): 10,
(70, 'O'): 10,
(70, 'P'): 10,
(70, 'Q'): 10,
(70, 'R'): 10,
(70, 'S'): 10,
(70, 'T'): 10,
(70, 'U'): 10,
(70, 'V'): 10,
(70, 'W'): 10,
(70, 'X'): 10,
(70, 'Y'): 10,
(70, 'Z'): 10,
(70, '_'): 10,
(70, 'a'): 10,
(70, 'b'): 10,
(70, 'c'): 71,
(70, 'd'): 10,
(70, 'e'): 10,
(70, 'f'): 10,
(70, 'g'): 10,
(70, 'h'): 10,
(70, 'i'): 10,
(70, 'j'): 10,
(70, 'k'): 10,
(70, 'l'): 10,
(70, 'm'): 10,
(70, 'n'): 10,
(70, 'o'): 10,
(70, 'p'): 10,
(70, 'q'): 10,
(70, 'r'): 10,
(70, 's'): 10,
(70, 't'): 10,
(70, 'u'): 10,
(70, 'v'): 10,
(70, 'w'): 10,
(70, 'x'): 10,
(70, 'y'): 10,
(70, 'z'): 10,
(71, '0'): | |
# repo: benchenas/BenchENAS (gh stars: 1-10)
import numpy as np
import hashlib
import copy
from compute import Config_ini
from algs.evocnn.genetic.statusupdatetool import StatusUpdateTool
class Unit(object):
    """Base class for every network layer unit.

    Stores only the unit's ordinal ``number`` within the individual.
    """

    def __init__(self, number):
        self.number = number
class ConvUnit(Unit):
    """A convolution layer: filter/stride geometry plus weight-init params."""

    def __init__(self, number, filter_width, filter_height, in_channel, out_channel, stride_width, stride_height,
                 conv_type, mean, std):
        super().__init__(number)
        self.type = 1  # 1 identifies a convolution unit
        self.filter_size = (filter_width, filter_height)
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.stride_size = (stride_width, stride_height)
        # 0 denotes VALID, 1 denotes SAME
        self.conv_type = conv_type
        # mean/std of the gaussian used to initialise the weights
        self.mean = mean
        self.std = std
class PoolUnit(Unit):
    """A pooling layer: kernel/stride geometry plus the pooling mode."""

    def __init__(self, number, max_or_avg, kernel_width, kernel_height, stride_width, stride_height):
        super().__init__(number)
        self.type = 2  # 2 identifies a pooling unit
        self.kernel_size = (kernel_width, kernel_height)
        self.stride_size = (stride_width, stride_height)
        # max_pool for < 0.5 otherwise avg_pool
        self.max_or_avg = max_or_avg
class FcUnit(Unit):
    """A fully-connected layer: neuron counts plus weight-init params."""

    def __init__(self, number, input_neurons_number, output_neurons_number, mean, std):
        super().__init__(number)
        self.type = 3  # 3 identifies a fully-connected unit
        self.input_neurons_number = input_neurons_number
        self.output_neurons_number = output_neurons_number
        # mean/std of the gaussian used to initialise the weights
        self.mean = mean
        self.std = std
class Individual(object):
def __init__(self, params, indi_no):
    """Create an empty individual; call initialize() to populate its units.

    :param params: dict of search-space bounds (keys read below)
    :param indi_no: identifier of this individual
    """
    self.acc_mean = -1.0  # mean validation accuracy; -1.0 means not yet evaluated
    self.acc_std = 0.0  # std of the validation accuracy over evaluations
    self.complexity = 0  # estimated number of connections (see calculate_complexity)
    self.id = indi_no  # for record the id of current individual
    self.number_id = 0  # for record the latest number of basic unit
    self.image_channel = params['image_channel']
    self.min_conv = params['min_conv']  # minimal number of convolution units
    self.max_conv = params['max_conv']  # maximal number of convolution units
    self.min_pool = params['min_pool']  # minimal number of pool units
    self.max_pool = params['max_pool']  # maximal number of pool units
    self.min_fc = params['min_fc']  # minimal number of full connected units
    self.max_fc = params['max_fc']  # maximal number of full connected units
    self.min_std = params['min_std']  # minimal std
    self.max_std = params['max_std']  # maximal std
    self.min_mean = params['min_mean']  # minimal mean
    self.max_mean = params['max_mean']  # maximal mean
    self.min_conv_filter_size = params['conv_filter_size_min']  # the minimal filter size of convolution
    self.max_conv_filter_size = params['conv_filter_size_max']  # the maximal filter size of convolution
    self.min_channel = params['min_channel']  # the min out channel of the convolution unit
    self.max_channel = params['max_channel']  # the max out channel of the convolution unit
    self.pool_kernel_size_list = params['pool_kernel_size_list']  # the kernel size list of pool
    # this is not the true kernel size of the pool, and it's an exponent of 2
    self.min_hidden_neurons = params['min_hidden_neurons']  # the min number of hidden neurons
    self.max_hidden_neurons = params['max_hidden_neurons']  # the max number of hidden neurons
    self.units = []  # ordered list of Conv/Pool/Fc units forming the network
def reset_acc(self):
    """Clear cached fitness statistics so the individual is re-evaluated."""
    self.acc_mean, self.acc_std, self.complexity = -1.0, 0.0, 0
def initialize(self):
    """Randomly build self.units: conv units, then pool units, then fc units.

    The final unit is always a fully-connected layer sized to the number
    of classes (init_a_fc with is_last=True), which is why num_fc is drawn
    from [min_fc - 1, max_fc - 1].
    """
    # initialize how many resnet unit/pooling layer/densenet unit will be used
    num_conv = np.random.randint(self.min_conv, self.max_conv + 1)
    num_pool = np.random.randint(self.min_pool, self.max_pool + 1)
    num_fc = np.random.randint(self.min_fc - 1, self.max_fc)  # because the last unit must be a fc unit
    input_channel = self.image_channel
    img_size = StatusUpdateTool.get_input_size()
    for _ in range(num_conv):
        # generate a conv unit
        conv = self.init_a_conv(_in_channel=input_channel)
        input_channel = conv.out_channel
        self.units.append(conv)
    for _ in range(num_pool):
        # generate a pool unit
        pool = self.init_a_pool()
        self.units.append(pool)
        img_size = int(img_size / pool.kernel_size[0])  # default kernel_size[0] = kernel_size[1]
    # flatten: channels times remaining spatial area feeds the first fc layer
    input_channel = input_channel * img_size ** 2
    for _ in range(num_fc):
        fc = self.init_a_fc(_input_neurons_number=input_channel)
        input_channel = fc.output_neurons_number
        self.units.append(fc)
    last_fc = self.init_a_fc(_number=None, _input_neurons_number=input_channel, _output_neurons_number=None,
                             is_last=True, _mean=None, _std=None)
    self.units.append(last_fc)
def init_a_conv(self, _number=None, _filter_width=None, _filter_height=None, _in_channel=None, _out_channel=None,
                _stride_width=None, _stride_height=None, _conv_type=None, _mean=None, _std=None):
    """Build a ConvUnit, randomising every field that is not supplied.

    Each ``_xxx`` argument overrides the random/default choice when given.
    ``None`` (the default) means "choose for me"; this is tested with
    ``is not None`` so that legitimate falsy overrides (``_number=0``,
    ``_mean=0.0``, ``_conv_type=0`` i.e. VALID) are honoured instead of
    being silently replaced, which the previous truthiness tests got wrong.

    :param _in_channel: input channel count (always used as given)
    :return: a new ConvUnit
    """
    if _number is not None:
        number = _number
    else:
        # consume the next unit id
        number = self.number_id
        self.number_id += 1
    if _filter_width is not None:
        filter_width = _filter_width
    else:
        filter_width = np.random.randint(self.min_conv_filter_size, self.max_conv_filter_size + 1)
    # square filters by default
    filter_height = _filter_height if _filter_height is not None else filter_width
    if _out_channel is not None:
        out_channel = _out_channel
    else:
        out_channel = np.random.randint(self.min_channel, self.max_channel)
    stride_width = _stride_width if _stride_width is not None else 1  # default stride is one
    stride_height = _stride_height if _stride_height is not None else 1  # default stride is one
    conv_type = _conv_type if _conv_type is not None else 1  # default SAME (0 denotes VALID)
    if _mean is not None:
        mean = _mean
    else:
        mean = self.min_mean + np.random.random() * (self.max_mean - self.min_mean)
    if _std is not None:
        std = _std
    else:
        std = self.min_std + np.random.random() * (self.max_std - self.min_std)
    conv = ConvUnit(number, filter_width, filter_height, _in_channel, out_channel, stride_width, stride_height,
                    conv_type, mean, std)
    return conv
def init_a_pool(self, _number=None, _max_or_avg=None, _kernel_width=None, _kernel_height=None, _stride_width=None,
                _stride_height=None):
    """Build a PoolUnit, randomising every field that is not supplied.

    ``None`` arguments are randomised/defaulted; explicit values — including
    falsy ones such as ``_number=0`` or ``_max_or_avg=0.0`` (max pooling) —
    are used as-is.  The previous truthiness tests re-randomised those.

    :return: a new PoolUnit
    """
    if _number is not None:
        number = _number
    else:
        number = self.number_id
        self.number_id += 1
    # max pooling for values < 0.5, average pooling otherwise
    max_or_avg = _max_or_avg if _max_or_avg is not None else np.random.rand()
    if _kernel_width is not None:
        kernel_width = _kernel_width
    else:
        # the list stores exponents of 2: the real kernel size is 2**e
        kernel_width = np.power(2, self.pool_kernel_size_list[np.random.randint(len(self.pool_kernel_size_list))])
    kernel_height = _kernel_height if _kernel_height is not None else kernel_width  # by default
    stride_width = _stride_width if _stride_width is not None else kernel_width  # by default
    # NOTE(review): falls back to kernel_width (not kernel_height), matching
    # the original behaviour -- confirm whether kernel_height was intended.
    stride_height = _stride_height if _stride_height is not None else kernel_width
    pool = PoolUnit(number, max_or_avg, kernel_width, kernel_height, stride_width, stride_height)
    return pool
def init_a_fc(self, _number=None, _input_neurons_number=None, _output_neurons_number=None, _mean=None, _std=None,
              is_last=None):
    """Build an FcUnit, randomising every field that is not supplied.

    ``None`` arguments are randomised; explicit values (including falsy
    ones such as ``_number=0`` or ``_mean=0.0``) are used as-is, which the
    previous truthiness tests got wrong.

    :param is_last: when truthy the output size is forced to the number of
        classes, making this the network's final classification layer.
    :return: a new FcUnit
    """
    if _number is not None:
        number = _number
    else:
        number = self.number_id
        self.number_id += 1
    if is_last:
        output_neurons_number = StatusUpdateTool.get_num_class()
    elif _output_neurons_number is not None:
        output_neurons_number = _output_neurons_number
    else:
        # NOTE(review): np.random.random() yields a float neuron count --
        # confirm whether an integer draw (randint) was intended here.
        output_neurons_number = self.min_hidden_neurons + np.random.random() * (
                self.max_hidden_neurons - self.min_hidden_neurons)
    if _mean is not None:
        mean = _mean
    else:
        mean = self.min_mean + np.random.random() * (self.max_mean - self.min_mean)
    if _std is not None:
        std = _std
    else:
        std = self.min_std + np.random.random() * (self.max_std - self.min_std)
    fc = FcUnit(number, _input_neurons_number, output_neurons_number, mean, std)
    return fc
def get_conv_number(self):
number = 0
for unit in self.units:
if unit.type == 1:
number += 1
return number
def get_pool_number(self):
number = 0
for unit in self.units:
if unit.type == 2:
number += 1
return number
def get_fc_number(self):
number = 0
for unit in self.units:
if unit.type == 3:
number += 1
return number
@classmethod
def get_last_output_channel(cls, pos, indi_units):
'''
the position varies between [0,len(indi_units)], and 0 denotes the input channel of the individual,
1 denotes the output channel after one unit
:param pos: position
:param indi_units: a units list like self.units
:return: last output channel before pos
'''
last_output_channel = 0
if pos == 0:
last_output_channel = StatusUpdateTool.get_input_channel()
else:
for i in range(pos - 1, -1, -1):
if indi_units[i].type == 1 or indi_units[i].type == 3:
last_output_channel = indi_units[i].out_channel
break
assert last_output_channel # return not equal to 0
return last_output_channel
@classmethod
def calculate_complexity(cls, indi_units):
current_img_size = StatusUpdateTool.get_input_size()
num_connections = 0
last_output_feature_map_size = StatusUpdateTool.get_input_channel()
for i in range(len(indi_units)):
if indi_units[i].type == 1:
last_output_feature_map_size = indi_units[i].out_channel
num_connections += indi_units[i].out_channel * current_img_size ** 2 + indi_units[i].out_channel
elif indi_units[i].type == 2:
num_connections += last_output_feature_map_size
current_img_size = current_img_size / indi_units[i].kernel_size[0]
else: # indi_units[i].type == 3
num_connections += indi_units[i].input_neurons_number * indi_units[i].output_neurons_number + \
indi_units[i].output_neurons_number
return num_connections
@classmethod
def update_all_channels(cls, indi_units, type, log):
'''
update the channels of all the units, and update the stride size of pool unit
:param indi_units: a units list like self.units
:param type: 0 denotes crossover, 1 denotes mutation
:param log: log in crossover and mutation
:return: updated indi_units
'''
input_channel = StatusUpdateTool.get_input_channel()
is_the_first_fc = True
shrink = 1
for i in range(len(indi_units)):
if indi_units[i].type == 1:
indi_units[i].in_channel = input_channel
# generate log
if type == 0:
log.info('Due to the above crossover, unit at %d changes its input channel from %d to %d' % (
i, indi_units[i].in_channel, input_channel))
else:
log.info('Due to the above mutation, unit at %d changes its input channel from %d to %d' % (
i, indi_units[i].in_channel, input_channel))
input_channel = indi_units[i].out_channel
elif indi_units[i].type == 2:
shrink = shrink * indi_units[i].kernel_size[0]
indi_units[i].stride_size = indi_units[i].kernel_size
elif indi_units[i].type == 3:
if is_the_first_fc:
input_channel = input_channel * (int(StatusUpdateTool.get_input_size() / shrink) ** 2)
is_the_first_fc = False
indi_units[i].input_neurons_number = input_channel
# generate log
if type == 0:
log.info('Due to the above crossover, unit at %d changes its input channel from %d to %d' % (
i, indi_units[i].input_neurons_number, input_channel))
else:
log.info('Due to the above mutation, unit at %d changes its input channel from %d to %d' % (
i, indi_units[i].input_neurons_number, input_channel))
input_channel = indi_units[i].output_neurons_number
return indi_units
def uuid(self):
_str = []
for unit in self.units:
_sub_str = []
if unit.type == 1:
_sub_str.append('conv')
_sub_str.append('number:%d' % (unit.number))
_sub_str.append('filter_size:(%d, %d)' % (unit.filter_size[0], unit.filter_size[1]))
_sub_str.append('in:%d' % (unit.in_channel))
_sub_str.append('out:%d' % (unit.out_channel))
_sub_str.append('stride_size:(%d, %d)' % (unit.stride_size[0], unit.stride_size[1]))
conv_type = 'VALID' if unit.conv_type == 0 else 'SAME'
_sub_str.append('conv_type:%s' % (conv_type))
_sub_str.append('mean:%f' % (unit.mean))
_sub_str.append('std:%f' % (unit.std))
if unit.type == 2:
_sub_str.append('pool')
_sub_str.append('number:%d' % (unit.number))
_pool_type = 0.25 if unit.max_or_avg < 0.5 else 0.75
_sub_str.append('type:%.2f' % (_pool_type))
_sub_str.append('kernel_size:(%d, %d)' % (unit.kernel_size[0], unit.kernel_size[1]))
_sub_str.append('stride_size:(%d, %d)' % (unit.stride_size[0], unit.stride_size[1]))
if unit.type == 3:
_sub_str.append('fc')
_sub_str.append('number:%d' % (unit.number))
_sub_str.append('in:%d' % (unit.input_neurons_number))
_sub_str.append('out:%d' % (unit.output_neurons_number))
_sub_str.append('mean:%f' % (unit.mean))
_sub_str.append('std:%f' % (unit.std))
_str.append('%s%s%s' % ('[', | |
check:
raise ProofGenerationException("Implication failed when spawning constraint %s: %s & %s -/-> %s\n" % (str(con), r1.label(), r2.label(), con.root.label()))
else:
r1,v1 = rvList[0]
antecedents = [v1]
check, implication = csys.manager.justifyImply(r1, con.root)
if not check:
raise ProofGenerationException("Implication failed when spawning constraint %s: %s -/-> %s\n" % (str(con), r1.label(), con.root.label()))
if implication != resolver.tautologyId:
antecedents += [implication]
done = con.root == csys.manager.leaf0
if done:
comment = "Validation of empty clause from infeasible constraint"
else:
comment = "Validation of constraint with BDD root %s" % con.root.label()
con.validation = csys.manager.prover.createClause([con.root.id], antecedents, comment)
if done:
csys.writer.write("UNSAT\n")
csys.manager.summarize()
return con
# Helper function for inserting new element in dictionary
def nzInsert(self, nz, i, v):
if v == 0 and i in nz:
del nz[i]
else:
nz[i] = v
# Add other constraint to self
def add(self, other, csys):
nnz = { i : self[i] for i in self.indices() }
for i in other.indices():
nx = self[i] + other[i]
self.nzInsert(nnz, i, nx)
nc = self.cval + other.cval
return self.spawn(nnz, nc, csys, [self, other])
# Scale a constraint by a constant
def scale(self, const, csys):
if const == 1:
return self
nnz = { i : const * self[i] for i in self.indices() }
nc = const * self.cval
return self.spawn(nnz, nc, csys, [self])
# Generate at-least-one constraint
def alo(self, vlist, csys):
nnz = { i : 1 for i in vlist }
cval = 1
return self.spawn(nnz, cval, csys, [self, other])
# Generate at-most-one constraint
def amo(self, vlist, csys):
nnz = { i : -1 for i in vlist }
cval = -1
return self.spawn(nnz, cval, csys, [self, other])
# Generate at-most-zero constraint
# (i.e., all must be false)
def amz(self, vlist, csys):
nnz = { i : -1 for i in vlist }
cval = 0
return self.spawn(nnz, cval, csys, [self, other])
    # Generate BDD representation
    def buildBdd(self, csys):
        # Build a BDD for this pseudo-Boolean constraint, storing the root in
        # self.root and its node count in self.size.  Classic layered
        # construction: each variable level tracks the partial sums
        # ("offsets") reachable from the root; a leaf is 1 exactly when the
        # accumulated sum can meet the threshold self.cval.
        ilist = self.indices()
        if len(ilist) == 0:
            # No terms: trivially true iff the threshold is nonpositive
            self.root = csys.manager.leaf1 if self.cval <= 0 else csys.manager.leaf0
            self.size = 1
            return
        # Order variables by BDD level so construction proceeds root-to-leaf
        ilist.sort(key = lambda id : csys.levelMap[id])
        # Determine at what offsets will need node, starting from root and working down
        needNodes = { i : {} for i in ilist }
        previ = ilist[0]
        needNodes[previ][0] = True
        for nexti in ilist[1:]:
            for offset in needNodes[previ].keys():
                # Will need this offset when variable evaluates to 0
                needNodes[nexti][offset] = True
                # Will need this offset when variable evaluates to 1
                noffset = offset + self[previ]
                needNodes[nexti][noffset] = True
            previ = nexti
        # Now build BDD from bottom up
        rilist = list(ilist)
        rilist.reverse()
        # Start at leaves.  Determine possible offsets
        lasti = rilist[0]
        needLeaves = {}
        for offset in needNodes[lasti].keys():
            needLeaves[offset] = True
            noffset = offset + self[lasti]
            needLeaves[noffset] = True
        leafList = { offset : (csys.manager.leaf1 if offset >= self.cval else csys.manager.leaf0) for offset in needLeaves.keys() }
        nodes = { i : {} for i in rilist }
        # Build the bottom variable layer directly over the leaves
        for offset in needNodes[lasti].keys():
            low = leafList[offset]
            noffset = offset + self[lasti]
            high = leafList[noffset]
            var = csys.varMap[lasti]
            # Skip a redundant test (low == high) to keep the BDD reduced
            root = low if low == high else csys.manager.findOrMake(var, high, low)
            nodes[lasti][offset] = root
        nexti = lasti
        # Work upwards through the remaining levels
        for previ in rilist[1:]:
            for offset in needNodes[previ].keys():
                low = nodes[nexti][offset]
                noffset = offset + self[previ]
                high = nodes[nexti][noffset]
                var = csys.varMap[previ]
                root = low if low == high else csys.manager.findOrMake(var, high, low)
                nodes[previ][offset] = root
            nexti = previ
        self.root = nodes[ilist[0]][0]
        self.size = csys.manager.getSize(self.root)
# Does this constraint have no solution
def isInfeasible(self):
maxsum = 0
for v in self.nz.values():
if v > 0:
maxsum += v
return maxsum < self.cval
def isTrivial(self):
return self.cval <= 0 and len(self) == 0
def __str__(self):
if self.N <= 0:
return self.formatDense()
else:
return self.formatSparse()
# Maintain set of sparse constraints, including index from each index i to those constraints having nonzero value there
class ConstraintSet:
    """Registry of sparse constraints.

    Assigns each added constraint a unique integer ID and maintains a
    reverse index (nzMap) from variable index to the IDs of all current
    constraints with a nonzero coefficient at that index.
    """
    # NOTE: the class-level values below are defaults only; __init__ rebinds
    # each of them as an instance attribute.
    # Unique ID assigned when registered
    nextId = 1
    # Mapping from id to constraint
    conDict = {}
    # Mapping from index to list of constraint IDs having nonzero entry at that index
    nzMap = {}
    # Total number of nonzero terms added
    termCount = 0
    # Largest constraint added
    termMax = 0

    def __init__(self, clist = None, writer = None):
        # BUG FIX: clist previously defaulted to a mutable list literal
        self.nextId = 1
        self.writer = SimpleWriter() if writer is None else writer
        self.conDict = {}
        self.nzMap = {}
        self.termCount = 0
        self.termMax = 0
        for con in (clist or []):
            self.addConstraint(con)

    def addIndex(self, cid, idx):
        # Record that constraint cid has a nonzero coefficient at idx
        if idx in self.nzMap:
            self.nzMap[idx].append(cid)
        else:
            self.nzMap[idx] = [cid]

    def removeIndex(self, cid, idx):
        # Drop cid from the index entry at idx, deleting the entry when empty
        nlist = [j for j in self.nzMap[idx] if j != cid]
        if len(nlist) == 0:
            del self.nzMap[idx]
        else:
            self.nzMap[idx] = nlist

    def analyzeConstraint(self, con):
        # Accumulate term statistics for reporting
        count = len(con)
        self.termCount += count
        self.termMax = max(self.termMax, count)

    def addConstraint(self, con):
        # Register con, index its nonzero positions, and return its new ID
        cid = self.nextId
        self.nextId += 1
        self.conDict[cid] = con
        for idx in con.nz:
            self.addIndex(cid, idx)
        self.analyzeConstraint(con)
        return cid

    def removeConstraint(self, cid):
        # Unregister the constraint with ID cid and un-index its positions
        con = self[cid]
        for idx in con.nz:
            self.removeIndex(cid, idx)
        del self.conDict[cid]

    def lookup(self, idx):
        # IDs of all current constraints with a nonzero coefficient at idx
        if idx in self.nzMap:
            return self.nzMap[idx]
        else:
            return []

    def rootList(self):
        # BDD roots of all current constraints
        return [con.root for con in self.conDict.values()]

    def __getitem__(self, cid):
        return self.conDict[cid]

    def __len__(self):
        return len(self.conDict)

    def currentCids(self):
        return list(self.conDict.keys())

    def currentIndices(self):
        return list(self.nzMap.keys())

    def show(self):
        # Print all current constraints, in ID order
        cidList = sorted(self.currentCids())
        for cid in cidList:
            self.writer.write(" #%d:%s\n" % (cid, str(self[cid])))

    # How many total constraints have been generated
    def constraintCount(self):
        return self.nextId - 1
# System of constraints.
# Support adding constraints to see if can detect conflict
class ConstraintSystem:
    # NOTE: the class-level values below are defaults; __init__ rebinds the
    # mutable ones as instance attributes.
    # Variable Count
    N = 10
    verbose = False
    writer = None
    ## Solver state
    # Eliminated constraints
    sset = None
    # Remaining constraints
    rset = None
    # Supporting BDD operation
    manager = None
    # Mapping from variable Id to variable
    varMap = None
    # Mapping from variable Id to level
    levelMap = None
    ## Accumulating data
    # Total number of elimination steps
    stepCount = 0
    # Sum of pivot degrees
    pivotDegreeSum = 0
    # Max of pivot degrees
    pivotDegreeMax = 0
    # Total number of vector operations
    combineCount = 0
    # Mapping from variable ID to True
    varUsed = {}
def __init__(self, N, verbose = True, manager = None, writer = None):
self.N = N
self.verbose = verbose
self.manager = manager
if manager is not None:
self.varMap = { var.id : var for var in manager.variables }
self.levelMap = { var.id : var.level for var in manager.variables }
self.writer = SimpleWriter() if writer is None else writer
self.sset = ConstraintSet(writer = self.writer)
self.rset = ConstraintSet(writer = self.writer)
self.varUsed = {}
# Add new constraint to main set
def addConstraint(self, con):
cid = self.rset.addConstraint(con)
for i in con.nz:
self.varUsed[i] = True
if self.manager is not None:
con.buildBdd(self)
return cid
# Given possible pivot index, give a score
def evaluatePivot(self, pidx):
cidList = self.rset.lookup(pidx)
posIndices = [cid for cid in cidList if self.rset[cid][pidx] > 0]
negIndices = [cid for cid in cidList if self.rset[cid][pidx] < 0]
score = 0
for pid in posIndices:
for nid in negIndices:
score += len(self.rset[pid]) + len(self.rset[nid]) - 2
return score
# Given remaining set of constraints, select pivot element and constraint id
def selectPivot(self):
bestPidx = None
bestScore = 0
idList = self.rset.currentIndices()
# Make sure that any ties are broken arbitrarily
# rather than as some artifact of the input file
if randomizePivots:
random.shuffle(idList)
for pidx in idList:
score = self.evaluatePivot(pidx)
if bestPidx is None or score < bestScore:
bestPidx = pidx
bestScore = score
return bestPidx
# Perform one step of FM reduction
# Possible return values:
# "solved", "unsolvable", "normal"
def solutionStep(self):
if len(self.rset) == 0:
return "solved"
self.stepCount += 1
pidx = self.selectPivot()
if pidx is None:
return "solved"
cidList = self.rset.lookup(pidx)
posIndices = [cid for cid in cidList if self.rset[cid][pidx] > 0]
negIndices = [cid for cid in cidList if self.rset[cid][pidx] < 0]
values = [self.rset[cid][pidx] for cid in cidList]
oobValues = [v for v in values if abs(v) > 1]
if len(oobValues) > 0:
raise FourierMotzinException(oobValues)
| |
mail_user_mail_folder_delete_single_value_extended_property(client,
user_id,
mail_folder_id,
single_value_legacy_extended_property_id,
if_match=None):
return client.delete_single_value_extended_properties(user_id=user_id,
mail_folder_id=mail_folder_id,
single_value_legacy_extended_property_id=single_value_legacy_extended_property_id,
if_match=if_match)
def mail_user_mail_folder_list_child_folder(client, user_id, mail_folder_id,
                                            orderby=None, select=None, expand=None):
    """List the child folders of a user's mail folder."""
    params = {
        'user_id': user_id,
        'mail_folder_id': mail_folder_id,
        'orderby': orderby,
        'select': select,
        'expand': expand,
    }
    return client.list_child_folders(**params)
def mail_user_mail_folder_list_message(client, user_id, mail_folder_id,
                                       orderby=None, select=None, expand=None):
    """List the messages in a user's mail folder."""
    params = {
        'user_id': user_id,
        'mail_folder_id': mail_folder_id,
        'orderby': orderby,
        'select': select,
        'expand': expand,
    }
    return client.list_messages(**params)
def mail_user_mail_folder_list_message_rule(client, user_id, mail_folder_id,
                                            orderby=None, select=None, expand=None):
    """List the message rules of a user's mail folder."""
    params = {
        'user_id': user_id,
        'mail_folder_id': mail_folder_id,
        'orderby': orderby,
        'select': select,
        'expand': expand,
    }
    return client.list_message_rules(**params)
def mail_user_mail_folder_list_multi_value_extended_property(client, user_id, mail_folder_id,
                                                             orderby=None, select=None, expand=None):
    """List the multi-value extended properties of a user's mail folder."""
    params = {
        'user_id': user_id,
        'mail_folder_id': mail_folder_id,
        'orderby': orderby,
        'select': select,
        'expand': expand,
    }
    return client.list_multi_value_extended_properties(**params)
def mail_user_mail_folder_list_single_value_extended_property(client, user_id, mail_folder_id,
                                                              orderby=None, select=None, expand=None):
    """List the single-value extended properties of a user's mail folder."""
    params = {
        'user_id': user_id,
        'mail_folder_id': mail_folder_id,
        'orderby': orderby,
        'select': select,
        'expand': expand,
    }
    return client.list_single_value_extended_properties(**params)
def mail_user_mail_folder_show_child_folder(client, user_id, mail_folder_id, mail_folder_id1,
                                            select=None, expand=None):
    """Show one child folder of a user's mail folder."""
    params = {
        'user_id': user_id,
        'mail_folder_id': mail_folder_id,
        'mail_folder_id1': mail_folder_id1,
        'select': select,
        'expand': expand,
    }
    return client.get_child_folders(**params)
def mail_user_mail_folder_show_message(client, user_id, mail_folder_id, message_id,
                                       select=None, expand=None):
    """Show one message from a user's mail folder."""
    params = {
        'user_id': user_id,
        'mail_folder_id': mail_folder_id,
        'message_id': message_id,
        'select': select,
        'expand': expand,
    }
    return client.get_messages(**params)
def mail_user_mail_folder_show_message_rule(client, user_id, mail_folder_id, message_rule_id,
                                            select=None, expand=None):
    """Show one message rule from a user's mail folder."""
    params = {
        'user_id': user_id,
        'mail_folder_id': mail_folder_id,
        'message_rule_id': message_rule_id,
        'select': select,
        'expand': expand,
    }
    return client.get_message_rules(**params)
def mail_user_mail_folder_show_multi_value_extended_property(client, user_id, mail_folder_id,
                                                             multi_value_legacy_extended_property_id,
                                                             select=None, expand=None):
    """Show one multi-value extended property of a user's mail folder."""
    params = {
        'user_id': user_id,
        'mail_folder_id': mail_folder_id,
        'multi_value_legacy_extended_property_id': multi_value_legacy_extended_property_id,
        'select': select,
        'expand': expand,
    }
    return client.get_multi_value_extended_properties(**params)
def mail_user_mail_folder_show_single_value_extended_property(client, user_id, mail_folder_id,
                                                              single_value_legacy_extended_property_id,
                                                              select=None, expand=None):
    """Show one single-value extended property of a user's mail folder."""
    params = {
        'user_id': user_id,
        'mail_folder_id': mail_folder_id,
        'single_value_legacy_extended_property_id': single_value_legacy_extended_property_id,
        'select': select,
        'expand': expand,
    }
    return client.get_single_value_extended_properties(**params)
def mail_user_mail_folder_update_child_folder(client, user_id, mail_folder_id, mail_folder_id1,
                                              id_=None, child_folder_count=None, display_name=None,
                                              parent_folder_id=None, total_item_count=None,
                                              unread_item_count=None, child_folders=None,
                                              message_rules=None, messages=None,
                                              multi_value_extended_properties=None,
                                              single_value_extended_properties=None):
    """Update a child folder of a user's mail folder."""
    body = {
        'id': id_,
        'child_folder_count': child_folder_count,
        'display_name': display_name,
        'parent_folder_id': parent_folder_id,
        'total_item_count': total_item_count,
        'unread_item_count': unread_item_count,
        'child_folders': child_folders,
        'message_rules': message_rules,
        'messages': messages,
        'multi_value_extended_properties': multi_value_extended_properties,
        'single_value_extended_properties': single_value_extended_properties,
    }
    return client.update_child_folders(user_id=user_id,
                                       mail_folder_id=mail_folder_id,
                                       mail_folder_id1=mail_folder_id1,
                                       body=body)
def mail_user_mail_folder_update_message(client,
                                         user_id,
                                         mail_folder_id,
                                         message_id,
                                         body,
                                         id_=None,
                                         categories=None,
                                         change_key=None,
                                         created_date_time=None,
                                         last_modified_date_time=None,
                                         bcc_recipients=None,
                                         body_preview=None,
                                         cc_recipients=None,
                                         conversation_id=None,
                                         conversation_index=None,
                                         has_attachments=None,
                                         importance=None,
                                         inference_classification=None,
                                         internet_message_headers=None,
                                         internet_message_id=None,
                                         is_delivery_receipt_requested=None,
                                         is_draft=None,
                                         is_read=None,
                                         is_read_receipt_requested=None,
                                         parent_folder_id=None,
                                         received_date_time=None,
                                         reply_to=None,
                                         sent_date_time=None,
                                         subject=None,
                                         to_recipients=None,
                                         unique_body=None,
                                         web_link=None,
                                         attachments=None,
                                         extensions=None,
                                         multi_value_extended_properties=None,
                                         single_value_extended_properties=None,
                                         email_address=None,
                                         microsoft_graph_email_address=None,
                                         completed_date_time=None,
                                         due_date_time=None,
                                         flag_status=None,
                                         start_date_time=None):
    """Update a message in a user's mail folder.

    `body` is the message body content; all other optional arguments map
    onto the corresponding fields of the request payload.
    """
    # BUG FIX: the request payload used to be built in a local also named
    # `body`, shadowing the `body` parameter before it was read; the old
    # code then stored the payload dict inside itself (body['body'] = body),
    # silently discarding the caller's message content.  Preserve the
    # parameter before reusing the name for the payload.
    message_content = body
    body = {}
    body['id'] = id_
    body['categories'] = categories
    body['change_key'] = change_key
    body['created_date_time'] = created_date_time
    body['last_modified_date_time'] = last_modified_date_time
    body['bcc_recipients'] = bcc_recipients
    body['body'] = message_content
    body['body_preview'] = body_preview
    body['cc_recipients'] = cc_recipients
    body['conversation_id'] = conversation_id
    body['conversation_index'] = conversation_index
    body['has_attachments'] = has_attachments
    body['importance'] = importance
    body['inference_classification'] = inference_classification
    body['internet_message_headers'] = internet_message_headers
    body['internet_message_id'] = internet_message_id
    body['is_delivery_receipt_requested'] = is_delivery_receipt_requested
    body['is_draft'] = is_draft
    body['is_read'] = is_read
    body['is_read_receipt_requested'] = is_read_receipt_requested
    body['parent_folder_id'] = parent_folder_id
    body['received_date_time'] = received_date_time
    body['reply_to'] = reply_to
    body['sent_date_time'] = sent_date_time
    body['subject'] = subject
    body['to_recipients'] = to_recipients
    body['unique_body'] = unique_body
    body['web_link'] = web_link
    body['attachments'] = attachments
    body['extensions'] = extensions
    body['multi_value_extended_properties'] = multi_value_extended_properties
    body['single_value_extended_properties'] = single_value_extended_properties
    # Nested sender / from / flag sub-objects (always present, matching the
    # original behavior)
    body['sender'] = {}
    body['sender']['email_address'] = email_address
    body['from_property'] = {}
    body['from_property']['email_address'] = microsoft_graph_email_address
    body['flag'] = {}
    body['flag']['completed_date_time'] = completed_date_time
    body['flag']['due_date_time'] = due_date_time
    body['flag']['flag_status'] = flag_status
    body['flag']['start_date_time'] = start_date_time
    return client.update_messages(user_id=user_id,
                                  mail_folder_id=mail_folder_id,
                                  message_id=message_id,
                                  body=body)
def mail_user_mail_folder_update_message_rule(client,
                                              user_id,
                                              mail_folder_id,
                                              message_rule_id,
                                              id_=None,
                                              display_name=None,
                                              has_error=None,
                                              is_enabled=None,
                                              is_read_only=None,
                                              sequence=None,
                                              body_contains=None,
                                              body_or_subject_contains=None,
                                              categories=None,
                                              from_addresses=None,
                                              has_attachments=None,
                                              header_contains=None,
                                              importance=None,
                                              exceptions_is_approval_request=None,
                                              exceptions_is_automatic_forward=None,
                                              exceptions_is_automatic_reply=None,
                                              exceptions_is_encrypted=None,
                                              exceptions_is_meeting_request=None,
                                              exceptions_is_meeting_response=None,
                                              exceptions_is_non_delivery_report=None,
                                              exceptions_is_permission_controlled=None,
                                              exceptions_is_read_receipt=None,
                                              exceptions_is_signed=None,
                                              exceptions_is_voicemail=None,
                                              message_action_flag=None,
                                              not_sent_to_me=None,
                                              recipient_contains=None,
                                              sender_contains=None,
                                              sensitivity=None,
                                              sent_cc_me=None,
                                              sent_only_to_me=None,
                                              sent_to_addresses=None,
                                              sent_to_me=None,
                                              sent_to_or_cc_me=None,
                                              subject_contains=None,
                                              within_size_range=None,
                                              microsoft_graph_message_rule_predicates_body_contains=None,
                                              microsoft_graph_message_rule_predicates_body_or_subject_contains_body_or_subject_contains=None,
                                              microsoft_graph_message_rule_predicates_categories=None,
                                              microsoft_graph_message_rule_predicates_from_addresses=None,
                                              boolean_has_attachments=None,
                                              microsoft_graph_message_rule_predicates_header_contains=None,
                                              microsoft_graph_importance=None,
                                              is_approval_request=None,
                                              is_automatic_forward=None,
                                              is_automatic_reply=None,
                                              is_encrypted=None,
                                              is_meeting_request=None,
                                              is_meeting_response=None,
                                              is_non_delivery_report=None,
                                              is_permission_controlled=None,
                                              is_read_receipt=None,
                                              is_signed=None,
                                              is_voicemail=None,
                                              microsoft_graph_message_action_flag_message_action_flag=None,
                                              boolean_not_sent_to_me=None,
                                              microsoft_graph_message_rule_predicates_recipient_contains=None,
                                              microsoft_graph_message_rule_predicates_sender_contains=None,
                                              microsoft_graph_sensitivity=None,
                                              boolean_sent_cc_me=None,
                                              boolean_sent_only_to_me=None,
                                              microsoft_graph_message_rule_predicates_sent_to_addresses_sent_to_addresses=None,
                                              boolean_sent_to_me=None,
                                              boolean_sent_to_or_cc_me=None,
                                              microsoft_graph_message_rule_predicates_subject_contains=None,
                                              microsoft_graph_size_range_within_size_range=None,
                                              assign_categories=None,
                                              copy_to_folder=None,
                                              delete=None,
                                              forward_as_attachment_to=None,
                                              forward_to=None,
                                              mark_as_read=None,
                                              mark_importance=None,
                                              move_to_folder=None,
                                              permanent_delete=None,
                                              redirect_to=None,
                                              stop_processing_rules=None):
    """Update a message rule on a user's mail folder.

    The unprefixed predicate parameters populate the rule's `exceptions`
    sub-object; the `microsoft_graph_*` / `boolean_*` / `is_*` variants
    populate `conditions`; the remaining parameters populate `actions`.
    """
    # Rule exceptions (predicates that exempt a message from the rule)
    exceptions = {
        'body_contains': body_contains,
        'body_or_subject_contains': body_or_subject_contains,
        'categories': categories,
        'from_addresses': from_addresses,
        'has_attachments': has_attachments,
        'header_contains': header_contains,
        'importance': importance,
        'is_approval_request': exceptions_is_approval_request,
        'is_automatic_forward': exceptions_is_automatic_forward,
        'is_automatic_reply': exceptions_is_automatic_reply,
        'is_encrypted': exceptions_is_encrypted,
        'is_meeting_request': exceptions_is_meeting_request,
        'is_meeting_response': exceptions_is_meeting_response,
        'is_non_delivery_report': exceptions_is_non_delivery_report,
        'is_permission_controlled': exceptions_is_permission_controlled,
        'is_read_receipt': exceptions_is_read_receipt,
        'is_signed': exceptions_is_signed,
        'is_voicemail': exceptions_is_voicemail,
        'message_action_flag': message_action_flag,
        'not_sent_to_me': not_sent_to_me,
        'recipient_contains': recipient_contains,
        'sender_contains': sender_contains,
        'sensitivity': sensitivity,
        'sent_cc_me': sent_cc_me,
        'sent_only_to_me': sent_only_to_me,
        'sent_to_addresses': sent_to_addresses,
        'sent_to_me': sent_to_me,
        'sent_to_or_cc_me': sent_to_or_cc_me,
        'subject_contains': subject_contains,
        'within_size_range': within_size_range,
    }
    # Rule conditions (predicates that trigger the rule)
    conditions = {
        'body_contains': microsoft_graph_message_rule_predicates_body_contains,
        'body_or_subject_contains': microsoft_graph_message_rule_predicates_body_or_subject_contains_body_or_subject_contains,
        'categories': microsoft_graph_message_rule_predicates_categories,
        'from_addresses': microsoft_graph_message_rule_predicates_from_addresses,
        'has_attachments': boolean_has_attachments,
        'header_contains': microsoft_graph_message_rule_predicates_header_contains,
        'importance': microsoft_graph_importance,
        'is_approval_request': is_approval_request,
        'is_automatic_forward': is_automatic_forward,
        'is_automatic_reply': is_automatic_reply,
        'is_encrypted': is_encrypted,
        'is_meeting_request': is_meeting_request,
        'is_meeting_response': is_meeting_response,
        'is_non_delivery_report': is_non_delivery_report,
        'is_permission_controlled': is_permission_controlled,
        'is_read_receipt': is_read_receipt,
        'is_signed': is_signed,
        'is_voicemail': is_voicemail,
        'message_action_flag': microsoft_graph_message_action_flag_message_action_flag,
        'not_sent_to_me': boolean_not_sent_to_me,
        'recipient_contains': microsoft_graph_message_rule_predicates_recipient_contains,
        'sender_contains': microsoft_graph_message_rule_predicates_sender_contains,
        'sensitivity': microsoft_graph_sensitivity,
        'sent_cc_me': boolean_sent_cc_me,
        'sent_only_to_me': boolean_sent_only_to_me,
        'sent_to_addresses': microsoft_graph_message_rule_predicates_sent_to_addresses_sent_to_addresses,
        'sent_to_me': boolean_sent_to_me,
        'sent_to_or_cc_me': boolean_sent_to_or_cc_me,
        'subject_contains': microsoft_graph_message_rule_predicates_subject_contains,
        'within_size_range': microsoft_graph_size_range_within_size_range,
    }
    # Actions performed when the rule fires
    actions = {
        'assign_categories': assign_categories,
        'copy_to_folder': copy_to_folder,
        'delete': delete,
        'forward_as_attachment_to': forward_as_attachment_to,
        'forward_to': forward_to,
        'mark_as_read': mark_as_read,
        'mark_importance': mark_importance,
        'move_to_folder': move_to_folder,
        'permanent_delete': permanent_delete,
        'redirect_to': redirect_to,
        'stop_processing_rules': stop_processing_rules,
    }
    body = {
        'id': id_,
        'display_name': display_name,
        'has_error': has_error,
        'is_enabled': is_enabled,
        'is_read_only': is_read_only,
        'sequence': sequence,
        'exceptions': exceptions,
        'conditions': conditions,
        'actions': actions,
    }
    return client.update_message_rules(user_id=user_id,
                                       mail_folder_id=mail_folder_id,
                                       message_rule_id=message_rule_id,
                                       body=body)
def mail_user_mail_folder_update_multi_value_extended_property(client, user_id, mail_folder_id,
                                                               multi_value_legacy_extended_property_id,
                                                               id_=None, value=None):
    """Update a multi-value extended property of a user's mail folder."""
    payload = {'id': id_, 'value': value}
    return client.update_multi_value_extended_properties(
        user_id=user_id,
        mail_folder_id=mail_folder_id,
        multi_value_legacy_extended_property_id=multi_value_legacy_extended_property_id,
        body=payload)
def mail_user_mail_folder_update_single_value_extended_property(client, user_id, mail_folder_id,
                                                                single_value_legacy_extended_property_id,
                                                                id_=None, value=None):
    """Update a single-value extended property of a user's mail folder."""
    payload = {'id': id_, 'value': value}
    return client.update_single_value_extended_properties(
        user_id=user_id,
        mail_folder_id=mail_folder_id,
        single_value_legacy_extended_property_id=single_value_legacy_extended_property_id,
        body=payload)
def mail_user_mail_folder_message_create_attachment(client, user_id, mail_folder_id, message_id,
                                                    content_type, id_=None, is_inline=None,
                                                    last_modified_date_time=None, name=None, size=None):
    """Create an attachment on a message in a user's mail folder."""
    payload = {
        'id': id_,
        'content_type': content_type,
        'is_inline': is_inline,
        'last_modified_date_time': last_modified_date_time,
        'name': name,
        'size': size,
    }
    return client.create_attachments(user_id=user_id,
                                     mail_folder_id=mail_folder_id,
                                     message_id=message_id,
                                     body=payload)
def mail_user_mail_folder_message_create_extension(client, user_id, mail_folder_id, message_id,
                                                   id_=None):
    """Create an extension on a message in a user's mail folder."""
    payload = {'id': id_}
    return client.create_extensions(user_id=user_id,
                                    mail_folder_id=mail_folder_id,
                                    message_id=message_id,
                                    body=payload)
def mail_user_mail_folder_message_create_multi_value_extended_property(client, user_id, mail_folder_id,
                                                                       message_id, id_=None, value=None):
    """Create a multi-value extended property on a message."""
    payload = {'id': id_, 'value': value}
    return client.create_multi_value_extended_properties(user_id=user_id,
                                                         mail_folder_id=mail_folder_id,
                                                         message_id=message_id,
                                                         body=payload)
def mail_user_mail_folder_message_create_single_value_extended_property(client, user_id, mail_folder_id,
                                                                        message_id, id_=None, value=None):
    """Create a single-value extended property on a message."""
    payload = {'id': id_, 'value': value}
    return client.create_single_value_extended_properties(user_id=user_id,
                                                          mail_folder_id=mail_folder_id,
                                                          message_id=message_id,
                                                          body=payload)
def mail_user_mail_folder_message_delete_attachment(client, user_id, mail_folder_id, message_id,
                                                    attachment_id, if_match=None):
    """Delete an attachment from a message in a user's mail folder."""
    params = {
        'user_id': user_id,
        'mail_folder_id': mail_folder_id,
        'message_id': message_id,
        'attachment_id': attachment_id,
        'if_match': if_match,
    }
    return client.delete_attachments(**params)
def mail_user_mail_folder_message_delete_extension(client, user_id, mail_folder_id, message_id,
                                                   extension_id, if_match=None):
    """Delete an extension from a message in a user's mail folder."""
    params = {
        'user_id': user_id,
        'mail_folder_id': mail_folder_id,
        'message_id': message_id,
        'extension_id': extension_id,
        'if_match': if_match,
    }
    return client.delete_extensions(**params)
def mail_user_mail_folder_message_delete_multi_value_extended_property(client, user_id, mail_folder_id,
                                                                       message_id,
                                                                       multi_value_legacy_extended_property_id,
                                                                       if_match=None):
    """Delete a multi-value extended property from a message."""
    params = {
        'user_id': user_id,
        'mail_folder_id': mail_folder_id,
        'message_id': message_id,
        'multi_value_legacy_extended_property_id': multi_value_legacy_extended_property_id,
        'if_match': if_match,
    }
    return client.delete_multi_value_extended_properties(**params)
def mail_user_mail_folder_message_delete_single_value_extended_property(client, user_id, mail_folder_id,
                                                                        message_id,
                                                                        single_value_legacy_extended_property_id,
                                                                        if_match=None):
    """Delete a single-value extended property from a message."""
    params = {
        'user_id': user_id,
        'mail_folder_id': mail_folder_id,
        'message_id': message_id,
        'single_value_legacy_extended_property_id': single_value_legacy_extended_property_id,
        'if_match': if_match,
    }
    return client.delete_single_value_extended_properties(**params)
def mail_user_mail_folder_message_list_attachment(client, user_id, mail_folder_id, message_id,
                                                  orderby=None, select=None, expand=None):
    """List the attachments of a message in a user's mail folder."""
    params = {
        'user_id': user_id,
        'mail_folder_id': mail_folder_id,
        'message_id': message_id,
        'orderby': orderby,
        'select': select,
        'expand': expand,
    }
    return client.list_attachments(**params)
def mail_user_mail_folder_message_list_extension(client, user_id, mail_folder_id, message_id,
                                                 orderby=None, select=None, expand=None):
    """List the extensions of a message in a user's mail folder."""
    params = {
        'user_id': user_id,
        'mail_folder_id': mail_folder_id,
        'message_id': message_id,
        'orderby': orderby,
        'select': select,
        'expand': expand,
    }
    return client.list_extensions(**params)
def mail_user_mail_folder_message_list_multi_value_extended_property(client, user_id, mail_folder_id,
                                                                     message_id, orderby=None,
                                                                     select=None, expand=None):
    """List the multi-value extended properties of a message."""
    params = {
        'user_id': user_id,
        'mail_folder_id': mail_folder_id,
        'message_id': message_id,
        'orderby': orderby,
        'select': select,
        'expand': expand,
    }
    return client.list_multi_value_extended_properties(**params)
def mail_user_mail_folder_message_list_single_value_extended_property(client,
                                                                      user_id,
                                                                      mail_folder_id,
                                                                      message_id,
                                                                      orderby=None,
                                                                      select=None,
                                                                      expand=None):
    """List single-value legacy extended properties of a folder message."""
    params = dict(user_id=user_id,
                  mail_folder_id=mail_folder_id,
                  message_id=message_id,
                  orderby=orderby,
                  select=select,
                  expand=expand)
    return client.list_single_value_extended_properties(**params)
def mail_user_mail_folder_message_show_attachment(client,
                                                  user_id,
                                                  mail_folder_id,
                                                  message_id,
                                                  attachment_id,
                                                  select=None,
                                                  expand=None):
    """Retrieve a single attachment of a message in a user's mail folder."""
    params = dict(user_id=user_id,
                  mail_folder_id=mail_folder_id,
                  message_id=message_id,
                  attachment_id=attachment_id,
                  select=select,
                  expand=expand)
    return client.get_attachments(**params)
def mail_user_mail_folder_message_show_extension(client,
                                                 user_id,
                                                 mail_folder_id,
                                                 message_id,
                                                 extension_id,
                                                 select=None,
                                                 expand=None):
    """Retrieve a single extension of a message in a user's mail folder."""
    params = dict(user_id=user_id,
                  mail_folder_id=mail_folder_id,
                  message_id=message_id,
                  extension_id=extension_id,
                  select=select,
                  expand=expand)
    return client.get_extensions(**params)
def mail_user_mail_folder_message_show_multi_value_extended_property(client,
                                                                     user_id,
                                                                     mail_folder_id,
                                                                     message_id,
                                                                     multi_value_legacy_extended_property_id,
                                                                     select=None,
                                                                     expand=None):
    """Retrieve a multi-value legacy extended property of a folder message."""
    params = dict(user_id=user_id,
                  mail_folder_id=mail_folder_id,
                  message_id=message_id,
                  multi_value_legacy_extended_property_id=multi_value_legacy_extended_property_id,
                  select=select,
                  expand=expand)
    return client.get_multi_value_extended_properties(**params)
def mail_user_mail_folder_message_show_single_value_extended_property(client,
                                                                      user_id,
                                                                      mail_folder_id,
                                                                      message_id,
                                                                      single_value_legacy_extended_property_id,
                                                                      select=None,
                                                                      expand=None):
    """Retrieve a single-value legacy extended property of a folder message."""
    params = dict(user_id=user_id,
                  mail_folder_id=mail_folder_id,
                  message_id=message_id,
                  single_value_legacy_extended_property_id=single_value_legacy_extended_property_id,
                  select=select,
                  expand=expand)
    return client.get_single_value_extended_properties(**params)
def mail_user_mail_folder_message_update_attachment(client,
                                                    user_id,
                                                    mail_folder_id,
                                                    message_id,
                                                    attachment_id,
                                                    content_type,
                                                    id_=None,
                                                    is_inline=None,
                                                    last_modified_date_time=None,
                                                    name=None,
                                                    size=None):
    """Update an attachment of a message in a user's mail folder.

    Unset optional fields are forwarded as ``None`` in the request body,
    matching the generated-client convention used throughout this module.
    """
    body = {
        'id': id_,
        'content_type': content_type,
        'is_inline': is_inline,
        'last_modified_date_time': last_modified_date_time,
        'name': name,
        'size': size,
    }
    return client.update_attachments(user_id=user_id,
                                     mail_folder_id=mail_folder_id,
                                     message_id=message_id,
                                     attachment_id=attachment_id,
                                     body=body)
def mail_user_mail_folder_message_update_extension(client,
                                                   user_id,
                                                   mail_folder_id,
                                                   message_id,
                                                   extension_id,
                                                   id_=None):
    """Update an extension of a message in a user's mail folder."""
    body = {'id': id_}
    return client.update_extensions(user_id=user_id,
                                    mail_folder_id=mail_folder_id,
                                    message_id=message_id,
                                    extension_id=extension_id,
                                    body=body)
def mail_user_mail_folder_message_update_multi_value_extended_property(client,
                                                                       user_id,
                                                                       mail_folder_id,
                                                                       message_id,
                                                                       multi_value_legacy_extended_property_id,
                                                                       id_=None,
                                                                       value=None):
    """Update a multi-value legacy extended property of a folder message."""
    body = {'id': id_, 'value': value}
    return client.update_multi_value_extended_properties(
        user_id=user_id,
        mail_folder_id=mail_folder_id,
        message_id=message_id,
        multi_value_legacy_extended_property_id=multi_value_legacy_extended_property_id,
        body=body)
def mail_user_mail_folder_message_update_single_value_extended_property(client,
                                                                        user_id,
                                                                        mail_folder_id,
                                                                        message_id,
                                                                        single_value_legacy_extended_property_id,
                                                                        id_=None,
                                                                        value=None):
    """Update a single-value legacy extended property of a folder message."""
    body = {'id': id_, 'value': value}
    return client.update_single_value_extended_properties(
        user_id=user_id,
        mail_folder_id=mail_folder_id,
        message_id=message_id,
        single_value_legacy_extended_property_id=single_value_legacy_extended_property_id,
        body=body)
def mail_user_message_create_attachment(client,
                                        user_id,
                                        message_id,
                                        content_type,
                                        id_=None,
                                        is_inline=None,
                                        last_modified_date_time=None,
                                        name=None,
                                        size=None):
    """Create an attachment on a user's message.

    Unset optional fields are forwarded as ``None`` in the request body.
    """
    body = {
        'id': id_,
        'content_type': content_type,
        'is_inline': is_inline,
        'last_modified_date_time': last_modified_date_time,
        'name': name,
        'size': size,
    }
    return client.create_attachments(user_id=user_id,
                                     message_id=message_id,
                                     body=body)
def mail_user_message_create_extension(client,
                                       user_id,
                                       message_id,
                                       id_=None):
    """Create an extension on a user's message."""
    body = {'id': id_}
    return client.create_extensions(user_id=user_id,
                                    message_id=message_id,
                                    body=body)
def mail_user_message_create_multi_value_extended_property(client,
                                                           user_id,
                                                           message_id,
                                                           id_=None,
                                                           value=None):
    """Create a multi-value legacy extended property on a user's message."""
    body = {'id': id_, 'value': value}
    return client.create_multi_value_extended_properties(user_id=user_id,
                                                         message_id=message_id,
                                                         body=body)
def mail_user_message_create_single_value_extended_property(client,
                                                            user_id,
                                                            message_id,
                                                            id_=None,
                                                            value=None):
    """Create a single-value legacy extended property on a user's message."""
    body = {'id': id_, 'value': value}
    return client.create_single_value_extended_properties(user_id=user_id,
                                                          message_id=message_id,
                                                          body=body)
def mail_user_message_delete_attachment(client,
                                        user_id,
                                        message_id,
                                        attachment_id,
                                        if_match=None):
    """Delete an attachment of a user's message.

    :param if_match: optional ETag used for optimistic concurrency control.
    """
    params = dict(user_id=user_id,
                  message_id=message_id,
                  attachment_id=attachment_id,
                  if_match=if_match)
    return client.delete_attachments(**params)
def mail_user_message_delete_extension(client,
                                       user_id,
                                       message_id,
                                       extension_id,
                                       if_match=None):
    """Delete an extension of a user's message."""
    params = dict(user_id=user_id,
                  message_id=message_id,
                  extension_id=extension_id,
                  if_match=if_match)
    return client.delete_extensions(**params)
def mail_user_message_delete_multi_value_extended_property(client,
                                                           user_id,
                                                           message_id,
                                                           multi_value_legacy_extended_property_id,
                                                           if_match=None):
    """Delete a multi-value legacy extended property of a user's message."""
    params = dict(user_id=user_id,
                  message_id=message_id,
                  multi_value_legacy_extended_property_id=multi_value_legacy_extended_property_id,
                  if_match=if_match)
    return client.delete_multi_value_extended_properties(**params)
def mail_user_message_delete_single_value_extended_property(client,
                                                            user_id,
                                                            message_id,
                                                            single_value_legacy_extended_property_id,
                                                            if_match=None):
    """Delete a single-value legacy extended property of a user's message."""
    params = dict(user_id=user_id,
                  message_id=message_id,
                  single_value_legacy_extended_property_id=single_value_legacy_extended_property_id,
                  if_match=if_match)
    return client.delete_single_value_extended_properties(**params)
def mail_user_message_list_attachment(client,
                                      user_id,
                                      message_id,
                                      orderby=None,
                                      select=None,
                                      expand=None):
    """List attachments of a user's message."""
    params = dict(user_id=user_id,
                  message_id=message_id,
                  orderby=orderby,
                  select=select,
                  expand=expand)
    return client.list_attachments(**params)
def mail_user_message_list_extension(client,
                                     user_id,
                                     message_id,
                                     orderby=None,
                                     select=None,
                                     expand=None):
    """List extensions of a user's message."""
    params = dict(user_id=user_id,
                  message_id=message_id,
                  orderby=orderby,
                  select=select,
                  expand=expand)
    return client.list_extensions(**params)
def mail_user_message_list_multi_value_extended_property(client,
                                                         user_id,
                                                         message_id,
                                                         orderby=None,
                                                         select=None,
                                                         expand=None):
    """List multi-value legacy extended properties of a user's message."""
    params = dict(user_id=user_id,
                  message_id=message_id,
                  orderby=orderby,
                  select=select,
                  expand=expand)
    return client.list_multi_value_extended_properties(**params)
def mail_user_message_list_single_value_extended_property(client,
                                                          user_id,
                                                          message_id,
                                                          orderby=None,
                                                          select=None,
                                                          expand=None):
    """List single-value legacy extended properties of a user's message."""
    params = dict(user_id=user_id,
                  message_id=message_id,
                  orderby=orderby,
                  select=select,
                  expand=expand)
    return client.list_single_value_extended_properties(**params)
def mail_user_message_show_attachment(client,
                                      user_id,
                                      message_id,
                                      attachment_id,
                                      select=None,
                                      expand=None):
    """Retrieve a single attachment of a user's message."""
    params = dict(user_id=user_id,
                  message_id=message_id,
                  attachment_id=attachment_id,
                  select=select,
                  expand=expand)
    return client.get_attachments(**params)
def mail_user_message_show_extension(client,
                                     user_id,
                                     message_id,
                                     extension_id,
                                     select=None,
                                     expand=None):
    """Retrieve a single extension of a user's message."""
    params = dict(user_id=user_id,
                  message_id=message_id,
                  extension_id=extension_id,
                  select=select,
                  expand=expand)
    return client.get_extensions(**params)
def mail_user_message_show_multi_value_extended_property(client,
user_id,
message_id,
multi_value_legacy_extended_property_id,
| |
map -> backward map
self.reverse_map: Dict[nd.Node, Union[nd.Node, nd.Map]] = {}
#: mapping from forward_node -> BackwardResult for that node
self.result_map: Dict[nd.Node, BackwardResult] = {}
#: mapping from forward name to gradient name for arrays
self.array_grad_map: Dict[str, str] = {}
# checks if backward has already been applied
self._applied = False
self.apply_strict = apply_strict
self.zero_non_transients = zero_non_transients
for outp in self.given_gradients:
if outp not in self.forward_state:
raise AutoDiffException(
"Could not find output {} in state {}".format(
outp, self.forward_state))
for inp in self.required_gradients:
if inp not in self.forward_state:
raise AutoDiffException(
"Could not find input {} in state {}".format(
inp, self.forward_state))
# check for inplace operations (i.e. duplicated access nodes)
if _has_inplace_operation(self.forward_state):
raise AutoDiffException(
"Inplace operations are currently not supported in autodiff")
if sdfg is backward_sdfg:
# this only makes sense if the output is a single scalar.
if len(given_gradients) != 1:
raise AutoDiffException(
"When the forward sdfg is the same as the backward sdfg, outputs must be a"
"single scalar")
if not _is_int_value(
sdfg.arrays[given_gradients[0].data].total_size, 1):
raise AutoDiffException(
"When the forward sdfg is the same as the backward sdfg, outputs must be a"
"single scalar")
self.separate_sdfgs = False
else:
self.separate_sdfgs = True
    def _expand_nodes(self, subgraph: dstate.StateSubgraphView) -> bool:
        """ Expand all library nodes in the graph to pure implementations.

            Nodes that already have a registered backward implementation are
            left untouched; ONNX ops are expanded via a registered
            ``ONNXForward``, and other library nodes via their own expansion
            mechanism, preferring implementations whose name contains "pure".

            :param subgraph: the subgraph whose library nodes to expand.
            :return: whether something was expanded.
        """
        expanded_something = False
        for node, state in subgraph.all_nodes_recursive():
            if isinstance(state, dstate.StateSubgraphView):
                # all_nodes_recursive yields views; we need the actual state
                state = state.graph
            # check if the node exists in the backward implementation repository
            # (if it does, it must not be expanded: it can be reversed as-is)
            if find_backward_implementation(state.parent, state,
                                            node) is not None:
                continue
            # only check others if we didn't break out of the above loop
            if isinstance(node, ONNXOp):
                impls = ONNXForward.registered_implementations(
                    node.schema.name)
                # order the implementations so that implementations containing "pure" are tried first
                impls = [i for name, i in impls if "pure" in name
                         ] + [i for name, i in impls if "pure" not in name]
                for impl in impls:
                    if impl.forward_can_be_applied(node, state, self.sdfg):
                        # try to apply the expansion; the ad-hoc class adapts
                        # the ONNXForward to the ExpandTransformation interface
                        class Expansion(xf.ExpandTransformation):
                            environments = []
                            _expansion_result = None
                            @classmethod
                            def expansion(cls, node, state, sdfg):
                                return impl.forward(node, state, sdfg)
                            @staticmethod
                            def annotates_memlets() -> bool:
                                return True
                        Expansion._match_node = xf.PatternNode(type(node))
                        Expansion.apply_to(state.parent,
                                           verify=False,
                                           _match_node=node)
                        expanded_something = True
                        break
                # This could later on be changed to check if the expansion is differentiable and if not, move
                # on to the next expansion. For now we will just apply the first one that matches, prioritizing ones that
                # have "pure" in the name
            if isinstance(node,
                          nd.LibraryNode) and not isinstance(node, ONNXOp):
                # try to select an expansion, again preferring "pure" ones
                if hasattr(node, "implementations"):
                    implementations = node.implementations
                    pure_candidates = [
                        name for name, impl in implementations.items()
                        if "pure" in name
                    ]
                    if len(pure_candidates) > 0:
                        expansion = pure_candidates[0]
                    else:
                        expansion = node.implementation
                else:
                    expansion = node.implementation
                node.implementation = expansion
                node.expand(state.parent, state)
                expanded_something = True
        return expanded_something
    def _disambiguate_direction_dependent_views(self):
        """ Consider the following subgraph:
            (A) -- y --> (n) -- x --> (C)
            In dace, if n is a View node and A and C are access nodes, and y and x have data set to A.data and
            C.data respectively, the semantics of the graph depend on the order in which it is executed, i.e. reversing
            the subgraph doesn't perform as expected anymore. To disambiguate this case, we set y.data to the View's
            data.
        """
        for n in self.forward_state.nodes():
            if isinstance(
                    n, nd.AccessNode) and type(n.desc(self.sdfg)) is dt.View:
                in_edges = self.forward_state.in_edges(n)
                out_edges = self.forward_state.out_edges(n)
                # only the single-producer / single-consumer pattern is ambiguous
                if len(in_edges) == 1 and len(out_edges) == 1:
                    A = in_edges[0].src
                    y = in_edges[0].data
                    C = out_edges[0].dst
                    x = out_edges[0].data
                    if (isinstance(A, nd.AccessNode)
                            and isinstance(C, nd.AccessNode)
                            and y.data == A.data and x.data == C.data):
                        # flip the memlet so it is expressed from the view's
                        # perspective, making the graph order-independent
                        y.subset, y.other_subset = y.other_subset, y.subset
                        y.data = n.data
                        y.try_initialize(self.sdfg, self.forward_state,
                                         in_edges[0])
    def backward(
        self
    ) -> Tuple[BackwardResult, Dict[str, dt.Array], Dict[str, dt.Array]]:
        """ Generate the backward pass in backward_state.

            :return: tuple of:
                     * the backward result (see :class:`~daceml.autodiff.backward_implementation.BackwardResult`)
                     * dict of data descriptors for the gradients (i.e. the outputs of the backward pass)
                     * dict of data descriptors of required outputs from the forward pass. These need to be added to the
                       parent SDFG of the backward pass.
            :raises AutoDiffException: if called twice, or if the subgraph
                                       contains non-float data edges.
        """
        # this generator is single-use: reversing mutates shared state
        if self._applied:
            raise AutoDiffException(
                "Backward may only be called once. Instantiate a new BackwardPassGenerator."
            )
        forward_subgraph = self._find_subgraph_to_differentiate()
        # expand until there is nothing left to expand
        while self._expand_nodes(forward_subgraph):
            # Nodes have been expanded again on the expanded graph; recalculate the forward graph
            forward_subgraph = self._find_subgraph_to_differentiate()
        if self.apply_strict:
            self.sdfg.apply_strict_transformations()
            # strict transformations can change node identity; recompute
            forward_subgraph = self._find_subgraph_to_differentiate()
        # check that all edges are float (autodiff is only defined for floats)
        for edge, parent_subgraph in forward_subgraph.all_edges_recursive():
            if isinstance(parent_subgraph, SDFGState):
                parent_sdfg = parent_subgraph.parent
            elif isinstance(parent_subgraph, dstate.StateSubgraphView):
                parent_sdfg = parent_subgraph.graph.parent
            elif isinstance(parent_subgraph, SDFG):
                # if there are any fancy things on the interstate edges we should probably throw an error
                continue
            else:
                raise AutoDiffException("Unexpected subgraph structure")
            if edge.data.data:
                edge_type = parent_sdfg.arrays[edge.data.data].dtype
                if edge_type not in [dace.float16, dace.float32, dace.float64]:
                    raise AutoDiffException(
                        f"Expected Subgraph to differentiate to only contain float edges, but data {edge.data}"
                        f" on edge {edge} has type {edge_type}")
        self._disambiguate_direction_dependent_views()
        # recursively reverse the subgraph
        self._reverse_subgraph(forward_subgraph)
        self._applied = True
        # in some cases (accessnode -> accessnode), the descriptors for the gradients of the function outputs are not
        # added yet. Add them now
        for given_grad in self.given_gradients:
            if self.array_grad_name(
                    given_grad.data) not in self.backward_sdfg.arrays:
                self._add_gradient_data_descriptor(given_grad.data)
        # prepare the output: map forward array names to their gradient names
        required_grad_names = {
            name.data: self.array_grad_name(name.data)
            for name in self.required_gradients
        }
        given_grad_names = {
            name.data: self.array_grad_name(name.data)
            for name in self.given_gradients
        }
        result = BackwardResult(required_grad_names=required_grad_names,
                                given_grad_names=given_grad_names)
        return result, self.backward_grad_arrays, self.backward_input_arrays
def _find_subgraph_to_differentiate(self) -> dstate.StateSubgraphView:
""" Determine which nodes we need to reverse; this forms the subgraph we will differentiate:
we do a reverse BFS and a forward BFS, then take the intersection of nodes found.
To calculate the gradients for a node x in ``required_gradients``, we need to sum up consider the gradient
contributions from every node y where x is used as an input. We thus first do a forward BFS. Also, the
gradient contributions of all nodes that are not connected by a path to a ``given_gradient`` node are
implicitly zero. Thus, we take the intersection of the two BFSs.
"""
forward_nodes = {
n
for e in self.forward_state.bfs_edges(self.required_gradients)
for n in [e.src, e.dst]
}
backward_nodes = {
n
for e in self.forward_state.bfs_edges(self.given_gradients,
reverse=True)
for n in [e.src, e.dst]
}
forward_subgraph = dstate.StateSubgraphView(
self.forward_state,
list(forward_nodes.intersection(backward_nodes)))
return forward_subgraph
def array_grad_name(self, forward_name: str) -> str:
""" Return the gradient name of a name from the forward pass """
if forward_name not in self.array_grad_map:
self.array_grad_map[forward_name] = \
find_str_not_in_set(set(self.backward_sdfg.arrays), forward_name + "_gradient")
return self.array_grad_map[forward_name]
    def _init_grad(self, data: str):
        """ Add a state where `data` is initialized with zero.

            self.sdfg.arrays[data] should have type Union[dt.Array, dt.Scalar, dt.View]

            :param data: name of the gradient array in the backward SDFG.
            :raises ValueError: if the array's storage is accessible from
                                neither CPU nor GPU schedules.
            :raises AutoDiffException: for unsupported descriptor types.
        """
        arr = self.backward_sdfg.arrays[data]
        # No need to initialize if gradients point to outputs
        if not self.zero_non_transients and not arr.transient:
            return
        # zero-init must run before any gradient accumulation, hence a
        # predecessor state
        state = self.backward_sdfg.add_state_before(self.backward_state,
                                                    label="init_" + data)
        scalar = 0
        # pick a schedule that can actually access the array's storage
        if dtypes.can_access(dtypes.ScheduleType.CPU_Multicore, arr.storage):
            cuda = False
        elif dtypes.can_access(dtypes.ScheduleType.GPU_Default, arr.storage):
            cuda = True
        else:
            raise ValueError(f"Unsupported storage {arr.storage}")
        if type(arr) is dt.Array or type(arr) is dt.Scalar:
            # elementwise map writing the zero scalar over the full shape
            state.add_mapped_tasklet(
                "_init_" + data + "_", {
                    "i{}".format(i): "0:{}".format(shape)
                    for i, shape in enumerate(arr.shape)
                }, {},
                "__out = {}".format(scalar), {
                    "__out":
                    dace.Memlet.simple(
                        data, ", ".join("i{}".format(i)
                                        for i in range(len(arr.shape))))
                },
                schedule=dtypes.ScheduleType.GPU_Device
                if cuda else dtypes.ScheduleType.Default,
                external_edges=True)
        elif type(arr) is dt.View:
            # not need to initialize: the viewed array will always be visited
            # (since a view can never be a required grad), and thus the viewed array will be initialized.
            pass
        else:
            raise AutoDiffException(
                "Unsupported data descriptor {}".format(arr))
def _reverse_subgraph(self, subgraph: dstate.StateSubgraphView):
""" Reverse a given subgraph. All nodes in the subgraph will be reversed. """
# a reversed topological sort is a topological sort on the reverse graph
for node in reversed(
list(
dutils.dfs_topological_sort(subgraph,
subgraph.source_nodes()))):
try:
# output names on the forward node
# (for which the gradient will be connected as an input on the reverse node)
given_gradients = [
edge.src_conn for edge in subgraph.out_edges(node)
if _path_src_node_in_subgraph(edge, subgraph)
]
# input names | |
O0 . I1Ii111
if 45 - 45: I1Ii111
if 83 - 83: OoOoOO00 . OoooooooOO
if 58 - 58: i11iIiiIii + OoooooooOO % OoooooooOO / IiII / i11iIiiIii
if 62 - 62: OoO0O00 / I1ii11iIi11i
if 7 - 7: OoooooooOO . IiII
def lisp_is_x86():
    """Return True when the host reports a 32- or 64-bit x86 machine type."""
    machine = platform.machine()
    return machine in ("x86", "i686", "x86_64")
if 32 - 32: Ii1I * O0
if 100 - 100: ooOoO0o % iIii1I11I1II1 * II111iiii - iII111i
if 92 - 92: ooOoO0o
if 22 - 22: Oo0Ooo % iII111i * I1ii11iIi11i / OOooOOo % i11iIiiIii * I11i
if 95 - 95: OoooooooOO - IiII * I1IiiI + OoOoOO00
if 10 - 10: o0oOOo0O0Ooo / i11iIiiIii
if 92 - 92: I11i . I1Ii111
def lisp_is_linux():
    """Return True when the host OS identifies itself as Linux."""
    os_name = platform.uname()[0]
    return os_name == "Linux"
if 85 - 85: I1ii11iIi11i . I1Ii111
if 78 - 78: ooOoO0o * I1Ii111 + iIii1I11I1II1 + iIii1I11I1II1 / I1Ii111 . Ii1I
if 97 - 97: ooOoO0o / I1Ii111 % i1IIi % I1ii11iIi11i
if 18 - 18: iIii1I11I1II1 % I11i
if 95 - 95: ooOoO0o + i11iIiiIii * I1Ii111 - i1IIi * I1Ii111 - iIii1I11I1II1
if 75 - 75: OoooooooOO * IiII
if 9 - 9: IiII - II111iiii + O0 / iIii1I11I1II1 / i11iIiiIii
if 39 - 39: IiII * Oo0Ooo + iIii1I11I1II1 - IiII + OOooOOo
def lisp_process_logfile ( ) :
    # Re-open the per-process logfile if it has been rotated away.
    # While the expected file exists nothing needs to happen; once an external
    # rotation removes it, stdout is redirected to a fresh file of the same
    # name and a banner is printed to mark the rotation point.
    o0 = "./logs/lisp-{}.log" . format ( lisp_log_id )
    if ( os . path . exists ( o0 ) ) : return
    if 30 - 30: O0 * OoooooooOO
    # close the old stream before rebinding stdout to the new logfile;
    # order matters: print statements elsewhere write to sys.stdout
    sys . stdout . close ( )
    sys . stdout = open ( o0 , "a" )
    if 38 - 38: IiII - I1ii11iIi11i . OoOoOO00 - I1Ii111 . OoooooooOO
    lisp_print_banner ( bold ( "logfile rotation" , False ) )
    return
if 89 - 89: iIii1I11I1II1
if 21 - 21: I11i % I11i
if 27 - 27: i11iIiiIii / I1ii11iIi11i
if 84 - 84: Oo0Ooo
if 43 - 43: oO0o - OoooooooOO
if 3 - 3: O0 / iII111i
if 31 - 31: OOooOOo + o0oOOo0O0Ooo . OoooooooOO
if 89 - 89: II111iiii + i1IIi + II111iiii
def lisp_i_am ( name ) :
    # Record which LISP component this process is ("itr", "etr", "rtr", "mr",
    # "ms", "ddt", or "core"): set the matching module-level role flag, use the
    # name as the log identifier, and cache the short hostname.
    global lisp_log_id , lisp_i_am_itr , lisp_i_am_etr , lisp_i_am_rtr
    global lisp_i_am_mr , lisp_i_am_ms , lisp_i_am_ddt , lisp_i_am_core
    global lisp_hostname
    if 7 - 7: O0 % o0oOOo0O0Ooo + I1ii11iIi11i * iII111i - iII111i
    lisp_log_id = name
    if ( name == "itr" ) : lisp_i_am_itr = True
    if ( name == "etr" ) : lisp_i_am_etr = True
    if ( name == "rtr" ) : lisp_i_am_rtr = True
    if ( name == "mr" ) : lisp_i_am_mr = True
    if ( name == "ms" ) : lisp_i_am_ms = True
    if ( name == "ddt" ) : lisp_i_am_ddt = True
    if ( name == "core" ) : lisp_i_am_core = True
    if 42 - 42: OoOoOO00 * OoOoOO00 * I1Ii111 . I11i
    if 51 - 51: OOooOOo % iIii1I11I1II1 - OoooooooOO % ooOoO0o * iIii1I11I1II1 % OoO0O00
    if 99 - 99: oO0o * II111iiii * I1Ii111
    if 92 - 92: Oo0Ooo
    if 40 - 40: OoOoOO00 / IiII
    # store only the first label of the FQDN (strip everything after the dot)
    lisp_hostname = socket . gethostname ( )
    OOOoO000 = lisp_hostname . find ( "." )
    if ( OOOoO000 != - 1 ) : lisp_hostname = lisp_hostname [ 0 : OOOoO000 ]
    return
if 57 - 57: II111iiii
if 54 - 54: Oo0Ooo + oO0o + i11iIiiIii
if 28 - 28: oO0o
if 70 - 70: IiII
if 34 - 34: I1Ii111 % IiII
if 3 - 3: II111iiii / OOooOOo + IiII . ooOoO0o . OoO0O00
if 83 - 83: oO0o + OoooooooOO
def lprint ( * args ) :
    # Debug-level logger (Python 2 syntax): prints a timestamped line tagged
    # with this process's log id. No-op unless lisp_debug_logging is enabled.
    if ( lisp_debug_logging == False ) : return
    if 22 - 22: Ii1I % iII111i * OoooooooOO - o0oOOo0O0Ooo / iIii1I11I1II1
    # make sure stdout still points at the current (possibly rotated) logfile
    lisp_process_logfile ( )
    # timestamp truncated from microseconds to milliseconds
    III11I1 = datetime . datetime . now ( ) . strftime ( "%m/%d/%y %H:%M:%S.%f" )
    III11I1 = III11I1 [ : - 3 ]
    print "{}: {}:" . format ( III11I1 , lisp_log_id ) ,
    for Oo in args : print Oo ,
    print ""
    # best-effort flush so log lines appear promptly; ignore stream errors
    try : sys . stdout . flush ( )
    except : pass
    return
if 84 - 84: OoOoOO00 / I11i * iII111i / oO0o - i11iIiiIii . Oo0Ooo
if 60 - 60: I1ii11iIi11i * I1IiiI
if 17 - 17: OOooOOo % Oo0Ooo / I1ii11iIi11i . IiII * OOooOOo - II111iiii
if 41 - 41: Ii1I
if 77 - 77: I1Ii111
if 65 - 65: II111iiii . I1IiiI % oO0o * OoO0O00
if 38 - 38: OoOoOO00 / iII111i % Oo0Ooo
if 11 - 11: iII111i - oO0o + II111iiii - iIii1I11I1II1
def dprint ( * args ) :
    # Data-plane logger: forwards to lprint() only when data-plane logging
    # is enabled (a separate, noisier switch than lisp_debug_logging).
    if ( lisp_data_plane_logging ) : lprint ( * args )
    return
if 7 - 7: IiII - I11i / II111iiii * Ii1I . iII111i * iII111i
if 61 - 61: I11i % ooOoO0o - OoO0O00 / Oo0Ooo
if 4 - 4: OoooooooOO - i1IIi % Ii1I - OOooOOo * o0oOOo0O0Ooo
if 85 - 85: OoooooooOO * iIii1I11I1II1 . iII111i / OoooooooOO % I1IiiI % O0
if 36 - 36: Ii1I / II111iiii / IiII / IiII + I1ii11iIi11i
if 95 - 95: IiII
if 51 - 51: II111iiii + IiII . i1IIi . I1ii11iIi11i + OoOoOO00 * I1IiiI
if 72 - 72: oO0o + oO0o / II111iiii . OoooooooOO % Ii1I
def debug ( * args ) :
    # Unconditional debug print (Python 2 syntax), visually bracketed with
    # red ">>>"/"<<<" markers so ad-hoc traces stand out in the logfile.
    lisp_process_logfile ( )
    if 49 - 49: oO0o . OoO0O00 - Oo0Ooo * OoooooooOO . Oo0Ooo
    # timestamp truncated from microseconds to milliseconds
    III11I1 = datetime . datetime . now ( ) . strftime ( "%m/%d/%y %H:%M:%S.%f" )
    III11I1 = III11I1 [ : - 3 ]
    if 2 - 2: OoooooooOO % OOooOOo
    print red ( ">>>" , False ) ,
    print "{}:" . format ( III11I1 ) ,
    for Oo in args : print Oo ,
    print red ( "<<<\n" , False )
    # best-effort flush; ignore stream errors
    try : sys . stdout . flush ( )
    except : pass
    return
if 63 - 63: I1IiiI % iIii1I11I1II1
if 39 - 39: iII111i / II111iiii / I1ii11iIi11i % I1IiiI
if 89 - 89: I1Ii111 + OoooooooOO + I1Ii111 * i1IIi + iIii1I11I1II1 % I11i
if 59 - 59: OOooOOo + i11iIiiIii
if 88 - 88: i11iIiiIii - ooOoO0o
if 67 - 67: OOooOOo . Oo0Ooo + OoOoOO00 - OoooooooOO
if 70 - 70: OOooOOo / II111iiii - iIii1I11I1II1 - iII111i
def lisp_print_banner ( string ) :
    # Log a startup/rotation banner with version and hostname.
    # The version string is read lazily (once) from lisp-version.txt using the
    # Python 2-only `commands` module.
    global lisp_version , lisp_hostname
    if 11 - 11: iIii1I11I1II1 . OoooooooOO . II111iiii / i1IIi - I11i
    if ( lisp_version == "" ) :
        lisp_version = commands . getoutput ( "cat lisp-version.txt" )
    if 30 - 30: OoOoOO00
    Ii111 = bold ( lisp_hostname , False )
    lprint ( "lispers.net LISP {} {}, version {}, hostname {}" . format ( string ,
        datetime . datetime . now ( ) , lisp_version , Ii111 ) )
    return
if 67 - 67: O0
if 52 - 52: II111iiii . ooOoO0o / OoOoOO00 / OoooooooOO . i11iIiiIii
if 30 - 30: I11i / Ii1I . IiII . OoooooooOO - Oo0Ooo
if 44 - 44: O0 * OoooooooOO % ooOoO0o + II111iiii
if 39 - 39: oO0o % iIii1I11I1II1 % O0 % OoooooooOO * I1ii11iIi11i + iII111i
if 68 - 68: Oo0Ooo + i11iIiiIii
if 69 - 69: iIii1I11I1II1 * iIii1I11I1II1 * i11iIiiIii + I1IiiI / OOooOOo % Ii1I
def green(string, html):
    """Render *string* in green: HTML markup when html is truthy, otherwise
    bold ANSI escape codes for terminal output."""
    if html:
        return '<font color="green"><b>{}</b></font>'.format(string)
    return bold("\033[92m" + string + "\033[0m", html)
if 58 - 58: OOooOOo * o0oOOo0O0Ooo + O0 % OOooOOo
if 25 - 25: Oo0Ooo % I1ii11iIi11i * ooOoO0o
if 6 - 6: iII111i . IiII * OoOoOO00 . i1IIi
if 98 - 98: i1IIi
if 65 | |
device:
:param phase:
:return:
"""
model.eval()
nll = AverageMeter()
aux_data = None
for batch in data_loader:
imgs = batch['image'].to(device)
caps = batch['tokens'].to(device)
# TODO Refactor
if model.decoder.uses_aux_data:
aux_data = batch['emotion'].to(device)
logits, caps_sorted, decode_lengths, alphas, sort_ind = model.decoder(model.encoder(imgs), caps, aux_data)
# Since we decoded starting with <sos>, the targets are all words after <sos>, up to <eos>
targets = caps_sorted[:, 1:]
# Remove time-steps that we didn't decode at, or are pads
# pack_padded_sequence is an easy trick to do this
logits = pack_padded_sequence(logits, decode_lengths, batch_first=True)
targets = pack_padded_sequence(targets, decode_lengths, batch_first=True)
# Calculate loss
loss = F.cross_entropy(logits.data, targets.data)
nll.update(loss.item(), sum(decode_lengths))
return nll.avg
@torch.no_grad()
def log_prob_of_caption(model, img, tokens, temperature=1):
    """Given a captioning model, return the log-probability of a caption given an image.
    This version expects a batch of images, each associated with a single caption.
    :param model: encoder/decoder speaker
    :param img: Tensor B x channels x spatial-dims
    :param tokens: Tensor B x max-n-tokens; every caption must start with <sos>
                   and contain an <eos> token
    :param temperature: softmax temperature applied to the logits before
                        normalization (1 leaves them unchanged)
    :return log_probs: Tensor of size B x max-n-tokens holding the log-probs of each token of each caption
    :return lens: per-caption index of <eos> (true token count + 1)
    """
    encoder = model.encoder
    decoder = model.decoder
    assert all(tokens[:, 0] == decoder.vocab.sos)
    max_steps = tokens.shape[1]
    encoder_out = encoder(img)
    batch_size = encoder_out.size(0)
    encoder_dim = encoder_out.size(-1)
    # flatten the spatial grid: (B, num_pixels, encoder_dim)
    encoder_out = encoder_out.view(batch_size, -1, encoder_dim)
    # Create tensors to hold log-probs
    log_probs = torch.zeros(batch_size, max_steps).to(tokens.device)
    h, c = decoder.init_hidden_state(encoder_out)
    for t in range(max_steps - 1):
        # teacher forcing: feed the ground-truth token at step t
        h, c, pred_t, _ = decoder.attend_and_predict_next_word(encoder_out, h, c, tokens[:, t])
        if temperature != 1:
            pred_t /= temperature
        pred_t = F.log_softmax(pred_t, dim=1)
        log_probs[:, t] = pred_t[torch.arange(batch_size), tokens[:, t+1]]  # prob. of guessing next token
    # NOTE(review): assumes exactly one <eos> per row in `tokens`; torch.where
    # would yield extra entries otherwise — confirm against the data pipeline
    lens = torch.where(tokens == decoder.vocab.eos)[1]  # true tokens + 1 for <eos>
    mask = torch.zeros_like(log_probs)
    mask[torch.arange(mask.shape[0]), lens] = 1
    mask = mask.cumsum(dim=1).to(torch.bool)
    log_probs.masked_fill_(mask, 0)  # set to zero all positions after the true size of the caption
    return log_probs, lens
@torch.no_grad()
def sample_captions(model, loader, max_utterance_len, sampling_rule, device, temperature=1,
                    topk=None, drop_unk=True, drop_bigrams=False):
    """Sample a caption for every image served by ``loader``.

    :param model: encoder/decoder captioning model
    :param loader: data loader yielding batches with an 'image' key (and
                   'emotion' when the decoder uses auxiliary data)
    :param max_utterance_len: maximum allowed length of captions
    :param sampling_rule: (str) 'argmax' or 'multinomial', or 'topk'
    :param temperature: softmax temperature applied to the logits
    :param topk: number of candidates kept when sampling_rule == 'topk'
    :param drop_unk: forbid sampling the <unk> token
    :param drop_bigrams: forbid repeating a previously used bigram
    :return:
        all_predictions: (torch cpu LongTensor) N-images x (max_utterance_len+1)
        attention_weights: (torch cpu Tensor) N-images x encoded_image_size (e.g., 7 x 7) x max_utterance_len
        attention_weights[:,0] corresponds to the attention map over the <SOS> symbol
    """
    if sampling_rule not in ['argmax', 'multinomial', 'topk']:
        raise ValueError('Unknown sampling rule.')
    model.eval()
    all_predictions = []
    attention_weights = []
    unk = model.decoder.vocab.unk
    use_aux_data = model.decoder.uses_aux_data
    aux_data = None
    for batch in loader:
        imgs = batch['image'].to(device)
        if use_aux_data:
            aux_data = batch['emotion'].to(device)
        encoder_out = model.encoder(imgs)
        enc_image_size = encoder_out.size(1)
        batch_size = encoder_out.size(0)
        encoder_dim = encoder_out.size(-1)
        # Flatten image
        encoder_out = encoder_out.view(batch_size, -1, encoder_dim)  # (batch_size, num_pixels, encoder_dim)
        # Create tensors to hold word predictions
        max_steps = max_utterance_len + 1  # one extra step for EOS marker
        predictions = torch.zeros(batch_size, max_steps).to(device)
        # Initialize decoder state
        decoder = model.decoder
        h, c = decoder.init_hidden_state(encoder_out)  # (batch_size, decoder_dim)
        # Tensor to store previous words at each step; now they're just <sos>
        prev_words = torch.LongTensor([decoder.vocab.sos] * batch_size).to(device)
        for t in range(max_steps):
            h, c, pred_t, alpha = decoder.attend_and_predict_next_word(encoder_out, h, c, prev_words, aux_data=aux_data)
            if t > 0:  # at t=1 it sees <sos> as the previous word
                alpha = alpha.view(-1, enc_image_size, enc_image_size)  # (bsize, enc_image_size, enc_image_size)
                attention_weights.append(alpha.cpu())
            pred_t /= temperature
            if drop_unk:
                pred_t[:, unk] = -math.inf
            if t > 0:
                # NOTE(review): pred_t[:, prev_words] masks EVERY sample's
                # previous word in ALL rows of the batch, not per-row.
                # If per-sample masking was intended this should be
                # pred_t[torch.arange(batch_size), prev_words] — confirm.
                pred_t[:, prev_words] = -math.inf  # avoid repeating the same word twice
            if t > 1:
                # NOTE(review): same cross-batch masking concern as above
                pred_t[:, predictions[:,t-2].long()] = -math.inf  # avoid repeating the prev-prev word
            if drop_bigrams and t > 1:
                # forbid completing any bigram that already occurred:
                # wherever prev_words appeared before, ban the word that followed it
                prev_usage = predictions[:, :t-1]  # of the previous word (e.g, xx yy xx) (first xx)
                x, y = torch.where(prev_usage == torch.unsqueeze(prev_words, -1))
                y += 1  # word-after-last-in-prev-usage (e.g., yy in above)
                y = prev_usage[x, y].long()
                pred_t[x, y] = -math.inf
            if sampling_rule == 'argmax':
                prev_words = torch.argmax(pred_t, 1)
            elif sampling_rule == 'multinomial':
                probability = torch.softmax(pred_t, 1)
                prev_words = torch.multinomial(probability, 1).squeeze_(-1)
            elif sampling_rule == 'topk':
                row_idx = torch.arange(batch_size)
                row_idx = row_idx.view([1, -1]).repeat(topk, 1).t()
                # do soft-max after you zero-out non topk (you could also do this before, ask me/Panos if need be:) )
                val, ind = pred_t.topk(topk, dim=1)
                val = torch.softmax(val, 1)
                probability = torch.zeros_like(pred_t)  # only the top-k logits will have non-zero prob.
                probability[row_idx, ind] = val
                prev_words = torch.multinomial(probability, 1).squeeze_(-1)
            predictions[:, t] = prev_words
        all_predictions.append(predictions.cpu().long())
    all_predictions = torch.cat(all_predictions)
    attention_weights = torch.stack(attention_weights, 1)
    return all_predictions, attention_weights
@torch.no_grad()
def sample_captions_beam_search(model, data_loader, beam_size, device, temperature=1, max_iter=500,
drop_unk=True, drop_bigrams=False):
"""
:param model (encoder, decoder)
:param data_loader:
:param beam_size:
:param drop_unk:
:return:
hypotheses_alphas: list carrying the attention maps over the encoded-pixel space for each produced token.
Note: batch size must be one.
"""
if data_loader.batch_size != 1:
raise ValueError('not implemented for bigger batch-sizes')
model.eval()
decoder = model.decoder
vocab = model.decoder.vocab
captions = list()
hypotheses_alphas = list()
caption_log_prob = list()
aux_feat = None
for batch in tqdm.tqdm(data_loader): # For each image (batch-size = 1)
image = batch['image'].to(device) # (1, 3, H, W)
if model.decoder.uses_aux_data:
aux_data = batch['emotion'].to(device)
aux_feat = model.decoder.auxiliary_net(aux_data)
k = beam_size
encoder_out = model.encoder(image) # (1, enc_image_size, enc_image_size, encoder_dim)
enc_image_size = encoder_out.size(1)
encoder_dim = encoder_out.size(3)
# Flatten encoding
encoder_out = encoder_out.view(1, -1, encoder_dim) # (1, num_pixels, encoder_dim)
num_pixels = encoder_out.size(1)
# We'll treat the problem as having a batch size of k
encoder_out = encoder_out.expand(k, num_pixels, encoder_dim) # (k, num_pixels, encoder_dim)
# Tensor to store top k previous words at each step; now they're just <sos>
k_prev_words = torch.LongTensor([[vocab.sos]] * k).to(device) # (k, 1)
# Tensor to store top k sequences; now they're just <sos>
seqs = k_prev_words # (k, 1)
# Tensor to store top k sequences' scores; now they're just 0
top_k_scores = torch.zeros(k, 1).to(device) # (k, 1)
# Tensor to store top k sequences' alphas; now they're just 1s
seqs_alpha = torch.ones(k, 1, enc_image_size, enc_image_size).to(device) # (k, 1, enc_image_size, enc_image_size)
# Lists to store completed sequences and scores
complete_seqs = list()
complete_seqs_alpha = list()
complete_seqs_scores = list()
# Start decoding
step = 1
h, c = decoder.init_hidden_state(encoder_out)
# s (below) is a number less than or equal to k, because sequences are removed
# from this process once they hit <eos>
while True:
embeddings = decoder.word_embedding(k_prev_words).squeeze(1) # (s, embed_dim)
awe, alpha = decoder.attention(encoder_out, h) # (s, encoder_dim), (s, num_pixels)
alpha = alpha.view(-1, enc_image_size, enc_image_size) # (s, enc_image_size, enc_image_size)
gate = decoder.sigmoid(decoder.f_beta(h)) # gating scalar, (s, encoder_dim)
awe = gate * awe
decoder_input = torch.cat([embeddings, awe], dim=1)
if aux_feat is not None:
af = torch.repeat_interleave(aux_feat, decoder_input.shape[0], dim=0)
decoder_input = torch.cat([decoder_input, af], dim=1)
h, c = decoder.decode_step(decoder_input, (h, c)) # (s, decoder_dim)
scores = decoder.next_word(h) # (s, vocab_size)
if temperature != 1:
scores /= temperature
scores = F.log_softmax(scores, dim=1)
if drop_unk:
scores[:, vocab.unk] = -math.inf
if drop_bigrams and step > 2:
# drop bi-grams with frequency higher than 1.
prev_usage = seqs[:, :step-1]
x, y = torch.where(prev_usage == k_prev_words)
y += 1 # word-after-last-in-prev-usage
y = seqs[x, y]
scores[x,y] = -math.inf
if step > 2:
## drop x and x
and_token = decoder.vocab('and')
x, y = torch.where(k_prev_words == and_token)
pre_and_word = seqs[x, step-2]
scores[x, pre_and_word] = -math.inf
# Add log-probabilities
scores = top_k_scores.expand_as(scores) + scores # (s, vocab_size)
# For the first step, all k points will have the same scores (since same k previous words, h, c)
if step == 1:
top_k_scores, top_k_words = scores[0].topk(k, 0, True, True) # (s)
else:
# Unroll and find top scores, and their unrolled indices
top_k_scores, top_k_words = scores.view(-1).topk(k, 0, True, True) # (s)
# Convert unrolled indices to actual indices of scores
prev_word_inds = top_k_words // len(vocab) # (s)
next_word_inds = top_k_words % len(vocab) # (s)
# Add new words to sequences
seqs = torch.cat([seqs[prev_word_inds], next_word_inds.unsqueeze(1)], dim=1) # (s, step+1)
seqs_alpha = torch.cat([seqs_alpha[prev_word_inds], alpha[prev_word_inds].unsqueeze(1)],
dim=1) # (s, step+1, enc_image_size, enc_image_size)
# Which sequences are incomplete (didn't reach <eos>)?
incomplete_inds = [ind for ind, word in enumerate(next_word_inds) if word != vocab.eos]
complete_inds = list(set(range(len(next_word_inds))) - set(incomplete_inds))
# Set aside complete sequences
if len(complete_inds) > 0:
complete_seqs.extend(seqs[complete_inds].tolist())
complete_seqs_alpha.extend(seqs_alpha[complete_inds].tolist())
complete_seqs_scores.extend(top_k_scores[complete_inds].tolist())
k -= len(complete_inds) # reduce beam length accordingly
# Proceed with incomplete sequences
if k == 0:
break
| |
# ref ID: 7
# omnitool plugin manifest: read by the plugin loader to register this module.
config = {
    "name": "Dungeon Arena",  #plugin name
    "type": "generator",  #plugin type
    "description": ["Dungeon Arena"]  #description
}
import sys
from collections import defaultdict
if __name__ == "__main__":
    # When run directly (not via omnitool), make the package importable from
    # the parent directory and switch the CWD there so relative paths resolve.
    sys.path.extend(["."])
    import os
    os.chdir("..")
    del (os)
from pgu import gui
from math import sqrt, cos, sin, pi
from random import *
import pygame
from os.path import join as osjoin
from omnitool.database import version, tiles, names
from omnitool.tlib import *
from omnitool.tinterface import *
from omnitool.binarysplit import join, cleanup
from .arena_lib.arenaitems import items as arenaitems
class Generator():
def __init__(self):
pass
def run(self):
from omnitool.shared import lang, theme, exit_prog, __version__
from omnitool.pgu_override import Quitbutton
torch_chances = [lang.at_full, lang.at_blue, lang.at_red, lang.at_green,
lang.at_pink, lang.at_white, lang.at_yellow, lang.at_purple,
lang.at_lime]
name = 'Dungeon Arena'
if hasattr(sys, "frozen"):
import os
os.chdir(os.path.dirname(sys.executable))
def update(slider, label):
label.set_text(str(slider.value))
def update_per(slider, label):
label.set_text(str(slider.value) + "%")
def update_2(slider, label):
label.set_text(str(slider.value * slider.value))
pygame.display.init()
pygame.display.set_caption(name)
def weighted(liste):
n = uniform(0, 1)
for item, weight in liste:
if n < weight:
break
n = n - weight
return item
app = gui.Desktop(theme=theme)
app.connect(gui.QUIT, exit_prog, None)
main = gui.Table()
main.td(gui.Label(lang.a_name), align=-1)
nameinput = gui.Input("Dungeon Arena OT-V" + str(__version__), width=200)
main.td(nameinput, colspan=2)
main.tr()
main.td(gui.Spacer(1, 12))
main.tr()
rooms = gui.HSlider(value=15, min=3, max=31, size=20, height=16, width=150)
roomstext = gui.Label(str(15 * 15))
rooms.connect(gui.CHANGE, update_2, rooms, roomstext)
main.td(gui.Label(lang.a_rooms), align=-1)
main.td(rooms, align=-1)
main.td(roomstext)
main.tr()
roomsize = gui.HSlider(value=12, min=9, max=36, size=20, height=16, width=150)
roomsizetext = gui.Label("12")
roomsize.connect(gui.CHANGE, update, roomsize, roomsizetext)
main.td(gui.Label(lang.a_sidelen), align=-1)
main.td(roomsize, align=-1)
main.td(roomsizetext)
main.tr()
corridor = gui.HSlider(value=6, min=3, max=9, size=20, height=16, width=150)
corridortext = gui.Label("6")
corridor.connect(gui.CHANGE, update, corridor, corridortext)
main.td(gui.Label(lang.a_corlen), align=-1)
main.td(corridor, align=-1)
main.td(corridortext)
main.tr()
main.td(gui.Spacer(1, 12))
main.tr()
chestcount = gui.HSlider(value=100, min=0, max=100, size=20, height=16, width=150)
chesttext = gui.Label("100%")
chestcount.connect(gui.CHANGE, update_per, chestcount, chesttext)
main.td(gui.Label(lang.a_chest), align=-1)
main.td(chestcount, align=-1)
main.td(chesttext)
main.tr()
itemcount = gui.HSlider(value=1, min=0, max=20, size=20, height=16, width=150)
itemtext = gui.Label("1")
itemcount.connect(gui.CHANGE, update, itemcount, itemtext)
main.td(gui.Label(lang.a_itemchest), align=-1)
main.td(itemcount, align=-1)
main.td(itemtext)
main.tr()
main.td(gui.Spacer(1, 12))
main.tr()
torchcount = gui.HSlider(value=100, min=0, max=100, size=20, height=16, width=150)
torchtext = gui.Label("100%")
torchcount.connect(gui.CHANGE, update_per, torchcount, torchtext)
main.td(gui.Label(lang.a_light), align=-1)
main.td(torchcount, align=-1)
main.td(torchtext)
main.tr()
main.td(gui.Spacer(1, 12))
main.tr()
torch_sel = []
main.td(gui.Label(lang.at_chances), align=-1)
main.tr()
for t in torch_chances:
torchsel = gui.HSlider(value=1, min=0, max=10, size=20, height=16, width=150)
torcht = gui.Label("1")
torchsel.connect(gui.CHANGE, update, torchsel, torcht)
main.td(gui.Label(t), align=-1)
main.td(torchsel, align=-1)
main.td(torcht)
torch_sel.append(torchsel)
main.tr()
main.tr()
main.td(gui.Spacer(1, 12))
main.tr()
main.td(gui.Label(lang.a_chances), colspan=2, align=-1)
main.td(gui.Spacer(50, 1))
main.tr()
standardcount = gui.HSlider(value=1, min=0, max=10, size=20, height=16, width=150)
standardtext = gui.Label("1")
standardcount.connect(gui.CHANGE, update, standardcount, standardtext)
main.td(gui.Label(lang.a_standard), align=-1)
main.td(standardcount, align=-1)
main.td(standardtext)
main.tr()
crosscount = gui.HSlider(value=1, min=0, max=10, size=20, height=16, width=150)
crosstext = gui.Label("1")
crosscount.connect(gui.CHANGE, update, crosscount, crosstext)
main.td(gui.Label(lang.a_cross), align=-1)
main.td(crosscount, align=-1)
main.td(crosstext)
main.tr()
main.td(gui.Spacer(1, 12))
main.tr()
main.td(Quitbutton(app, lang.pt_start), colspan=3)
app.run(main)
pygame.display.quit()
selection = [("cross", crosscount.value),
("standard", standardcount.value)]
torch_selection = []
x = 0
for v in torch_sel:
torch_selection.append((x, v.value))
x += 1
weight = 0
for item, w in selection:
weight += w
if not weight:
print("UserError: No rooms to place.")
import time
time.sleep(10)
t_weight = 0
for item, w in torch_selection:
t_weight += w
if not t_weight:
print("UserError: No torches to place. To have no lighting ONLY set the lighting slider to zero.")
import time
time.sleep(10)
for x in range(len(torch_selection)):
torch_selection[x] = (torch_selection[x][0], torch_selection[x][1] / float(t_weight))
for x in range(len(selection)):
selection[x] = (selection[x][0], selection[x][1] / float(weight))
name = nameinput.value
chest_mode = itemcount.value
s = roomsize.value
chestchance = chestcount.value
roomwidth = s
roomheight = s
rooms = rooms.value
border = 250
corridor = corridor.value
torches = torchcount.value
size = (rooms * roomwidth + (rooms - 1) * corridor + border * 2,
rooms * roomheight + (rooms - 1) * corridor + border * 2)
dtile = choice([41, 43, 44])
dwall = choice([7, 8, 9])
if not rooms % 2:
spawn = (roomwidth // 2 + size[0] // 2, roomheight // 2 + size[1] // 2)
else:
spawn = (size[0] // 2, size[1] // 2)
print("Starting Generation")
header = {'spawn': spawn, 'groundlevel': -10.0, 'is_bloodmoon': 0,
'dungeon_xy': spawn, 'worldrect': (0, size[0] * 16, 0, size[1] * 16),
'is_meteor_spawned': 0, 'gob_inv_time': 0, 'rocklevel': size[1] // 2 + 0.4,
'gob_inv_x': 0.0, 'is_day': 1, 'shadow_orbs_broken': 0,
'width': size[0], 'version': version, 'gob_inv_type': 0,
'bosses_slain': (0, 0, 1), "npcs_saved": (0, 0, 0), "special_slain": (0, 0, 0), 'gob_inv_size': 0,
'height': size[1],
'ID': 1394008880, 'moonphase': 0, "hardmode": 0,
'name': name, "altars_broken": 0,
'is_a_shadow_orb_broken': 0, 'time': 13500}
is_exe = hasattr(sys, "frozen")
surface = pygame.surface.Surface(size)
surface.fill((254, 1, 255))
pygame.draw.rect(surface, (dtile, dtile, dtile),
((border - corridor, border - corridor),
(size[0] - border * 2 + corridor * 2, size[1] - border * 2 + corridor * 2)))
plat = (19, 0, 0)
chests = []
# contents of the spawn chest
multis = get_multis()
chestsurflist = (multis["woodchest"],
multis["goldchest"],
multis["shadowchest"],
multis["barrelchest"],
multis["canchest"])
for x in range(rooms): #horizontal
pygame.draw.rect(surface, (252, dwall, 0),
((border + corridor, border + roomheight // 2 - 2 + x * (roomheight + corridor)),
((rooms - 1) * (roomwidth + corridor), 4)))
for x in range(rooms): #vertical
pygame.draw.rect(surface, (252, dwall, 0),
((border + roomwidth // 2 - 2 + x * (roomheight + corridor), border + corridor),
(4, (rooms - 1) * (roomheight + corridor))))
for x in range(rooms):
for y in range(rooms):
rtype = weighted(selection)
ltype = weighted(torch_selection)
#print(ltype)
if rtype == "standard":
pos = (border + x * (roomwidth + corridor), border + y * (roomwidth + corridor))
pygame.draw.rect(surface, (252, dwall, 0),
(pos, (roomwidth, roomheight)))
if torches > randint(0, 100):
surface.set_at(pos, (4, 0, ltype))
surface.set_at((pos[0] + roomwidth - 1, pos[1]), (4, 0, ltype))
surface.set_at((pos[0], pos[1] + roomheight - 1), (4, 0, ltype))
surface.set_at((pos[0] + roomwidth - 1, pos[1] + roomheight - 1), (4, 0, ltype))
#platforms on ground with corridor
pygame.draw.line(surface, plat, (pos[0], pos[1] + roomheight // 2 + 2),
(pos[0] + roomwidth - 1, pos[1] + roomheight // 2 + 2))
#over corridor
pygame.draw.line(surface, plat, (pos[0], pos[1] + roomheight // 2 - 3),
(pos[0] + roomwidth - 1, pos[1] + roomheight // 2 - 3))
if y > 0: #lowest platform
pygame.draw.line(surface, plat, (pos[0] + roomwidth // 2 - 2, pos[1] - 1),
(pos[0] + roomwidth // 2 + 1, pos[1] - 1))
if y < rooms: #high platform
pygame.draw.line(surface, plat, (pos[0] + roomwidth // 2 - 2, pos[1] + roomheight),
(pos[0] + roomwidth // 2 + 1, pos[1] + roomheight))
elif rtype == "cross":
pos = (border + x * (roomwidth + corridor), border + y * (roomwidth + corridor))
if torches > randint(0, 100):
surface.set_at(pos, (4, 0, ltype))
surface.set_at((pos[0] + roomwidth - 1, pos[1]), (4, 0, ltype))
surface.set_at((pos[0], pos[1] + roomheight - 1), (4, 0, ltype))
surface.set_at((pos[0] + roomwidth - 1, pos[1] + roomheight - 1), (4, 0, ltype))
#platforms on ground with corridor
pygame.draw.line(surface, plat,
(pos[0] + roomwidth // 2 - 2, pos[1] + roomheight // 2 + 2),
(pos[0] + roomwidth // 2 + 1, pos[1] + roomheight // 2 + 2))
#over corridor
pygame.draw.line(surface, plat,
(pos[0] + roomwidth // 2 - 2, pos[1] + roomheight // 2 - 3),
(pos[0] + roomwidth // 2 + 1, pos[1] + roomheight // 2 - 3))
if y > 0: #lowest platform
pygame.draw.line(surface, plat,
(pos[0] + roomwidth // 2 - 2, pos[1] - 1),
(pos[0] + roomwidth // 2 + 1, pos[1] - 1))
if y < rooms: #high platform
pygame.draw.line(surface, plat,
(pos[0] + roomwidth // 2 - 2, pos[1] + roomheight),
(pos[0] + roomwidth // 2 + 1, pos[1] + roomheight))
else:
print(rtype)
raise AssertionError("")
if chest_mode and chestchance > randint(0, 100):
content = []
for spam in range(chest_mode):
item = choice(list(arenaitems.keys()))
content.append((arenaitems[item], item, 0))
for i in range(20 - len(content)): #chests always have 20 slots
content.append((0, None))
chests.append(((pos[0] + roomwidth // 2 - 1, pos[1] + roomheight // 2), content))
for chest in chests:
#draw the chests into the world texture
surface.blit(choice(chestsurflist), chest[0])
#surface.blit(multis["shadoworb"], chest[0])
# below is to make sure every chest stands on something, so they dont glitch
d = surface.get_at((chest[0][0], chest[0][1] + 2))[0]
if d > 250 or d == 51:
surface.set_at((chest[0][0], chest[0][1] + 2), (0, 0, 0))
d = surface.get_at((chest[0][0] + 1, chest[0][1] + 2))[0]
if | |
if x == SAHPI_ET_OEM:
return "OEM"
if x == SAHPI_ET_USER:
return "USER"
if x == SAHPI_ET_DIMI:
return "DIMI"
if x == SAHPI_ET_DIMI_UPDATE:
return "DIMI_UPDATE"
if x == SAHPI_ET_FUMI:
return "FUMI"
return repr( x )
def toSaHpiEventTypeT( s ):
    """Return the SaHpiEventTypeT value for symbolic name *s*.

    Raises ValueError for an unrecognised name.
    """
    for name, value in (
        ( "RESOURCE",             SAHPI_ET_RESOURCE ),
        ( "DOMAIN",               SAHPI_ET_DOMAIN ),
        ( "SENSOR",               SAHPI_ET_SENSOR ),
        ( "SENSOR_ENABLE_CHANGE", SAHPI_ET_SENSOR_ENABLE_CHANGE ),
        ( "HOTSWAP",              SAHPI_ET_HOTSWAP ),
        ( "WATCHDOG",             SAHPI_ET_WATCHDOG ),
        ( "HPI_SW",               SAHPI_ET_HPI_SW ),
        ( "OEM",                  SAHPI_ET_OEM ),
        ( "USER",                 SAHPI_ET_USER ),
        ( "DIMI",                 SAHPI_ET_DIMI ),
        ( "DIMI_UPDATE",          SAHPI_ET_DIMI_UPDATE ),
        ( "FUMI",                 SAHPI_ET_FUMI ),
    ):
        if s == name:
            return value
    raise ValueError()
#**
# For SaHpiStatusCondTypeT
#**
def fromSaHpiStatusCondTypeT( x ):
    """Return the symbolic name for status-condition type *x*, or repr(x) if unknown."""
    for value, name in (
        ( SAHPI_STATUS_COND_TYPE_SENSOR,   "SENSOR" ),
        ( SAHPI_STATUS_COND_TYPE_RESOURCE, "RESOURCE" ),
        ( SAHPI_STATUS_COND_TYPE_OEM,      "OEM" ),
        ( SAHPI_STATUS_COND_TYPE_USER,     "USER" ),
    ):
        if x == value:
            return name
    return repr( x )

def toSaHpiStatusCondTypeT( s ):
    """Return the SaHpiStatusCondTypeT value for name *s*; raise ValueError if unknown."""
    for value, name in (
        ( SAHPI_STATUS_COND_TYPE_SENSOR,   "SENSOR" ),
        ( SAHPI_STATUS_COND_TYPE_RESOURCE, "RESOURCE" ),
        ( SAHPI_STATUS_COND_TYPE_OEM,      "OEM" ),
        ( SAHPI_STATUS_COND_TYPE_USER,     "USER" ),
    ):
        if s == name:
            return value
    raise ValueError()
#**
# For SaHpiAnnunciatorModeT
#**
def fromSaHpiAnnunciatorModeT( x ):
    """Return the symbolic name for annunciator mode *x*, or repr(x) if unknown."""
    for value, name in (
        ( SAHPI_ANNUNCIATOR_MODE_AUTO,   "AUTO" ),
        ( SAHPI_ANNUNCIATOR_MODE_USER,   "USER" ),
        ( SAHPI_ANNUNCIATOR_MODE_SHARED, "SHARED" ),
    ):
        if x == value:
            return name
    return repr( x )

def toSaHpiAnnunciatorModeT( s ):
    """Return the SaHpiAnnunciatorModeT value for name *s*; raise ValueError if unknown."""
    for value, name in (
        ( SAHPI_ANNUNCIATOR_MODE_AUTO,   "AUTO" ),
        ( SAHPI_ANNUNCIATOR_MODE_USER,   "USER" ),
        ( SAHPI_ANNUNCIATOR_MODE_SHARED, "SHARED" ),
    ):
        if s == name:
            return value
    raise ValueError()
#**
# For SaHpiAnnunciatorTypeT
#**
def fromSaHpiAnnunciatorTypeT( x ):
    """Return the symbolic name for annunciator type *x*, or repr(x) if unknown."""
    for value, name in (
        ( SAHPI_ANNUNCIATOR_TYPE_LED,                 "LED" ),
        ( SAHPI_ANNUNCIATOR_TYPE_DRY_CONTACT_CLOSURE, "DRY_CONTACT_CLOSURE" ),
        ( SAHPI_ANNUNCIATOR_TYPE_AUDIBLE,             "AUDIBLE" ),
        ( SAHPI_ANNUNCIATOR_TYPE_LCD_DISPLAY,         "LCD_DISPLAY" ),
        ( SAHPI_ANNUNCIATOR_TYPE_MESSAGE,             "MESSAGE" ),
        ( SAHPI_ANNUNCIATOR_TYPE_COMPOSITE,           "COMPOSITE" ),
        ( SAHPI_ANNUNCIATOR_TYPE_OEM,                 "OEM" ),
    ):
        if x == value:
            return name
    return repr( x )

def toSaHpiAnnunciatorTypeT( s ):
    """Return the SaHpiAnnunciatorTypeT value for name *s*; raise ValueError if unknown."""
    for value, name in (
        ( SAHPI_ANNUNCIATOR_TYPE_LED,                 "LED" ),
        ( SAHPI_ANNUNCIATOR_TYPE_DRY_CONTACT_CLOSURE, "DRY_CONTACT_CLOSURE" ),
        ( SAHPI_ANNUNCIATOR_TYPE_AUDIBLE,             "AUDIBLE" ),
        ( SAHPI_ANNUNCIATOR_TYPE_LCD_DISPLAY,         "LCD_DISPLAY" ),
        ( SAHPI_ANNUNCIATOR_TYPE_MESSAGE,             "MESSAGE" ),
        ( SAHPI_ANNUNCIATOR_TYPE_COMPOSITE,           "COMPOSITE" ),
        ( SAHPI_ANNUNCIATOR_TYPE_OEM,                 "OEM" ),
    ):
        if s == name:
            return value
    raise ValueError()
#**
# For SaHpiRdrTypeT
#**
def fromSaHpiRdrTypeT( x ):
    """Return the symbolic name for RDR type *x*, or repr(x) if unknown."""
    for value, name in (
        ( SAHPI_NO_RECORD,       "NO_RECORD" ),
        ( SAHPI_CTRL_RDR,        "CTRL_RDR" ),
        ( SAHPI_SENSOR_RDR,      "SENSOR_RDR" ),
        ( SAHPI_INVENTORY_RDR,   "INVENTORY_RDR" ),
        ( SAHPI_WATCHDOG_RDR,    "WATCHDOG_RDR" ),
        ( SAHPI_ANNUNCIATOR_RDR, "ANNUNCIATOR_RDR" ),
        ( SAHPI_DIMI_RDR,        "DIMI_RDR" ),
        ( SAHPI_FUMI_RDR,        "FUMI_RDR" ),
    ):
        if x == value:
            return name
    return repr( x )

def toSaHpiRdrTypeT( s ):
    """Return the SaHpiRdrTypeT value for name *s*; raise ValueError if unknown."""
    for value, name in (
        ( SAHPI_NO_RECORD,       "NO_RECORD" ),
        ( SAHPI_CTRL_RDR,        "CTRL_RDR" ),
        ( SAHPI_SENSOR_RDR,      "SENSOR_RDR" ),
        ( SAHPI_INVENTORY_RDR,   "INVENTORY_RDR" ),
        ( SAHPI_WATCHDOG_RDR,    "WATCHDOG_RDR" ),
        ( SAHPI_ANNUNCIATOR_RDR, "ANNUNCIATOR_RDR" ),
        ( SAHPI_DIMI_RDR,        "DIMI_RDR" ),
        ( SAHPI_FUMI_RDR,        "FUMI_RDR" ),
    ):
        if s == name:
            return value
    raise ValueError()
#**
# For SaHpiParmActionT
#**
def fromSaHpiParmActionT( x ):
    """Return the symbolic name for parameter action *x*, or repr(x) if unknown."""
    for value, name in (
        ( SAHPI_DEFAULT_PARM, "DEFAULT_PARM" ),
        ( SAHPI_SAVE_PARM,    "SAVE_PARM" ),
        ( SAHPI_RESTORE_PARM, "RESTORE_PARM" ),
    ):
        if x == value:
            return name
    return repr( x )

def toSaHpiParmActionT( s ):
    """Return the SaHpiParmActionT value for name *s*; raise ValueError if unknown."""
    for value, name in (
        ( SAHPI_DEFAULT_PARM, "DEFAULT_PARM" ),
        ( SAHPI_SAVE_PARM,    "SAVE_PARM" ),
        ( SAHPI_RESTORE_PARM, "RESTORE_PARM" ),
    ):
        if s == name:
            return value
    raise ValueError()
#**
# For SaHpiResetActionT
#**
def fromSaHpiResetActionT( x ):
    """Return the symbolic name for reset action *x*, or repr(x) if unknown."""
    for value, name in (
        ( SAHPI_COLD_RESET,     "COLD_RESET" ),
        ( SAHPI_WARM_RESET,     "WARM_RESET" ),
        ( SAHPI_RESET_ASSERT,   "RESET_ASSERT" ),
        ( SAHPI_RESET_DEASSERT, "RESET_DEASSERT" ),
    ):
        if x == value:
            return name
    return repr( x )

def toSaHpiResetActionT( s ):
    """Return the SaHpiResetActionT value for name *s*; raise ValueError if unknown."""
    for value, name in (
        ( SAHPI_COLD_RESET,     "COLD_RESET" ),
        ( SAHPI_WARM_RESET,     "WARM_RESET" ),
        ( SAHPI_RESET_ASSERT,   "RESET_ASSERT" ),
        ( SAHPI_RESET_DEASSERT, "RESET_DEASSERT" ),
    ):
        if s == name:
            return value
    raise ValueError()
#**
# For SaHpiPowerStateT
#**
def fromSaHpiPowerStateT( x ):
    """Return the symbolic name for power state *x*, or repr(x) if unknown."""
    for value, name in (
        ( SAHPI_POWER_OFF,   "OFF" ),
        ( SAHPI_POWER_ON,    "ON" ),
        ( SAHPI_POWER_CYCLE, "CYCLE" ),
    ):
        if x == value:
            return name
    return repr( x )

def toSaHpiPowerStateT( s ):
    """Return the SaHpiPowerStateT value for name *s*; raise ValueError if unknown."""
    for value, name in (
        ( SAHPI_POWER_OFF,   "OFF" ),
        ( SAHPI_POWER_ON,    "ON" ),
        ( SAHPI_POWER_CYCLE, "CYCLE" ),
    ):
        if s == name:
            return value
    raise ValueError()
#**
# For SaHpiEventLogOverflowActionT
#**
def fromSaHpiEventLogOverflowActionT( x ):
    """Return the symbolic name for overflow action *x*, or repr(x) if unknown."""
    for value, name in (
        ( SAHPI_EL_OVERFLOW_DROP,      "DROP" ),
        ( SAHPI_EL_OVERFLOW_OVERWRITE, "OVERWRITE" ),
    ):
        if x == value:
            return name
    return repr( x )

def toSaHpiEventLogOverflowActionT( s ):
    """Return the SaHpiEventLogOverflowActionT value for name *s*; raise ValueError if unknown."""
    for value, name in (
        ( SAHPI_EL_OVERFLOW_DROP,      "DROP" ),
        ( SAHPI_EL_OVERFLOW_OVERWRITE, "OVERWRITE" ),
    ):
        if s == name:
            return value
    raise ValueError()
#**
# For AtcaHpiLedColorT
#**
def fromAtcaHpiLedColorT( x ):
    """Return the symbolic name for ATCA LED color *x*, or repr(x) if unknown."""
    for value, name in (
        ( ATCAHPI_LED_COLOR_RESERVED,    "RESERVED" ),
        ( ATCAHPI_LED_COLOR_BLUE,        "BLUE" ),
        ( ATCAHPI_LED_COLOR_RED,         "RED" ),
        ( ATCAHPI_LED_COLOR_GREEN,       "GREEN" ),
        ( ATCAHPI_LED_COLOR_AMBER,       "AMBER" ),
        ( ATCAHPI_LED_COLOR_ORANGE,      "ORANGE" ),
        ( ATCAHPI_LED_COLOR_WHITE,       "WHITE" ),
        ( ATCAHPI_LED_COLOR_NO_CHANGE,   "NO_CHANGE" ),
        ( ATCAHPI_LED_COLOR_USE_DEFAULT, "USE_DEFAULT" ),
    ):
        if x == value:
            return name
    return repr( x )

def toAtcaHpiLedColorT( s ):
    """Return the AtcaHpiLedColorT value for name *s*; raise ValueError if unknown."""
    for value, name in (
        ( ATCAHPI_LED_COLOR_RESERVED,    "RESERVED" ),
        ( ATCAHPI_LED_COLOR_BLUE,        "BLUE" ),
        ( ATCAHPI_LED_COLOR_RED,         "RED" ),
        ( ATCAHPI_LED_COLOR_GREEN,       "GREEN" ),
        ( ATCAHPI_LED_COLOR_AMBER,       "AMBER" ),
        ( ATCAHPI_LED_COLOR_ORANGE,      "ORANGE" ),
        ( ATCAHPI_LED_COLOR_WHITE,       "WHITE" ),
        ( ATCAHPI_LED_COLOR_NO_CHANGE,   "NO_CHANGE" ),
        ( ATCAHPI_LED_COLOR_USE_DEFAULT, "USE_DEFAULT" ),
    ):
        if s == name:
            return value
    raise ValueError()
#**
# For AtcaHpiResourceLedModeT
#**
def fromAtcaHpiResourceLedModeT( x ):
    """Return the symbolic name for ATCA LED mode *x*, or repr(x) if unknown."""
    for value, name in (
        ( ATCAHPI_LED_AUTO,      "AUTO" ),
        ( ATCAHPI_LED_MANUAL,    "MANUAL" ),
        ( ATCAHPI_LED_LAMP_TEST, "LAMP_TEST" ),
    ):
        if x == value:
            return name
    return repr( x )

def toAtcaHpiResourceLedModeT( s ):
    """Return the AtcaHpiResourceLedModeT value for name *s*; raise ValueError if unknown."""
    for value, name in (
        ( ATCAHPI_LED_AUTO,      "AUTO" ),
        ( ATCAHPI_LED_MANUAL,    "MANUAL" ),
        ( ATCAHPI_LED_LAMP_TEST, "LAMP_TEST" ),
    ):
        if s == name:
            return value
    raise ValueError()
#**
# For AtcaHpiLedBrSupportT
#**
def fromAtcaHpiLedBrSupportT( x ):
    """Return the symbolic name for ATCA LED brightness support *x*, or repr(x) if unknown."""
    for value, name in (
        ( ATCAHPI_LED_BR_SUPPORTED,     "SUPPORTED" ),
        ( ATCAHPI_LED_BR_NOT_SUPPORTED, "NOT_SUPPORTED" ),
        ( ATCAHPI_LED_BR_UNKNOWN,       "UNKNOWN" ),
    ):
        if x == value:
            return name
    return repr( x )

def toAtcaHpiLedBrSupportT( s ):
    """Return the AtcaHpiLedBrSupportT value for name *s*; raise ValueError if unknown."""
    for value, name in (
        ( ATCAHPI_LED_BR_SUPPORTED,     "SUPPORTED" ),
        ( ATCAHPI_LED_BR_NOT_SUPPORTED, "NOT_SUPPORTED" ),
        ( ATCAHPI_LED_BR_UNKNOWN,       "UNKNOWN" ),
    ):
        if s == name:
            return value
    raise ValueError()
#**
# For XtcaHpiLedColorT
#**
def fromXtcaHpiLedColorT( x ):
    """Return the symbolic name for xTCA LED color *x*, or repr(x) if unknown."""
    for value, name in (
        ( XTCAHPI_LED_COLOR_RESERVED,    "RESERVED" ),
        ( XTCAHPI_LED_COLOR_BLUE,        "BLUE" ),
        ( XTCAHPI_LED_COLOR_RED,         "RED" ),
        ( XTCAHPI_LED_COLOR_GREEN,       "GREEN" ),
        ( XTCAHPI_LED_COLOR_AMBER,       "AMBER" ),
        ( XTCAHPI_LED_COLOR_ORANGE,      "ORANGE" ),
        ( XTCAHPI_LED_COLOR_WHITE,       "WHITE" ),
        ( XTCAHPI_LED_COLOR_NO_CHANGE,   "NO_CHANGE" ),
        ( XTCAHPI_LED_COLOR_USE_DEFAULT, "USE_DEFAULT" ),
    ):
        if x == value:
            return name
    return repr( x )

def toXtcaHpiLedColorT( s ):
    """Return the XtcaHpiLedColorT value for name *s*; raise ValueError if unknown."""
    for value, name in (
        ( XTCAHPI_LED_COLOR_RESERVED,    "RESERVED" ),
        ( XTCAHPI_LED_COLOR_BLUE,        "BLUE" ),
        ( XTCAHPI_LED_COLOR_RED,         "RED" ),
        ( XTCAHPI_LED_COLOR_GREEN,       "GREEN" ),
        ( XTCAHPI_LED_COLOR_AMBER,       "AMBER" ),
        ( XTCAHPI_LED_COLOR_ORANGE,      "ORANGE" ),
        ( XTCAHPI_LED_COLOR_WHITE,       "WHITE" ),
        ( XTCAHPI_LED_COLOR_NO_CHANGE,   "NO_CHANGE" ),
        ( XTCAHPI_LED_COLOR_USE_DEFAULT, "USE_DEFAULT" ),
    ):
        if s == name:
            return value
    raise ValueError()
#**
# For XtcaHpiResourceLedModeT
#**
def fromXtcaHpiResourceLedModeT( x ):
    """Return the symbolic name for xTCA LED mode *x*, or repr(x) if unknown."""
    for value, name in (
        ( XTCAHPI_LED_AUTO,      "AUTO" ),
        ( XTCAHPI_LED_MANUAL,    "MANUAL" ),
        ( XTCAHPI_LED_LAMP_TEST, "LAMP_TEST" ),
    ):
        if x == value:
            return name
    return repr( x )

def toXtcaHpiResourceLedModeT( s ):
    """Return the XtcaHpiResourceLedModeT value for name *s*; raise ValueError if unknown."""
    for value, name in (
        ( XTCAHPI_LED_AUTO,      "AUTO" ),
        ( XTCAHPI_LED_MANUAL,    "MANUAL" ),
        ( XTCAHPI_LED_LAMP_TEST, "LAMP_TEST" ),
    ):
        if s == name:
            return value
    raise ValueError()
#**
# For XtcaHpiLedBrSupportT
#**
def fromXtcaHpiLedBrSupportT( x ):
    """Return the symbolic name for xTCA LED brightness support *x*, or repr(x) if unknown."""
    for value, name in (
        ( XTCAHPI_LED_BR_SUPPORTED,     "SUPPORTED" ),
        ( XTCAHPI_LED_BR_NOT_SUPPORTED, "NOT_SUPPORTED" ),
        ( XTCAHPI_LED_BR_UNKNOWN,       "UNKNOWN" ),
    ):
        if x == value:
            return name
    return repr( x )

def toXtcaHpiLedBrSupportT( s ):
    """Return the XtcaHpiLedBrSupportT value for name *s*; raise ValueError if unknown."""
    for value, name in (
        ( XTCAHPI_LED_BR_SUPPORTED,     "SUPPORTED" ),
        ( XTCAHPI_LED_BR_NOT_SUPPORTED, "NOT_SUPPORTED" ),
        ( XTCAHPI_LED_BR_UNKNOWN,       "UNKNOWN" ),
    ):
        if s == name:
            return value
    raise ValueError()
#**
# For SaErrorT
#**
def fromSaErrorT( x ):
    """Return the symbolic name for HPI error code *x*, or repr(x) if unknown."""
    for value, name in (
        ( SA_ERR_HPI_OK,                 "OK" ),
        ( SA_ERR_HPI_ERROR,              "ERROR" ),
        ( SA_ERR_HPI_UNSUPPORTED_API,    "UNSUPPORTED_API" ),
        ( SA_ERR_HPI_BUSY,               "BUSY" ),
        ( SA_ERR_HPI_INTERNAL_ERROR,     "INTERNAL_ERROR" ),
        ( SA_ERR_HPI_INVALID_CMD,        "INVALID_CMD" ),
        ( SA_ERR_HPI_TIMEOUT,            "TIMEOUT" ),
        ( SA_ERR_HPI_OUT_OF_SPACE,       "OUT_OF_SPACE" ),
        ( SA_ERR_HPI_OUT_OF_MEMORY,      "OUT_OF_MEMORY" ),
        ( SA_ERR_HPI_INVALID_PARAMS,     "INVALID_PARAMS" ),
        ( SA_ERR_HPI_INVALID_DATA,       "INVALID_DATA" ),
        ( SA_ERR_HPI_NOT_PRESENT,        "NOT_PRESENT" ),
        ( SA_ERR_HPI_NO_RESPONSE,        "NO_RESPONSE" ),
        ( SA_ERR_HPI_DUPLICATE,          "DUPLICATE" ),
        ( SA_ERR_HPI_INVALID_SESSION,    "INVALID_SESSION" ),
        ( SA_ERR_HPI_INVALID_DOMAIN,     "INVALID_DOMAIN" ),
        ( SA_ERR_HPI_INVALID_RESOURCE,   "INVALID_RESOURCE" ),
        ( SA_ERR_HPI_INVALID_REQUEST,    "INVALID_REQUEST" ),
        ( SA_ERR_HPI_ENTITY_NOT_PRESENT, "ENTITY_NOT_PRESENT" ),
        ( SA_ERR_HPI_READ_ONLY,          "READ_ONLY" ),
        ( SA_ERR_HPI_CAPABILITY,         "CAPABILITY" ),
        ( SA_ERR_HPI_UNKNOWN,            "UNKNOWN" ),
        ( SA_ERR_HPI_INVALID_STATE,      "INVALID_STATE" ),
        ( SA_ERR_HPI_UNSUPPORTED_PARAMS, "UNSUPPORTED_PARAMS" ),
    ):
        if x == value:
            return name
    return repr( x )
def toSaErrorT( s ):
if s == "OK":
return SA_ERR_HPI_OK
if s == "ERROR":
return SA_ERR_HPI_ERROR
if s == "UNSUPPORTED_API":
return SA_ERR_HPI_UNSUPPORTED_API
if s == "BUSY":
return SA_ERR_HPI_BUSY
if s == "INTERNAL_ERROR":
return SA_ERR_HPI_INTERNAL_ERROR
if s == "INVALID_CMD":
return SA_ERR_HPI_INVALID_CMD
if s == "TIMEOUT":
return SA_ERR_HPI_TIMEOUT
if s == "OUT_OF_SPACE":
return SA_ERR_HPI_OUT_OF_SPACE
if | |
by the EPO or use --all if you intend to"
" shutdown the whole cluster", node)
return constants.EXIT_FAILURE
elif powered is None:
_stdout_fn("Node %s does not support out-of-band handling, it can not be"
" handled in a fully automated manner", node)
elif powered == opts.on:
_stdout_fn("Node %s is already in desired power state, skipping", node)
elif not offline or (offline and powered):
node_list.append(node)
if not (opts.force or _confirm_fn(all_nodes, "nodes", "epo")):
return constants.EXIT_FAILURE
if opts.on:
return _on_fn(opts, all_nodes, node_list, inst_map)
else:
return _off_fn(opts, node_list, inst_map)
def _GetCreateCommand(info):
  """Compose the "gnt-cluster init" invocation that re-creates the cluster.

  @param info: result of the cluster query, with "ipolicy" and "name" keys
  @rtype: string
  @return: the full command line

  """
  buf = StringIO()
  buf.write("gnt-cluster init")
  # The instance policy options are serialized into the buffer first ...
  PrintIPolicyCommand(buf, info["ipolicy"], False)
  # ... followed by the cluster name as the positional argument.
  buf.write(" ")
  buf.write(info["name"])
  return buf.getvalue()
def ShowCreateCommand(opts, args):
  """Shows the command that can be used to re-create the cluster.

  Currently it works only for ipolicy specs.

  """
  info = GetClient().QueryClusterInfo()
  ToStdout(_GetCreateCommand(info))
def _RunCommandAndReport(cmd):
  """Run a command and report its output, iff it failed.

  @param cmd: the command to execute
  @type cmd: list
  @rtype: bool
  @return: False, if the execution failed.

  """
  result = utils.RunCmd(cmd)
  if not result.failed:
    return True
  # Only failures are reported; successful runs stay silent.
  ToStderr("Command %s failed: %s; Output %s" %
           (cmd, result.fail_reason, result.output))
  return False
def _VerifyCommand(cmd):
  """Verify that a given command succeeds on all online nodes.

  As this function is intended to run during upgrades, it
  is implemented in such a way that it still works, if all Ganeti
  daemons are down.

  @param cmd: the command to execute
  @type cmd: list
  @rtype: list
  @return: the list of node names that are online where
      the command failed.

  """
  quoted_cmd = utils.text.ShellQuoteArgs([str(val) for val in cmd])
  nodes = ssconf.SimpleStore().GetOnlineNodeList()
  master_node = ssconf.SimpleStore().GetMasterNode()
  cluster_name = ssconf.SimpleStore().GetClusterName()

  # If the master node is among the online nodes, move it to the list end
  # so it is handled last.
  if master_node in nodes:
    nodes.remove(master_node)
    nodes.append(master_node)

  runner = ssh.SshRunner(cluster_name=cluster_name)
  return [node_name for node_name in nodes
          if runner.Run(node_name, constants.SSH_LOGIN_USER,
                        quoted_cmd).exit_code != 0]
def _VerifyVersionInstalled(versionstring):
  """Verify that the given version of ganeti is installed on all online nodes.

  Do nothing, if this is the case, otherwise print an appropriate
  message to stderr.

  @param versionstring: the version to check for
  @type versionstring: string
  @rtype: bool
  @return: True, if the version is installed on all online nodes

  """
  # The version is considered installed where its package directory exists.
  version_dir = os.path.join(pathutils.PKGLIBDIR, versionstring)
  badnodes = _VerifyCommand(["test", "-d", version_dir])
  if not badnodes:
    return True
  ToStderr("Ganeti version %s not installed on nodes %s"
           % (versionstring, ", ".join(badnodes)))
  return False
def _GetRunning():
  """Determine the list of running jobs.

  @rtype: int
  @return: the number of jobs still running

  """
  qfilter = qlang.MakeSimpleFilter(
      "status", frozenset([constants.JOB_STATUS_RUNNING]))
  return len(GetClient().Query(constants.QR_JOB, [], qfilter).data)
def _SetGanetiVersion(versionstring):
  """Set the active version of ganeti to the given versionstring

  @type versionstring: string
  @rtype: list
  @return: the list of nodes where the version change failed

  """
  lib_target = os.path.join(pathutils.PKGLIBDIR, versionstring)
  share_target = os.path.join(pathutils.SHAREDIR, versionstring)
  lib_link = os.path.join(pathutils.SYSCONFDIR, "ganeti/lib")
  share_link = os.path.join(pathutils.SYSCONFDIR, "ganeti/share")

  failed = []
  if constants.HAS_GNU_LN:
    # GNU ln can atomically replace the symlink in one step (-T).
    failed.extend(_VerifyCommand(
        ["ln", "-s", "-f", "-T", lib_target, lib_link]))
    failed.extend(_VerifyCommand(
        ["ln", "-s", "-f", "-T", share_target, share_link]))
  else:
    # Without GNU ln, remove the old link first, then create the new one.
    failed.extend(_VerifyCommand(["rm", "-f", lib_link]))
    failed.extend(_VerifyCommand(["ln", "-s", "-f", lib_target, lib_link]))
    failed.extend(_VerifyCommand(["rm", "-f", share_link]))
    failed.extend(_VerifyCommand(["ln", "-s", "-f", share_target, share_link]))
  # A node may fail several of the above commands; report it only once.
  return list(set(failed))
def _ExecuteCommands(fns):
"""Execute a list of functions, in reverse order.
@type fns: list of functions.
@param fns: the functions to be executed.
"""
for fn in reversed(fns):
fn()
def _GetConfigVersion():
  """Determine the version the configuration file currently has.

  @rtype: tuple or None
  @return: (major, minor, revision) if the version can be determined,
      None otherwise

  """
  config_data = serializer.LoadJson(utils.ReadFile(pathutils.CLUSTER_CONF_FILE))
  if "version" not in config_data:
    return None
  return utils.SplitVersion(config_data["version"])
def _ReadIntentToUpgrade():
  """Read the file documenting the intent to upgrade the cluster.

  @rtype: (string, string) or (None, None)
  @return: (old version, version to upgrade to), if the file exists,
      and (None, None) otherwise.

  """
  if not os.path.isfile(pathutils.INTENT_TO_UPGRADE):
    return (None, None)

  contents = utils.UnescapeAndSplit(utils.ReadFile(pathutils.INTENT_TO_UPGRADE))
  if len(contents) != 3:
    # file syntactically mal-formed
    return (None, None)
  # The third field (the writer's PID) is intentionally discarded here.
  (old_version, new_version, _) = contents
  return (old_version, new_version)
def _WriteIntentToUpgrade(version):
  """Write file documenting the intent to upgrade the cluster.

  @type version: string
  @param version: the version we intent to upgrade to

  """
  # Record current version, target version and our PID, escaped and joined.
  content = utils.EscapeAndJoin([constants.RELEASE_VERSION, version,
                                 "%d" % os.getpid()])
  utils.WriteFile(pathutils.INTENT_TO_UPGRADE, data=content)
def _UpgradeBeforeConfigurationChange(versionstring):
  """
  Carry out all the tasks necessary for an upgrade that happen before
  the configuration file, or Ganeti version, changes.

  @type versionstring: string
  @param versionstring: the version to upgrade to
  @rtype: (bool, list)
  @return: tuple of a bool indicating success and a list of rollback tasks

  """
  rollback = []

  # Refuse to start unless the target version is present on all online nodes.
  if not _VerifyVersionInstalled(versionstring):
    return (False, rollback)

  # Persist the intent so an interrupted upgrade can be detected later.
  _WriteIntentToUpgrade(versionstring)
  rollback.append(
    lambda: utils.RunCmd(["rm", "-f", pathutils.INTENT_TO_UPGRADE]))

  ToStdout("Draining queue")
  client = GetClient()
  client.SetQueueDrainFlag(True)

  rollback.append(lambda: GetClient().SetQueueDrainFlag(False))

  # Poll until no jobs are running any more; a truthy result here means
  # the retry timed out with jobs still running.
  if utils.SimpleRetry(0, _GetRunning,
                       constants.UPGRADE_QUEUE_POLL_INTERVAL,
                       constants.UPGRADE_QUEUE_DRAIN_TIMEOUT):
    ToStderr("Failed to completely empty the queue.")
    return (False, rollback)

  ToStdout("Pausing the watcher for one hour.")
  rollback.append(lambda: GetClient().SetWatcherPause(None))
  GetClient().SetWatcherPause(time.time() + 60 * 60)

  ToStdout("Stopping daemons on master node.")
  if not _RunCommandAndReport([pathutils.DAEMON_UTIL, "stop-all"]):
    return (False, rollback)

  # Re-check the installation; if it fails now, restart the master daemons
  # before giving up.
  if not _VerifyVersionInstalled(versionstring):
    utils.RunCmd([pathutils.DAEMON_UTIL, "start-all"])
    return (False, rollback)

  ToStdout("Stopping daemons everywhere.")
  rollback.append(lambda: _VerifyCommand([pathutils.DAEMON_UTIL, "start-all"]))
  badnodes = _VerifyCommand([pathutils.DAEMON_UTIL, "stop-all"])
  if badnodes:
    ToStderr("Failed to stop daemons on %s." % (", ".join(badnodes),))
    return (False, rollback)

  backuptar = os.path.join(pathutils.BACKUP_DIR, "ganeti%d.tar" % time.time())
  ToStdout("Backing up configuration as %s" % backuptar)
  if not _RunCommandAndReport(["mkdir", "-p", pathutils.BACKUP_DIR]):
    return (False, rollback)

  # Create the archive in a safe manner, as it contains sensitive
  # information: write into a mkstemp-created file (0600 permissions) and
  # only rename it to the final name once fully written.
  (_, tmp_name) = tempfile.mkstemp(prefix=backuptar, dir=pathutils.BACKUP_DIR)
  if not _RunCommandAndReport(["tar", "-cf", tmp_name,
                               "--exclude=queue/archive",
                               pathutils.DATA_DIR]):
    return (False, rollback)

  os.rename(tmp_name, backuptar)
  return (True, rollback)
def _VersionSpecificDowngrade():
  """
  Perform any additional downgrade tasks that are version specific
  and need to be done just after the configuration downgrade. This
  function needs to be idempotent, so that it can be redone if the
  downgrade procedure gets interrupted after changing the
  configuration.

  Note that this function has to be reset with every version bump.

  @return: True upon success
  """
  ToStdout("Performing version-specific downgrade tasks.")
  # Nothing to do for the current version.
  return True
def _SwitchVersionAndConfig(versionstring, downgrade):
  """
  Switch to the new Ganeti version and change the configuration,
  in correct order.

  @type versionstring: string
  @param versionstring: the version to change to
  @type downgrade: bool
  @param downgrade: True, if the configuration should be downgraded
  @rtype: (bool, list)
  @return: tuple of a bool indicating success, and a list of
      additional rollback tasks

  """
  rollback = []
  if downgrade:
    ToStdout("Downgrading configuration")
    if not _RunCommandAndReport([pathutils.CFGUPGRADE, "--downgrade", "-f"]):
      return (False, rollback)
    # Note: version specific downgrades need to be done before switching
    # binaries, so that we still have the knowledgeable binary if the downgrade
    # process gets interrupted at this point.
    if not _VersionSpecificDowngrade():
      return (False, rollback)

  # Configuration change is the point of no return. From then onwards, it is
  # safer to push through the up/downgrade than to try to roll it back.

  ToStdout("Switching to version %s on all nodes" % versionstring)
  rollback.append(lambda: _SetGanetiVersion(constants.DIR_VERSION))
  badnodes = _SetGanetiVersion(versionstring)
  if badnodes:
    ToStderr("Failed to switch to Ganeti version %s on nodes %s"
             % (versionstring, ", ".join(badnodes)))
    # On upgrade, a partial switch is fatal; on downgrade we push through.
    if not downgrade:
      return (False, rollback)

  # Now that we have changed to the new version of Ganeti we should
  # not communicate over luxi any more, as luxi might have changed in
  # incompatible ways. Therefore, manually call the corresponding ganeti
  # commands using their canonical (version independent) path.

  if not downgrade:
    ToStdout("Upgrading configuration")
    if not _RunCommandAndReport([pathutils.CFGUPGRADE, "-f"]):
      return (False, rollback)

  return (True, rollback)
def _UpgradeAfterConfigurationChange(oldversion):
"""
Carry out the upgrade actions necessary after switching to the new
Ganeti version and updating the configuration.
As this part is run at a time where the new version of Ganeti is already
running, no communication should happen via luxi, as this is not a stable
interface. Also, as the configuration change is the point of no return,
all actions are pushed trough, even if some of them fail.
@param oldversion: the version the upgrade started from
@type oldversion: string
@rtype: int
@return: the intended return value
"""
returnvalue = 0
ToStdout("Ensuring directories everywhere.")
badnodes = _VerifyCommand([pathutils.ENSURE_DIRS])
if badnodes:
ToStderr("Warning: failed to ensure directories on %s." %
(", ".join(badnodes)))
returnvalue = 1
ToStdout("Starting daemons everywhere.")
badnodes = _VerifyCommand([pathutils.DAEMON_UTIL, "start-all"])
if badnodes:
ToStderr("Warning: failed to start daemons on %s." % (", ".join(badnodes),))
returnvalue = 1
ToStdout("Redistributing the configuration.")
if not _RunCommandAndReport(["gnt-cluster", "redist-conf", "--yes-do-it"]):
returnvalue = 1
ToStdout("Restarting daemons everywhere.")
badnodes = _VerifyCommand([pathutils.DAEMON_UTIL, "stop-all"])
| |
from ciw.auxiliary import *
from itertools import cycle
import copy
from operator import add, mul, sub, truediv
from random import (expovariate, uniform, triangular, gammavariate,
lognormvariate, weibullvariate)
class Distribution(object):
    """
    A general distribution from which all other distributions will inherit.

    Subclasses implement `sample`; the framework calls `_sample`, which
    validates the drawn value. Distributions can be combined arithmetically
    (`+`, `-`, `*`, `/`), yielding a CombinedDistribution over the samples.
    """
    def __repr__(self):
        return 'Distribution'

    def sample(self, t=None, ind=None):
        """Draw one sample; overridden by concrete subclasses."""
        pass

    def _sample(self, t=None, ind=None):
        """
        Performs validity checks before sampling.

        Raises ValueError unless the sample is a non-negative number.
        """
        s = self.sample(t=t, ind=ind)
        # Single isinstance call with a type tuple (idiomatic form of
        # `isinstance(s, float) or isinstance(s, int)`).
        if isinstance(s, (float, int)) and s >= 0:
            return s
        else:
            raise ValueError('Invalid time sampled.')

    def __add__(self, dist):
        """
        Add two distributions such that sampling is the sum of the samples.
        """
        return CombinedDistribution(self, dist, add)

    def __sub__(self, dist):
        """
        Subtract two distributions such that sampling is the difference of the samples.
        """
        return CombinedDistribution(self, dist, sub)

    def __mul__(self, dist):
        """
        Multiply two distributions such that sampling is the product of the samples.
        """
        return CombinedDistribution(self, dist, mul)

    def __truediv__(self, dist):
        """
        Divide two distributions such that sampling is the ratio of the samples.
        """
        return CombinedDistribution(self, dist, truediv)
class CombinedDistribution(Distribution):
    """
    Combines the samples of two distributions, `dist1` and `dist2`, with the
    binary callable `operator` (e.g. operator.add).
    """
    def __init__(self, dist1, dist2, operator):
        # Deep copies so this object owns independent state (relevant for
        # stateful distributions such as Sequential's cycling generator).
        self.d1 = copy.deepcopy(dist1)
        self.d2 = copy.deepcopy(dist2)
        self.operator = operator

    def __repr__(self):
        return 'CombinedDistribution'

    def sample(self, t=None, ind=None):
        # NOTE(review): t/ind are not forwarded to the component samples —
        # confirm this is intended for time-dependent distributions.
        return self.operator(self.d1.sample(), self.d2.sample())
class Uniform(Distribution):
    """
    The Uniform distribution.

    Takes:
      - `lower` the lower bound
      - `upper` the upper bound
    """
    def __init__(self, lower, upper):
        if lower < 0.0 or upper < 0.0:
            raise ValueError('Uniform distribution must sample positive numbers only.')
        if upper < lower:
            # Fixed typo in the error message ('distirbution').
            raise ValueError('Uniform distribution upper bound should be >= lower bound.')
        self.lower = lower
        self.upper = upper

    def __repr__(self):
        return 'Uniform: {0}, {1}'.format(self.lower, self.upper)

    def sample(self, t=None, ind=None):
        """Return a uniform draw from [lower, upper]."""
        return uniform(self.lower, self.upper)
class Deterministic(Distribution):
    """
    The Deterministic distribution: every draw returns the same fixed value.

    Takes:
      - `value` the value to return
    """
    def __init__(self, value):
        # Negative "times" are never valid samples, so reject at build time.
        if value < 0.0:
            raise ValueError('Deterministic distribution must sample positive numbers only.')
        self.value = value

    def __repr__(self):
        return 'Deterministic: {0}'.format(self.value)

    def sample(self, t=None, ind=None):
        """Return the configured constant."""
        return self.value
class Triangular(Distribution):
    """
    The Triangular distribution.

    Takes:
      - `lower` the lower bound
      - `mode` the modal value
      - `upper` the upper bound
    """
    def __init__(self, lower, mode, upper):
        # All three parameters must be non-negative.
        if min(lower, mode, upper) < 0.0:
            raise ValueError('Triangular distribution must sample positive numbers only.')
        if not lower <= mode <= upper:
            raise ValueError('Triangular distribution lower bound must be <= mode must be <= upper bound.')
        self.lower = lower
        self.mode = mode
        self.upper = upper

    def __repr__(self):
        return 'Triangular: {0}, {1}, {2}'.format(self.lower, self.mode, self.upper)

    def sample(self, t=None, ind=None):
        # random.triangular's signature is (low, high, mode).
        return triangular(self.lower, self.upper, self.mode)
class Exponential(Distribution):
    """
    The Exponential distribution.

    Takes:
      - `rate` the rate parameter, lambda
    """
    def __init__(self, rate):
        # A non-positive rate would produce invalid (or infinite) times.
        if rate <= 0.0:
            raise ValueError('Exponential distribution must sample positive numbers only.')
        self.rate = rate

    def __repr__(self):
        return 'Exponential: {0}'.format(self.rate)

    def sample(self, t=None, ind=None):
        """Draw from Exp(rate) using random.expovariate."""
        return expovariate(self.rate)
class Gamma(Distribution):
    """
    The Gamma distribution.

    Takes:
      - `shape` the shape parameter, alpha
      - `scale` the scale parameter, beta
    """
    def __init__(self, shape, scale):
        # Validate eagerly, consistent with the other distributions in this
        # module; random.gammavariate would otherwise only reject the
        # parameters at the first sample() call.
        if shape <= 0.0 or scale <= 0.0:
            raise ValueError('Gamma distribution must sample positive numbers only.')
        self.shape = shape
        self.scale = scale

    def __repr__(self):
        return 'Gamma: {0}, {1}'.format(self.shape, self.scale)

    def sample(self, t=None, ind=None):
        """Draw from Gamma(shape, scale) using random.gammavariate."""
        return gammavariate(self.shape, self.scale)
class Normal(Distribution):
    """
    The Truncated Normal distribution.

    Takes:
      - `mean` the mean of the Normal, mu
      - `sd` the standard deviation of the Normal, sigma
    """
    def __init__(self, mean, sd):
        self.mean = mean
        self.sd = sd

    def __repr__(self):
        return 'Normal: {0}, {1}'.format(self.mean, self.sd)

    def sample(self, t=None, ind=None):
        # truncated_normal comes from ciw.auxiliary; presumably it keeps the
        # draw non-negative (truncation) — confirm in ciw.auxiliary.
        return truncated_normal(self.mean, self.sd)
class Lognormal(Distribution):
    """
    The Lognormal distribution.

    Takes:
      - `mean` the mean of the underlying Normal, mu
      - `sd` the standard deviation of the underlying Normal, sigma
    """
    def __init__(self, mean, sd):
        self.mean = mean
        self.sd = sd

    def __repr__(self):
        return 'Lognormal: {0}, {1}'.format(self.mean, self.sd)

    def sample(self, t=None, ind=None):
        """Draw exp(N(mean, sd)) using random.lognormvariate."""
        return lognormvariate(self.mean, self.sd)
class Weibull(Distribution):
    """
    The Weibull distribution.

    Takes:
      - `scale` the scale parameter, alpha
      - `shape` the shape parameter, beta
    """
    def __init__(self, scale, shape):
        # Validate eagerly, consistent with the other distributions here:
        # a negative scale would yield negative samples (rejected only
        # later by _sample), and shape == 0 crashes inside weibullvariate.
        if scale <= 0.0 or shape <= 0.0:
            raise ValueError('Weibull distribution must sample positive numbers only.')
        self.scale = scale
        self.shape = shape

    def __repr__(self):
        return 'Weibull: {0}, {1}'.format(self.scale, self.shape)

    def sample(self, t=None, ind=None):
        """Draw from Weibull(scale, shape) using random.weibullvariate."""
        return weibullvariate(self.scale, self.shape)
class Empirical(Distribution):
    """
    The Empirical distribution: draws directly from observed data points.

    Takes:
      - `observations` the observations from which to sample
    """
    def __init__(self, observations):
        if any(o < 0 for o in observations):
            raise ValueError('Empirical distribution must sample positive numbers only.')
        self.observations = observations

    def __repr__(self):
        return 'Empirical'

    def sample(self, t=None, ind=None):
        # random_choice comes from ciw.auxiliary; with no probabilities it
        # presumably picks uniformly — confirm in ciw.auxiliary.
        return random_choice(self.observations)
class Sequential(Distribution):
    """
    The Sequential distribution: cycles deterministically through `sequence`.

    Takes:
      - `sequence` the sequence to cycle through
    """
    def __init__(self, sequence):
        if any(o < 0 for o in sequence):
            raise ValueError('Sequential distribution must sample positive numbers only.')
        # Fail fast: cycling an empty sequence would only surface later as a
        # StopIteration at the first sample() call.
        if not sequence:
            raise ValueError('Sequential distribution must have a non-empty sequence.')
        self.sequence = sequence
        self.generator = cycle(self.sequence)

    def __repr__(self):
        return 'Sequential'

    def sample(self, t=None, ind=None):
        """Return the next value in the cycle."""
        return next(self.generator)
class Pmf(Distribution):
    """
    A distribution defined by a probability mass function (pmf).

    Takes:
      - `values` the values to sample
      - `probs` the associated probabilities
    """
    def __init__(self, values, probs):
        if any(o < 0 for o in values):
            raise ValueError('Pmf must sample positive numbers only.')
        if any(p < 0 or p > 1.0 for p in probs):
            raise ValueError('Pmf must have valid probabilities.')
        # Compare with a tolerance (as PhaseType below does) instead of exact
        # float equality, so e.g. ten probabilities of 0.1 are accepted
        # despite floating point rounding.
        if abs(sum(probs) - 1.0) > 10**(-10):
            raise ValueError('Pmf probabilities must sum to 1.0.')
        self.values = values
        self.probs = probs

    def __repr__(self):
        return 'Pmf'

    def sample(self, t=None, ind=None):
        # random_choice comes from ciw.auxiliary; here it is given explicit
        # weights — presumably a weighted draw; confirm in ciw.auxiliary.
        return random_choice(self.values, self.probs)
class PhaseType(Distribution):
    """
    A distribution defined by an initial vector and an absorbing Markov chain.

    Takes:
    - `initial_state` the initial probabilities of being in each state
    - `absorbing_matrix` the matrix representation of the absorbing Markov
      chain, with the final state the absorbing state

    A sample is the total time taken by the continuous-time chain to reach
    the absorbing (last) state, starting from a state drawn from
    `initial_state`.
    """
    def __init__(self, initial_state, absorbing_matrix):
        # Probability checks on the initial vector; the sum is compared with
        # a 1e-10 tolerance to allow for floating point rounding.
        if any(p < 0 or p > 1.0 for p in initial_state):
            raise ValueError('Initial state vector must have valid probabilities.')
        if sum(initial_state) > 1.0 + 10**(-10) or sum(initial_state) < 1.0 - 10**(-10):
            raise ValueError('Initial state vector probabilities must sum to 1.0.')
        # Structural checks: square matrix, matching the state vector length.
        if any(len(absorbing_matrix) != len(row) for row in absorbing_matrix):
            raise ValueError('Matrix of the absorbing Markov chain must be square.')
        if len(initial_state) != len(absorbing_matrix):
            raise ValueError('Initial state vector must have same number of states as absorbing Markov chain matrix.')
        # Generator-matrix checks: non-negative off-diagonal rates, rows
        # summing to zero (within 1e-10), an all-zero (absorbing) final row,
        # and at least one positive rate into the absorbing state.
        if any(row[j] < 0 for i, row in enumerate(absorbing_matrix) for j in range(len(absorbing_matrix)) if i != j):
            raise ValueError('Transition rates must be positive.')
        if not all(-(10**(-10)) < sum(row) < 10**(-10) for i, row in enumerate(absorbing_matrix)):
            raise ValueError('Matrix rows must sum to 0.')
        if not all(r == 0 for r in absorbing_matrix[-1]):
            raise ValueError('Final state must be the absorbing state.')
        if not any(row[-1] > 0 for row in absorbing_matrix):
            raise ValueError('Must be possible to reach the absorbing state.')
        self.initial_state = initial_state
        # States are indexed 0..n-1; the last index is the absorbing state.
        self.states = tuple(range(len(initial_state)))
        self.absorbing_matrix = absorbing_matrix
    def __repr__(self):
        return 'PhaseType'
    def sample_transition(self, rate):
        # Exponential holding time for a single candidate transition; a
        # non-positive rate means the transition can never fire.
        if rate <= 0.0:
            return float('Inf')
        return expovariate(rate)
    def sample(self, t=None, ind=None):
        """Simulate the chain until absorption; return the elapsed time."""
        cumulative_time = 0
        current_state = random_choice(self.states, probs=self.initial_state)
        while current_state != self.states[-1]:
            # Race the exponential clocks of all transitions out of the
            # current state and take the earliest. Diagonal entries are
            # negative, so they yield Inf and are never selected.
            potential_transitions = [self.sample_transition(r) for r in self.absorbing_matrix[current_state]]
            time, idx = min((time, idx) for (idx, time) in enumerate(potential_transitions))
            cumulative_time += time
            current_state = idx
        return cumulative_time
class Erlang(PhaseType):
    """
    A shortcut for the Erlang distribution, built on the PhaseType
    distribution.

    Takes:
      - `rate` the rate spent in each phase
      - `num_phases` the number of phases in series
    """
    def __init__(self, rate, num_phases):
        if rate <= 0.0:
            raise ValueError('Rate must be positive.')
        if num_phases < 1:
            raise ValueError('At least one phase is required.')
        self.rate = rate
        self.num_phases = num_phases
        # The chain always starts in phase 0.
        initial_state = [1] + [0] * num_phases
        # Build a chain of `num_phases` transient states feeding the final
        # absorbing state: each phase leaves at `rate`, only to its successor.
        size = num_phases + 1
        absorbing_matrix = [[0] * size for _ in range(size)]
        for phase in range(num_phases):
            absorbing_matrix[phase][phase] = -self.rate
            absorbing_matrix[phase][phase + 1] = self.rate
        super().__init__(initial_state, absorbing_matrix)

    def __repr__(self):
        return f'Erlang: {self.rate}, {self.num_phases}'
class HyperExponential(PhaseType):
    """
    A shortcut for the HyperExponential distribution, built on the PhaseType
    distribution.

    Takes:
      - `rates` a vector of rates for each phase
      - `probs` a probability vector for starting in each phase
    """
    def __init__(self, rates, probs):
        if any(r <= 0.0 for r in rates):
            raise ValueError('Rates must be positive.')
        # One rate per phase; a length mismatch previously failed obscurely
        # (IndexError or a silently malformed matrix) in the loop below.
        if len(rates) != len(probs):
            raise ValueError('Rates and probs vectors must have the same length.')
        initial_state = probs + [0]
        num_phases = len(probs)
        absorbing_matrix = [[0] * (num_phases + 1) for _ in range(num_phases + 1)]
        for phase in range(num_phases):
            # Each phase transitions straight to the absorbing state at its
            # own rate; phases are never visited in sequence.
            absorbing_matrix[phase][phase] = -rates[phase]
            absorbing_matrix[phase][num_phases] = rates[phase]
        super().__init__(initial_state, absorbing_matrix)

    def __repr__(self):
        return "HyperExponential"
class HyperErlang(PhaseType):
| |
<reponame>pmassolino/hw-sike
import random;
def print_value_VHDL_memory(file, word_size, value, final_size, fill_value):
    """
    Write `value` as a zero-padded binary word of `word_size` bits, one word
    per line, then pad the entry with `final_size - 1` words of `fill_value`.
    """
    fmt = "{0:0" + str(word_size) + "b}"
    file.write(fmt.format(value) + '\n')
    # Pad the entry up to `final_size` memory words with the fill pattern.
    for _ in range(final_size - 1):
        file.write(fmt.format(fill_value) + '\n')
def load_value_unsigned_VHDL_memory(file, word_size, final_size):
    """
    Read one `word_size`-bit binary word from `file` as an unsigned int,
    consuming (and discarding) any trailing filler words of the entry.
    """
    value = int(file.read(word_size), base=2)
    file.read(1)  # consume the newline after the word
    # Skip the remaining filler words (each is word + newline).
    for _ in range(final_size - 1):
        file.read(word_size + 1)
    return value
def load_value_signed_VHDL_memory(file, word_size, final_size):
    """
    Read one `word_size`-bit binary word from `file` as a signed
    (two's-complement) int, consuming any trailing filler words.
    """
    raw = int(file.read(word_size), base=2)
    file.read(1)  # consume the newline after the word
    for _ in range(final_size - 1):
        file.read(word_size + 1)  # skip filler words
    # Reinterpret as two's complement when the sign bit is set.
    if raw >= 2 ** (word_size - 1):
        raw -= 2 ** word_size
    return raw
# Rotation functions obtained from
# https://www.falatic.com/index.php/108/python-and-bitwise-rotation
# (rewritten as named functions: PEP 8 discourages assigning lambdas, E731)
def rol(val, r_bits, max_bits):
    """Rotate `val` left by `r_bits` within a `max_bits`-wide word."""
    r_bits %= max_bits
    mask = 2 ** max_bits - 1
    return ((val << r_bits) & mask) | ((val & mask) >> (max_bits - r_bits))

def ror(val, r_bits, max_bits):
    """Rotate `val` right by `r_bits` within a `max_bits`-wide word."""
    r_bits %= max_bits
    mask = 2 ** max_bits - 1
    return ((val & mask) >> r_bits) | ((val << (max_bits - r_bits)) & mask)

def shl(val, s_bits, max_bits):
    """Shift `val` left by `s_bits`, truncated to `max_bits` bits."""
    return (val << s_bits) & (2 ** max_bits - 1)

def shr(val, s_bits, max_bits):
    """Shift `val` (truncated to `max_bits` bits) right by `s_bits`."""
    return (val & (2 ** max_bits - 1)) >> s_bits
def print_simple_unsigned_test(file, a, b, rotation_amount, word_size):
    """
    Write one unsigned ALU test vector: the inputs (a, b, rotation amount)
    followed by the expected result of every supported operation, each as a
    single memory word (the product occupies a double word).
    """
    word_mask = 2 ** word_size - 1
    double_word_mask = 2 ** (2 * word_size) - 1

    def emit(value, size=word_size):
        # Every field is one memory word with no filler.
        print_value_VHDL_memory(file, size, value, 1, 0)

    emit(a)
    emit(b)
    emit(rotation_amount)
    emit((b + a) & word_mask)                         # addition
    emit((b - a) & word_mask)                         # subtraction
    emit((b * a) & double_word_mask, 2 * word_size)   # multiplication
    emit(shr(b, rotation_amount, word_size))
    emit(ror(b, rotation_amount, word_size))
    emit(shl(b, rotation_amount, word_size))
    emit(rol(b, rotation_amount, word_size))
    emit(1 if a == b else 0)                          # equality flag
    emit(1 if a > b else 0)                           # greater-than flag
    emit(a & b)
    emit(a | b)
    emit(a ^ b)
def print_simple_signed_test(file, a, b, rotation_amount, word_size):
    """
    Write one signed ALU test vector: the inputs (stored as raw
    two's-complement bit patterns) followed by the expected result of every
    supported operation, each as a single memory word (the product occupies
    a double word).
    """
    word_mask = 2 ** word_size - 1
    double_word_mask = 2 ** (2 * word_size) - 1

    def emit(value, size=word_size):
        # Every field is one memory word with no filler.
        print_value_VHDL_memory(file, size, value, 1, 0)

    emit(a & word_mask)
    emit(b & word_mask)
    emit(rotation_amount)
    emit((b + a) & word_mask)                         # addition
    emit((b - a) & word_mask)                         # subtraction
    emit((b * a) & double_word_mask, 2 * word_size)   # multiplication
    # Shift/rotate helpers mask internally, so negative b is handled as its
    # two's-complement bit pattern, exactly as in the unsigned test.
    emit(shr(b, rotation_amount, word_size))
    emit(ror(b, rotation_amount, word_size))
    emit(shl(b, rotation_amount, word_size))
    emit(rol(b, rotation_amount, word_size))
    emit(1 if a == b else 0)                          # signed equality flag
    emit(1 if a > b else 0)                           # signed greater-than flag
    emit((a & b) & word_mask)
    emit((a | b) & word_mask)
    emit((a ^ b) & word_mask)
def print_all_tests_unsigned(filename, word_size, number_of_tests):
    """
    Generate `number_of_tests` unsigned ALU test vectors into `filename`.

    The file starts with the total test count; the first vectors exercise
    boundary operand values (near 0 and near 2**word_size) and boundary
    rotation amounts, and the remaining quota is filled with random vectors.
    """
    max_value = 2 ** word_size
    max_rotation = word_size
    maximum_tests = [0, 1, 2, 3, 4,
                     max_value - 4, max_value - 3, max_value - 2, max_value - 1]
    maximum_tests_rotation = [0, 1, 2, 3, 4,
                              max_rotation - 4, max_rotation - 3,
                              max_rotation - 2, max_rotation - 1]
    # `with` guarantees the file is closed even if a write fails.
    with open(filename, 'w') as test_file:
        test_file.write("{0:0d}".format(number_of_tests))
        test_file.write('\n')
        for test_value_b in maximum_tests:
            for test_value_a, test_rotation in zip(maximum_tests, maximum_tests_rotation):
                print_simple_unsigned_test(test_file, test_value_a, test_value_b,
                                           test_rotation, word_size)
                number_of_tests -= 1
        # Fill the remaining quota with random vectors.
        for _ in range(number_of_tests):
            test_value_a = random.randint(0, max_value - 1)
            test_value_b = random.randint(0, max_value - 1)
            test_rotation = random.randint(0, max_rotation - 1)
            print_simple_unsigned_test(test_file, test_value_a, test_value_b,
                                       test_rotation, word_size)
def print_all_tests_signed(filename, word_size, number_of_tests):
    """
    Generate `number_of_tests` signed ALU test vectors into `filename`.

    The file starts with the total test count; the first vectors exercise
    boundary operand values (most negative, around 0, most positive) and
    boundary rotation amounts, and the remaining quota is filled with random
    vectors drawn from the full signed range.
    """
    max_value = 2 ** (word_size - 1)
    max_rotation = word_size
    maximum_tests = [-max_value, -max_value + 1, -max_value + 2, 0, 1, 2,
                     max_value - 3, max_value - 2, max_value - 1]
    maximum_tests_rotation = [0, 1, 2, 3, 4,
                              max_rotation - 4, max_rotation - 3,
                              max_rotation - 2, max_rotation - 1]
    # `with` guarantees the file is closed even if a write fails.
    with open(filename, 'w') as test_file:
        test_file.write("{0:0d}".format(number_of_tests))
        test_file.write('\n')
        for test_value_b in maximum_tests:
            for test_value_a, test_rotation in zip(maximum_tests, maximum_tests_rotation):
                print_simple_signed_test(test_file, test_value_a, test_value_b,
                                         test_rotation, word_size)
                number_of_tests -= 1
        # Fill the remaining quota with random vectors.
        for _ in range(number_of_tests):
            test_value_a = random.randint(-max_value, max_value - 1)
            test_value_b = random.randint(-max_value, max_value - 1)
            test_rotation = random.randint(0, max_rotation - 1)
            print_simple_signed_test(test_file, test_value_a, test_value_b,
                                     test_rotation, word_size)
def load_all_tests_unsigned(filename, word_size):
    """
    Re-read a test-vector file produced by print_all_tests_unsigned and check
    every stored result against a freshly computed one, printing a detailed
    report for each mismatch.

    (The original seek/tell dance that computed an unused file size has been
    removed, and the twelve near-identical check stanzas are now table-driven.)
    """
    word_mask = 2 ** word_size - 1
    double_word_mask = 2 ** (2 * word_size) - 1

    def report_mismatch(operation, test_number, a, b, rotation,
                        loaded, computed, show_rotation=True):
        # Mirrors the original hand-written report format exactly.
        print("Error in " + operation + " at test number : " + str(test_number))
        print("Loaded value a")
        print(a)
        print("Loaded value b")
        print(b)
        if show_rotation:
            print("Loaded rotation")
            print(rotation)
        print("Loaded value o")
        print(loaded)
        print("Computed value o")
        print(computed)

    with open(filename, 'r') as test_file:
        number_of_tests = int(test_file.readline())
        current_test = 0
        while current_test != number_of_tests:
            a = load_value_unsigned_VHDL_memory(test_file, word_size, 1)
            b = load_value_unsigned_VHDL_memory(test_file, word_size, 1)
            rotation = load_value_unsigned_VHDL_memory(test_file, word_size, 1)
            # (operation name, expected value, stored word size, report rotation?)
            checks = [
                ("addition", (b + a) & word_mask, word_size, True),
                ("subtraction", (b - a) & word_mask, word_size, True),
                ("multiplication", (b * a) & double_word_mask, 2 * word_size, True),
                ("shift right", shr(b, rotation, word_size), word_size, True),
                ("rotation right", ror(b, rotation, word_size), word_size, True),
                ("shift left", shl(b, rotation, word_size), word_size, True),
                ("rotation left", rol(b, rotation, word_size), word_size, True),
                ("comparison equal", 1 if a == b else 0, word_size, True),
                ("comparison bigger", 1 if a > b else 0, word_size, True),
                # The logical checks never reported the rotation amount.
                ("logical AND", a & b, word_size, False),
                ("logical OR", a | b, word_size, False),
                ("logical XOR", a ^ b, word_size, False),
            ]
            for operation, computed, stored_size, show_rotation in checks:
                loaded = load_value_unsigned_VHDL_memory(test_file, stored_size, 1)
                if computed != loaded:
                    report_mismatch(operation, current_test, a, b, rotation,
                                    loaded, computed, show_rotation)
            current_test += 1
def | |
#!/usr/bin/python
# coding: utf-8
import sys,os
import math
import IRKE_COMMON
import ACP
import ctypes
from Fasta_reader import *
from sequenceUtil import *
from datetime import datetime
import time
import argparse
import resource
import numpy as np
me = 0
TOKEN = ""
MIN_CONNECTIVITY_RATIO = 0.0
MIN_ASSEMBLY_LENGTH = 0 # minimum length of an inchworm assembly for reporting.
MIN_ASSEMBLY_COVERAGE = 2 # minimum average kmer coverage for assembly to be reported.
MIN_SEED_ENTROPY = 1.5 # minimum entropy for a Kmer to seed in inchworm assembly construction.
MIN_SEED_COVERAGE = 2 # minimum kmer coverage for a seed.
DOUBLE_STRANDED_MODE = False # strand-specific by default
WRITE_KMER_FILES = False
MONITOR_MPI_COMMUNICATION = False
def main():
    """
    Distributed inchworm assembly driver (one process per ACP rank).

    Parses the ACP launcher options and the application options, redirects
    stdout/stderr to per-rank files, builds a distributed kmer counter from
    the input fasta, optionally prunes likely-error kmers, runs assembly,
    and on rank 0 merges the per-rank contig files into de-duplicated FASTA
    records on stdout.
    """
    # All tunables live in module-level globals so the helper phases
    # (pruning/assembly) can read them.
    global TOKEN
    global MIN_ASSEMBLY_LENGTH
    global MIN_ASSEMBLY_COVERAGE
    global MIN_CONNECTIVITY_RATIO
    global MIN_SEED_ENTROPY
    global MIN_SEED_COVERAGE
    global DOUBLE_STRANDED_MODE
    global WRITE_KMER_FILES
    global MONITOR_MPI_COMMUNICATION
    parser = argparse.ArgumentParser()
    # Options injected by the ACP process launcher.
    parser.add_argument('--acp-myrank', action='store', dest='myrank')
    parser.add_argument('--acp-nprocs', action='store', dest='nprocs')
    parser.add_argument('--acp-taskid', action='store', dest='taskid')
    parser.add_argument('--acp-port-local', action='store', dest='port_local')
    parser.add_argument('--acp-port-remote', action='store', dest='port_remote')
    parser.add_argument('--acp-host-remote', action='store', dest='host_remote')
    parser.add_argument('--acp-size-smem', action='store', dest='size_smem')
    parser.add_argument('--acp-size-smem-cl', action='store', dest='size_smem_cl')
    parser.add_argument('--acp-size-smem-dl', action='store', dest='size_smem_dl')
    # required params
    parser.add_argument('--reads', action='store', dest='fasta_filename')
    # NOTE(review): --kmers deliberately(?) shares dest 'fasta_filename' with
    # --reads, so whichever option appears last wins — confirm intended.
    parser.add_argument('--kmers', action='store', dest='fasta_filename')
    parser.add_argument('--token', action='store', dest='token')
    # optional args
    parser.add_argument('--K', action='store', dest='kmer_length')
    parser.add_argument('--minKmerCount', action='store', dest='min_kmer_count')
    parser.add_argument('--L', action='store', dest='min_assembly_length')
    parser.add_argument('--min_assembly_coverage', action='store', dest='min_assembly_coverage')
    parser.add_argument('--monitor', action='store', dest='monitor')
    parser.add_argument('--min_con_ratio', action='store', dest='min_con_ratio')
    parser.add_argument('--DS')
    parser.add_argument('--min_seed_entropy', action='store', dest='min_seed_entropy')
    parser.add_argument('--min_seed_coverage', action='store', dest='min_seed_coverage')
    parser.add_argument('--max_test_kmers', action='store', dest='max_test_kmers')
    parser.add_argument('--write_kmer_files')
    parser.add_argument('--keep_tmp_files')
    parser.add_argument('--no_prune_error_kmers')
    parser.add_argument('--min_ratio_non_error', action='store', dest='min_ratio_non_error')
    parser.add_argument('app_args', nargs='*')
    args = parser.parse_args()
    # Redirect this rank's stdout/stderr to per-rank files so the output of
    # all processes does not interleave.
    output = open("output.%s" % (args.myrank,), "w")
    os.dup2(output.fileno(), sys.stdout.fileno())
    output.close()
    errout = open("err.%s" % (args.myrank,), "w")
    os.dup2(errout.fileno(), sys.stderr.fileno())
    errout.close()
    if args.fasta_filename == '':
        # NOTE(review): when neither --reads nor --kmers is given, argparse
        # leaves fasta_filename as None (not ''), so this guard never fires
        # and the failure surfaces later in read_file — confirm intended.
        print "Error, must specify --kmers or --reads"
        exit(4)
    kmer_length = 25
    if args.kmer_length:
        kmer_length = int(args.kmer_length)
    print "kmer length set to %d" % (kmer_length,)
    # NOTE(review): min_kmer_count is parsed but not used below.
    min_kmer_count = 1
    if args.min_kmer_count:
        min_kmer_count = int(args.min_kmer_count)
    MIN_ASSEMBLY_LENGTH = kmer_length
    if args.min_assembly_length:
        MIN_ASSEMBLY_LENGTH = int(args.min_assembly_length)
    if args.min_assembly_coverage:
        MIN_ASSEMBLY_COVERAGE = int(args.min_assembly_coverage)
    #if args.monitor:
    #    IRKE_COMMON::MONITOR = args.monitor
    if args.min_con_ratio:
        MIN_CONNECTIVITY_RATIO = float(args.min_con_ratio)
    if args.DS:
        DOUBLE_STRANDED_MODE = True
    if args.min_seed_entropy:
        MIN_SEED_ENTROPY = float(args.min_seed_entropy)
    if args.min_seed_coverage:
        MIN_SEED_COVERAGE = int(args.min_seed_coverage)
    # some testing parameters.
    # NOTE(review): MAX_TEST_KMERS and KEEP_TMP_FILES are assigned as locals
    # here and never read afterwards in this function.
    if args.max_test_kmers:
        MAX_TEST_KMERS = int(args.max_test_kmers)
    if args.write_kmer_files:
        WRITE_KMER_FILES = True
    if args.keep_tmp_files:
        KEEP_TMP_FILES = True
    # end of testing params
    prune_error_kmers = True
    # kmer error removal options
    if args.no_prune_error_kmers:
        prune_error_kmers = False
    min_ratio_non_error = 0.05
    if prune_error_kmers and args.min_ratio_non_error:
        min_ratio_non_error = float(args.min_ratio_non_error)
    ACP.init('udp') # ACP must be initialized before creating DistributedKmerCounter
    kcounter = DistributedKmerCounter(kmer_length, DOUBLE_STRANDED_MODE)
    # Populate the distributed counter from the input fasta.
    read_file(args.fasta_filename, kcounter, kmer_length)
    if args.token:
        TOKEN = args.token
    ACP.sync()
    # next phase
    # k-kmer pruning
    if prune_error_kmers:
        do_prune_error_kmers(kcounter, min_ratio_non_error)
    do_assembly(kcounter, kmer_length)
    if ACP.rank() == 0:
        # Rank 0 merges every rank's temporary contig file, de-duplicating
        # identical contigs by hash, and emits FASTA records to stdout.
        seen_contig_already = {} #map<unsigned long long, bool>
        INCHWORM_ASSEMBLY_COUNTER = 0
        assembly_start_node = 0
        assembly_end_node = ACP.procs() - 1
        for i in range(0, assembly_end_node+1):
            tmp_contig_file = get_ACP_proc_filename(i)
            sequence = ""
            tmpreader = open(tmp_contig_file, "r")
            for line in tmpreader:
                # one contig per line in the per-rank temp file
                sequence = line.rstrip()
                contig_length = len(sequence)
                contig_hash = generateHash(sequence)
                if not contig_hash in seen_contig_already:
                    seen_contig_already[contig_hash] = True
                    INCHWORM_ASSEMBLY_COUNTER = INCHWORM_ASSEMBLY_COUNTER + 1
                    header = ">a" + str(INCHWORM_ASSEMBLY_COUNTER) \
                        + " K: " + str(kmer_length) \
                        + " length: " + str(len(sequence))
                    sequence = add_fasta_seq_line_breaks(sequence, 60)
                    print header
                    print sequence
            tmpreader.close()
    print "DONE."
class Kmer_visitor():
    """
    Tracks which kmers have already been visited during assembly.

    Kmers may be given as strings (converted to their integer encoding via
    kmer_to_intval) or as already-encoded integer values; in double-stranded
    mode both strands map to one canonical value via get_DS_kmer_val.
    """
    def __init__(self, kmer_length, is_ds):
        self.kmer_length = kmer_length
        self.ds_mode = is_ds
        # Use a real set: membership tests on the original list were O(n),
        # making repeated exists()/erase() calls quadratic overall.
        self.set = set()

    def _canonical(self, kmer):
        # Normalise a kmer to its stored integer representation.
        if isinstance(kmer, str):
            kmer = kmer_to_intval(kmer)
        if self.ds_mode:
            kmer = get_DS_kmer_val(kmer, self.kmer_length)
        return kmer

    def add(self, kmer):
        """Mark a kmer as visited (idempotent)."""
        self.set.add(self._canonical(kmer))

    def exists(self, kmer):
        """True if the kmer has been added and not erased."""
        return self._canonical(kmer) in self.set

    def erase(self, kmer):
        """Remove a kmer; erasing a missing kmer is a no-op."""
        self.set.discard(self._canonical(kmer))

    def clear(self):
        """Forget all visited kmers."""
        self.set = set()

    def size(self):
        """Number of distinct visited kmers."""
        return len(self.set)
class Segment():
    """
    A typed, ACP-registered memory segment usable as an indexable array.

    Wraps a ctypes buffer of `size` elements of type `ctype`, registers it
    with the ACP runtime, and records its global address (`self.ga`) so
    remote processes can access it.
    """
    def __del__(self):
        # Release the ACP registration when the segment is garbage collected.
        ACP.unregister_memory(self.key)
    def __init__(self, ctype, size):
        # Element type, element count, and derived byte sizes.
        self.type = ctype
        self.size = size
        self.typesize = ctypes.sizeof(ctype)
        self.bytesize = self.typesize * self.size
        # Raw backing store plus a typed pointer view for indexed access.
        self.buffer = ctypes.create_string_buffer(self.bytesize)
        self.prettybuffer = ctypes.cast(self.buffer, ctypes.POINTER(ctype))
        # Register the buffer with ACP and look up its global address.
        # NOTE(review): the final argument (0) is presumably a registration
        # color/flag — confirm against the ACP binding.
        self.key = ACP.register_memory(ctypes.cast(self.buffer, ctypes.c_void_p), self.bytesize, 0)
        self.ga = ACP.query_ga(self.key, ctypes.cast(self.buffer, ctypes.c_void_p))
    def __getitem__(self, key):
        # Element read through the typed view (no bounds checking by ctypes).
        return self.prettybuffer[key]
    def __setitem__(self, key, value):
        # Element write through the typed view.
        self.prettybuffer[key] = value
class DistributedKmerCounter():
    def __init__(self, kmer_length, ds_mode): # collective
        """Collectively construct a distributed kmer -> count multiset.

        Each rank hosts one SingleNodeMultiset and learns the global
        addresses of every other rank's multiset and lock, so any rank
        can update any rank's data.  Must be called by all ranks together
        (it performs ACP.sync() barriers).
        """
        # key=64bit value=64bit
        self.kmer_length = kmer_length
        self.ds_mode = ds_mode
        self.multiset = ACP.SingleNodeMultiset()
        my_multiset_ga = self.multiset.multiset.ga
        print my_multiset_ga
        # One 64-bit slot per rank for the gathered global addresses.
        self.ga_buffer = Segment(ctypes.c_ulonglong, ACP.procs())
        self.addcount = 0
        ACP.sync()
        # Publish this rank's multiset global address in its starter memory...
        local_starter_ga = ACP.query_starter_ga(ACP.rank())
        starter_memory = ACP.query_address(local_starter_ga)
        stmem_as_ull = ctypes.cast(starter_memory, ctypes.POINTER(ctypes.c_ulonglong))
        stmem_as_ull[0] = ctypes.c_ulonglong(my_multiset_ga)
        ACP.sync()
        # ...then gather every rank's published address into ga_buffer.
        for rank in range(ACP.procs()):
            ACP.copy(self.ga_buffer.ga + (rank * self.ga_buffer.typesize), ACP.query_starter_ga(rank), self.ga_buffer.typesize, ACP.HANDLE_NULL)
        ACP.complete()
        ACP.sync()
        # for rank in range(ACP.procs()):
        #     print "multiset's ga[%d]: %x" % (rank, self.ga_buffer[rank])
        # Per-rank proxies: the local rank uses its own multiset directly,
        # remote ranks are wrapped around their published global addresses.
        self.remotemultiset = []
        for rank in range(ACP.procs()):
            if rank == ACP.rank():
                self.remotemultiset.append(self.multiset)
            else:
                self.remotemultiset.append(ACP.SingleNodeMultiset(self.ga_buffer[rank]))
        print self.remotemultiset
        ACP.sync()
        # Repeat the same publish/gather exchange for the per-rank locks.
        self.lock = ACP.Lock()
        stmem_as_ull[0] = ctypes.c_ulonglong(self.lock.lock_ga)
        ACP.sync()
        for rank in range(ACP.procs()):
            ACP.copy(self.ga_buffer.ga + (rank * self.ga_buffer.typesize), ACP.query_starter_ga(rank), self.ga_buffer.typesize, ACP.HANDLE_NULL)
        ACP.complete()
        ACP.sync()
        self.remotelock = []
        for rank in range(ACP.procs()):
            if rank == ACP.rank():
                self.remotelock.append(self.lock)
            else:
                self.remotelock.append(ACP.Lock(self.ga_buffer[rank]))
        print self.remotelock
    def size(self):
        """Number of entries in this rank's local multiset."""
        return self.multiset.size()

    def get_kmer_string(self, kmer_val):
        """Delegate to decode_kmer_from_intval for this counter's k."""
        return decode_kmer_from_intval(kmer_val, self.kmer_length)

    def get_contains_non_gatc(self, kmer):
        """Delegate to the module-level contains_non_gatc check."""
        return contains_non_gatc(kmer)

    def get_kmer_intval(self, kmer):
        """Delegate to kmer_to_intval (string kmer -> integer encoding)."""
        return kmer_to_intval(kmer)

    def get_kmer_length(self):
        """The kmer size (k) this counter was constructed with."""
        return self.kmer_length
def get_central_kmer(self, kmer):
# given ABCDE, want BCD
kmer = kmer >> 2 # remove last nucleotide
kmer_mask = long(math.pow(2,2*( (self.kmer_length-1) -1) ) -1) # remove first nucleotide of the resulting (kmer-1) to get the core seq
central_kmer = kmer & kmer_mask
return central_kmer
def get_central_right_kmer(self, kmer):
# given ABCDE, want CDE
kmer_mask = long(math.pow(2,2*(self.kmer_length-2))-1) # remove first two nucleotides of kmer
central_kmer = kmer & kmer_mask
return central_kmer
def get_central_left_kmer(self, kmer):
# given ABCDE, want ABC
return kmer >> 4 # shift out the last two nucleotides.
def get_node_for_central_kmer(self, central_kmer):
#print "get_node_for_central_kmer(%d)" % (central_kmer,)
canonical_central_kmer = central_kmer
rev_central_kmer = revcomp_val(central_kmer, self.kmer_length-2)
if rev_central_kmer < canonical_central_kmer:
canonical_central_kmer = rev_central_kmer
#print "canonical_central_kmer = %d" % (canonical_central_kmer,)
node_for_kmer = canonical_central_kmer % ACP.procs()
#print "node_for_kmer = %d" % (node_for_kmer,)
if False: #IRKE_COMMON.MONITOR >= 4:
print "Kmer: " + decode_kmer_from_intval(central_kmer, self.kmer_length-2) + " or " \
+ decode_kmer_from_intval(canonical_central_kmer, self.kmer_length-2) + " assigned to node: " + str(node_for_kmer)
# all nodes are kmer servers
return node_for_kmer
def add_kmer(self, kmer, count):
ck = self.get_central_kmer(kmer)
node = self.get_node_for_central_kmer(ck)
kmerstr = kmerstr_to_colored_kmer(decode_kmer_from_intval(kmer, self.kmer_length))
ckmerstr = kmerstr_to_colored_kmer(decode_kmer_from_intval(ck, self.kmer_length-2))
#print "add_kmer(%s, %d): central_kmer = %s, node = %d" % (kmerstr, count, ckmerstr, node)
#print "add_kmer(%s, %d) node = %d" % (kmerstr, count, node)
if self.ds_mode:
kmer = self.get_DS_kmer(kmer, self.kmer_length)
self.addcount = self.addcount + 1
if self.addcount % 1000 == 0:
print "added %d kmers" % (self.addcount,)
self.remotemultiset[node].increment(kmer, count)
def find_kmer(self, kmer):
if isinstance(kmer, str):
kmer = self.get_kmer_intval(kmer)
if self.ds_mode:
kmer = self.get_DS_kmer(kmer, self.kmer_length)
return self.multiset[kmer]
def kmer_exists(self, kmer):
if isinstance(kmer, str):
kmer = self.get_kmer_intval(kmer)
return self.get_kmer_count(kmer) > 0
def get_kmer_count(self, kmer):
if isinstance(kmer, str):
kmer = get_kmer_intval(kmer)
return self.multiset[kmer]
def get_forward_kmer_candidates(self, seed_kmer):
candidates = self.get_forward_kmer_candidates_unsorted(seed_kmer, False)
#print candidates
tmp = sorted(candidates, key=lambda x: x[1])
candidates = tmp
candidates.reverse()
return candidates
def get_forward_kmer_candidates_unsorted(self, seed_kmer, getZeros):
forward_prefix = ((seed_kmer << (33-self.kmer_length) * 2) & 0xFFFFFFFFFFFFFFFF) >> (32 - self.kmer_length)*2
candidates = []
for i in range(4):
k = forward_prefix | i
ck = self.get_central_kmer(k)
node = self.get_node_for_central_kmer(ck)
#count = self.get_kmer_count(k)
count = self.remotemultiset[node][k]
if count > 0 or getZeros:
candidates.append((k,count))
return candidates
def get_reverse_kmer_candidates(self, seed_kmer):
candidates = self.get_reverse_kmer_candidates_unsorted(seed_kmer, False)
tmp = sorted(candidates, key=lambda x: x[1])
candidates = tmp
candidates.reverse()
return candidates
def get_reverse_kmer_candidates_unsorted(self, seed_kmer, getZeros):
reverse_suffix = seed_kmer >> 2
candidates = [] #{}
for i in range(4):
k = (i << (self.kmer_length*2 - 2)) | reverse_suffix
ck = self.get_central_kmer(k)
node = self.get_node_for_central_kmer(ck)
count = self.remotemultiset[node][k]
if count > 0 or getZeros:
candidates.append((k,count))
return candidates
def get_kmers_sort_descending_counts(self): # sort local kmers
print "Getting vec of kmers"
num_kmers = self.multiset.size()
start = datetime.now()
dtype = [('key', np.uint64), ('value', np.uint64)]
kmer_arr = np.empty([num_kmers], dtype=dtype)
print "Kcounter hash size: %d" % (num_kmers,)
count = 0
for k in self.multiset.items():
v = self.multiset[k]
if v > 0:
kmer_arr[count][0] = k
kmer_arr[count][1] = v
count = count + 1
if count % 1000 == 0:
print "count=%d" % (count,)
print "Processed %d non-zero abundance kmers in kcounter." % (count,)
if IRKE_COMMON.DEVEL_no_kmer_sort:
print "-Not sorting list of kmers, given parallel mode in effect."
return kmer_list
print "Sorting %d kmers ..." % (count,)
kmer_arr[0:count].sort(order='value')
end = datetime.now()
time_spent = end - start
print "Done sorting %d kmers, taking %s | |
in user_signatures:
cla.log.debug(f'{fn} - user not in one of the approval lists - '
'marking signature approved = false for '
f'user: {user}, project_id: {project}, company_id: {company_id}, '
f'signature: {signature.get_signature_id()}')
signature.set_signature_approved(False)
signature.save()
event_data = (f'The employee signature of user {user.get_user_name()} was '
f'disapproved the during CCLA check for project {project.get_project_name()} '
f'and company {company.get_company_name()}')
Event.create_event(
event_type=EventType.EmployeeSignatureDisapproved,
event_cla_group_id=project.get_project_id(),
event_company_id=company.get_company_id(),
event_user_id=user.get_user_id(),
event_data=event_data,
event_summary=event_data,
contains_pii=True,
)
else:
cla.log.debug(f'{fn} - CCLA signature check - unable to load signed CCLA for project|company, '
f'user: {user}, project_id: {project}, company_id: {company_id} - '
'signatory needs to sign the CCLA before the user can be authorized')
else:
cla.log.debug(f'{fn} - CCLA signature check - unable to load employee acknowledgement for project|company, '
f'user: {user}, project_id: {project}, company_id: {company_id}, '
'signed=true, approved=true - user needs to be associated with an organization before '
'they can be authorized.')
else:
cla.log.debug(f'{fn} - CCLA signature check failed - user is NOT associated with a company - '
f'unable to check for a CCLA, user info: {user}.')
if ccla_pass:
cla.log.debug(f'{fn} - CCLA signature check passed for user: {user} on project: {project}')
return True
else:
cla.log.debug(f'{fn} - CCLA signature check failed for user: {user} on project: {project}')
cla.log.debug(f'{fn} - User: {user} failed both ICLA and CCLA checks')
return False
def get_redirect_uri(repository_service, installation_id, github_repository_id, change_request_id):
    """
    Function to generate the redirect_uri parameter for a repository service's OAuth2 process.

    :param repository_service: The repository service provider we're currently initiating the
        OAuth2 process with. Currently only supports 'github' and 'gitlab'.
    :type repository_service: string
    :param installation_id: The EasyCLA GitHub application ID
    :type installation_id: string
    :param github_repository_id: The ID of the repository object that applies for this OAuth2 process.
    :type github_repository_id: string
    :param change_request_id: The ID of the change request in question. Is a PR number if
        repository_service is 'github'. Is a merge request if the repository_service is 'gitlab'.
    :type change_request_id: string
    :return: The redirect_uri parameter expected by the OAuth2 process.
    :rtype: string
    """
    query = urllib.parse.urlencode({
        'installation_id': installation_id,
        'github_repository_id': github_repository_id,
        'change_request_id': change_request_id,
    })
    return '{}/v2/repository-provider/{}/oauth2_redirect?{}'.format(
        cla.conf['API_BASE_URL'], repository_service, query)
def get_full_sign_url(repository_service, installation_id, github_repository_id, change_request_id, project_version):
    """
    Helper function to get the full sign URL that the user should click to initiate the signing
    workflow.

    :param repository_service: The repository service provider we're getting the sign url for.
        Should be one of the supported repository providers ('github', 'gitlab', etc).
    :type repository_service: string
    :param installation_id: The EasyCLA GitHub application ID
    :type installation_id: string
    :param github_repository_id: The ID of the repository for this signature (used in order to figure out
        where to send the user once signing is complete.
    :type github_repository_id: int
    :param change_request_id: The change request ID for this signature (used in order to figure out
        where to send the user once signing is complete. Should be a pull request number when
        repository_service is 'github'. Should be a merge request ID when repository_service is
        'gitlab'.
    :type change_request_id: int
    :param project_version: Project version associated with PR
    :type project_version: string
    """
    base_url = (f"{cla.conf['API_BASE_URL']}/v2/repository-provider/{repository_service}"
                f"/sign/{installation_id}/{github_repository_id}/{change_request_id}/#/")
    return append_project_version_to_url(address=base_url, project_version=project_version)
def append_project_version_to_url(address: str, project_version: str) -> str:
    """
    Appends the CLA version query parameter to the given url, unless a
    `version` parameter is already present.

    :param address: the url to decorate
    :param project_version: the project version; 'v2' selects version "2",
        anything else (including None) selects version "1"
    :return: returns the final url
    """
    version = "2" if project_version == 'v2' else "1"
    # urllib gets confused by fragment-style urls such as
    # https://dev.lfcla.com/#/?version=1:
    # In[21]: list(urlparse.urlparse(address))
    # Out[21]: ['https', 'dev.lfcla.com', '/', '', '', '/#/?version=1']
    # so the query string is split off and handled manually instead.
    parts = address.split("?")
    existing = dict(urlparse.parse_qsl(parts[1])) if len(parts) > 1 else {}
    # a url that already carries a version is left untouched
    if "version" in existing:
        return address
    existing["version"] = version
    return parts[0] + "?" + urlencode(existing)
def get_comment_badge(repository_type, all_signed, sign_url, project_version, missing_user_id=False,
                      is_approved_by_manager=False):
    """
    Returns the CLA badge that will appear on the change request comment (PR for 'github', merge
    request for 'gitlab', etc)

    :param repository_type: The repository service provider we're getting the badge for.
        Should be one of the supported repository providers ('github', 'gitlab', etc).
    :type repository_type: string
    :param all_signed: Whether or not all committers have signed the change request.
    :type all_signed: boolean
    :param sign_url: The URL for the user to click in order to initiate signing.
    :type sign_url: string
    :param missing_user_id: Flag to check if github id is missing
    :type missing_user_id: bool
    :param is_approved_by_manager: Flag checking if unregistered CLA user has been approved by a CLA Manager
    :type is_approved_by_manager: bool
    """
    if all_signed:
        alt = "CLA Signed"
        badge_url = f'{CLA_LOGO_URL}/cla-signed.svg'
        # Signed badges link to the CLA landing page (with the version param).
        landing = os.path.join(cla.conf["CLA_LANDING_PAGE"], "#/")
        badge_hyperlink = append_project_version_to_url(address=landing, project_version=project_version)
    else:
        # Unsigned badges link straight into the signing workflow.
        badge_hyperlink = sign_url
        if missing_user_id:
            badge_url = f'{CLA_LOGO_URL}/cla-missing-id.svg'
            alt = 'CLA Missing ID'
        elif is_approved_by_manager:
            badge_url = f'{CLA_LOGO_URL}/cla-confirmation-needed.svg'
            alt = 'CLA Confirmation Needed'
        else:
            badge_url = f'{CLA_LOGO_URL}/cla-not-signed.svg'
            alt = "CLA Not Signed"
    # return '[![CLA Check](' + badge_url + ')](' + badge_hyperlink + ')'
    return (f'<a href="{badge_hyperlink}">'
            f'<img src="{badge_url}" alt="{alt}" align="left" height="28" width="328" >'
            '</a><br/>')
def assemble_cla_status(author_name, signed=False):
    """
    Helper function to return the text that will display on a change request status.

    For GitLab there isn't much space here - we rely on the user hovering their mouse over the icon.
    For GitHub there is a 140 character limit.

    :param author_name: The name of the author of this commit.
    :type author_name: string
    :param signed: Whether or not the author has signed an signature.
    :type signed: boolean
    """
    display_name = 'Unknown' if author_name is None else author_name
    if signed:
        return display_name, 'EasyCLA check passed. You are authorized to contribute.'
    return display_name, 'Missing CLA Authorization.'
def assemble_cla_comment(repository_type, installation_id, github_repository_id, change_request_id, signed, missing,
                         project_version):
    """
    Helper function to generate a CLA comment based on a change request.

    :param repository_type: The type of repository this comment will be posted on ('github',
        'gitlab', etc).
    :type repository_type: string
    :param installation_id: The EasyCLA GitHub application ID
    :type installation_id: string
    :param github_repository_id: The ID of the repository for this change request.
    :type github_repository_id: int
    :param change_request_id: The repository service's ID of this change request.
    :type change_request_id: id
    :param signed: The list of commit hashes and authors that have signed an signature for this
        change request.
    :type signed: [(string, string)]
    :param missing: The list of commit hashes and authors that have not signed for this
        change request.
    :type missing: [(string, list)]
    :param project_version: Project version associated with PR comment
    :type project_version: string
    """
    # An entry whose first author field is None is missing its GitHub user id.
    no_user_id = any(entry[1][0] is None for entry in missing)
    # "Approved by a CLA Manager but not associated with a company" detection
    # is no longer supported: the DB query was removed in the caller.
    # approved_ids = list(filter(lambda x: len(x[1]) == 4 and x[1][3] is True, missing))
    # approved_by_manager = len(approved_ids) > 0
    sign_url = get_full_sign_url(repository_type, installation_id, github_repository_id, change_request_id,
                                 project_version)
    comment = get_comment_body(repository_type, sign_url, signed, missing)
    all_signed = len(missing) == 0
    badge = get_comment_badge(
        repository_type=repository_type,
        all_signed=all_signed,
        sign_url=sign_url,
        project_version=project_version,
        missing_user_id=no_user_id)
    return badge + '<br />' + comment
def get_comment_body(repository_type, sign_url, signed, missing):
"""
Returns the CLA comment that will appear on the repository provider's change request item.
:param repository_type: The repository type where this comment will be posted ('github',
'gitlab', etc).
:type repository_type: string
:param sign_url: The URL for the user to click in order to initiate signing.
:type sign_url: string
:param signed: List of tuples containing the commit and author name of signers.
:type signed: [(string, string)]
:param missing: List of tuples containing the commit and author name of not-signed users.
:type missing: [(string, list)]
"""
fn = "utils.get_comment_body"
cla.log.info(f"{fn} - Getting comment body for repository type: %s", repository_type)
failed = ":x:"
success = ":white_check_mark:"
committers_comment = ""
num_signed = len(signed)
num_missing = len(missing)
if num_signed > 0:
# Group commits by author.
committers = {}
for commit, author in signed:
if author is None:
author = "Unknown"
if author not in committers:
committers[author] = []
committers[author].append(commit)
# Print author commit information.
committers_comment += "<ul>"
for author, commit_hashes in committers.items():
committers_comment += "<li>" + | |
import sys
import math
import os
import time
import random
import copy
# --------------------
# OBJ : operations
# DESC: Dictionary object in which the keys are a string expression and the item is a lambda function.
# --------------------
# Table of GP primitives: key = operator name, value = lambda evaluating a
# Node's children list `p`.  All partial operations are "protected": invalid
# inputs yield a neutral value (0 or 1) instead of raising.
operations = {
    # leaf constant: payload is the raw value, not a Node
    "const": lambda p: p[0],
    "add" : lambda p: p[0].evaluate() + p[1].evaluate(),
    "sub" : lambda p: p[0].evaluate() - p[1].evaluate(),
    "mul" : lambda p: p[0].evaluate() * p[1].evaluate(),
    # protected division: x / y, but 0 when y == 0
    "div" : lambda p: (lambda x, y: x.evaluate()/y if y != 0 else 0)(p[0],p[1].evaluate()),
    # protected power: 1 when exponent == 0; 0 for invalid combinations
    # (negative base with fractional exponent, or 0 ** negative)
    "pow" : lambda p: (lambda x,y: (lambda n,m: 0 if (n < 0 and not float(m).is_integer()) or (n == 0 and m < 0) else float(n) ** float(m))(x.evaluate(), y) if y != 0 else 1)(p[0], p[1].evaluate()),
    # protected sqrt / log2: 0 outside the valid domain
    "sqrt" : lambda p: (lambda x: math.sqrt(x) if x >= 0 else 0)(p[0].evaluate()),
    "log" : lambda p: (lambda x: math.log(x, 2) if x > 0 else 0)(p[0].evaluate()),
    # exp clamped to 0 when it overflows to +inf
    "exp" : lambda p: (lambda x: x if x != float("inf") else 0)(math.exp(p[0].evaluate())),
    "max" : lambda p: max(p[0].evaluate(), p[1].evaluate()),
    # if p0 <= p1 then p2 else p3
    "ifleq": lambda p: p[2].evaluate() if p[0].evaluate() <= p[1].evaluate() else p[3].evaluate(),
    # Data accessors below are placeholders: refresh_lambda() re-installs
    # them with the actual input vector bound via the inp/n defaults.
    "data" : lambda p, inp=[], n=0: inp[abs(math.floor(p[0].evaluate()))%n] if n != 0 else 0,
    "diff" : lambda p, inp=[], n=0: inp[abs(math.floor(p[0].evaluate()))%n] - inp[abs(math.floor(p[1].evaluate()))%n] if n != 0 else 0,
    # NOTE(review): this initial "avg" indexes with math.floor(abs(...)) while
    # refresh_lambda() installs abs(math.floor(...)); the refreshed version is
    # the one in effect during evaluation -- confirm which form is intended.
    "avg" : lambda p, inp=[], n=0: (lambda x, y, inp: (lambda i, j, inp: sum(inp[i:j])/(j-i))(min(x,y),max(x,y),inp) if x != y else 0)
    (math.floor(abs(p[0].evaluate()))%n, math.floor(abs(p[1].evaluate()))%n,inp) if n != 0 else 0
}

# Arity (number of children) of each non-const operator.
op_branches = {"add": 2, "sub": 2, "mul": 2, "div": 2, "pow": 2, "sqrt": 1, "log": 1, "exp": 1, "max": 2, "ifleq": 4, "data": 1, "diff": 2, "avg": 2}
# --------------------
# OBJ : Node
# DESC: Class representing a tree node. Contains operation, children (vals), size of subtree and fitness.
# --------------------
class Node():
    """A node of a GP expression tree.

    Attributes:
        op:      key into the module-level `operations` table.
        vals:    child Nodes, or [constant] for "const" nodes.
        size:    number of nodes in this subtree (maintained by callers).
        depth:   height of this subtree (maintained by callers).
        fitness: cached fitness of the tree rooted here.
    """

    def __init__(self, _op, _vals, _size=0, _depth=0, _fitness=0):
        self.op = _op
        self.vals = _vals
        self.size = _size
        self.depth = _depth
        self.fitness = _fitness

    def evaluate(self):
        """Evaluate the subtree, clamping non-finite results to 0.

        Returns 0 for NaN, +/-inf and arithmetic overflow so a single bad
        subtree cannot poison an entire fitness evaluation.
        """
        try:
            ans = operations[self.op](self.vals)
            # Bug fix: the old check (ans == float('inf')) let -inf through.
            if math.isinf(ans) or math.isnan(ans):
                return 0
            return ans
        except OverflowError:
            return 0

    def display(self):
        """Print this tree as an s-expression."""
        # NOTE(review): out_tree is not defined anywhere visible in this
        # module -- possibly this should call output_exp(). Verify.
        print(out_tree(self))
# --------------------
# FUNC : main
# RETURN: None
# DESC : Parses command line args and calls function based on questions parameter.
# --------------------
def main(args):
    """Parse command-line arguments and dispatch on the -question flag.

    Question 1 evaluates an expression on one input vector, question 2
    scores an expression against a data file, and question 3 runs the
    genetic algorithm.  Every validation failure prints a usage hint and
    re-raises the underlying exception so the process exits loudly.
    """
    n = 0
    inp = []
    # Bare `except:` clauses narrowed to `except Exception:` throughout so
    # KeyboardInterrupt / SystemExit are no longer intercepted before the
    # re-raise (and no longer print a misleading usage hint).
    try:
        question = int(args[args.index("-question")+1])
        assert 1 <= question <= 3
    except Exception:
        print("Question not specified. Include the flag '-question' followed by a number in {1, 2, 3}.")
        raise
    try:
        n = int(args[args.index("-n")+1])
    except Exception:
        print("Dimension size not specified. Include the flag '-n' followed by the dimensions of the input vector.")
        raise
    if question == 1 or question == 2:
        try:
            exp = args[args.index("-expr")+1]
        except Exception:
            print("Expression not specified. Include the flag '-expr' followed by an expression to evaluate.")
            raise
    if question == 2 or question == 3:
        try:
            m = int(args[args.index("-m")+1])
        except Exception:
            print("Training data dimension not specified. Include the flag '-m' followed by the dimensions of the training data.")
            raise
        try:
            data = args[args.index("-data")+1]
            assert os.path.isfile(data)
            x, y = read_file(data)
            # The file must hold m rows of n features plus one target column.
            assert len(x) == m and len(y) == m
            assert all(len(i) == n for i in x)
        except Exception:
            print("Data file error. Include the flag '-data' followed a filename containing data of dimension m by n+1.")
            raise
    if question == 1:
        try:
            # Parse directly into `inp` (the old code bound the raw token
            # list to `x`, shadowing the training-data variable above).
            inp = [float(i) for i in args[args.index("-x")+1].split(' ')]
            assert len(inp) == n
        except Exception:
            print("Input vector error. Include the flag '-x' followed by a space seperated list of floats, of the same dimension as n.")
            raise
        print(create_tree(exp, inp, n).evaluate())
    elif question == 2:
        tree = parse_exp(exp)
        print(fitness(tree, x, y, m, n))
    elif question == 3:
        try:
            pop_size = int(args[args.index("-lambda")+1])
            assert pop_size > 0
        except Exception:
            print("Population size not specified. Include the flag '-lambda' followed by the population size.")
            raise
        try:
            time_budget = int(args[args.index("-time_budget")+1])
            assert time_budget > 0
        except Exception:
            print("Time budget not specified. Include the flag '-time_budget' followed by the max time in seconds.")
            raise
        best = generate_exp(pop_size, n, m, x, y, time_budget)
        print(output_exp(best))
        # print(best.fitness)
# --------------------
# FUNC : create_tree
# RETURN: Node Object
# DESC : Generates tree based on input string.
# --------------------
def create_tree(exp, inp, n):
    """Build an expression tree for `exp` bound to the input vector `inp`.

    refresh_lambda() must run first so the data/diff/avg operations close
    over the supplied input vector of dimension `n`.
    """
    refresh_lambda(inp, n)
    tree = parse_exp(exp)
    return tree
# --------------------
# FUNC : parse_exp
# RETURN: Node Object
# DESC : Converts inputted expression string into a tree of Node objects.
# --------------------
def parse_exp(exp):
    """Recursively convert an s-expression string into a Node tree."""
    open_idx = exp.find('(')
    close_idx = exp.rfind(')')
    # No brackets at all -> this token is a numeric leaf.
    if open_idx == -1 and close_idx == -1:
        return Node("const", [float(exp)])
    # Strip the outer brackets and split into operator + child expressions.
    tokens = split_exp(exp[open_idx+1:close_idx])
    children = [parse_exp(tok) for tok in tokens[1:]]
    return Node(tokens[0], children)
# --------------------
# FUNC : split_exp
# RETURN: String
# DESC : Partitions expression string by spaces not in brackets to isolate sub trees.
# --------------------
def split_exp(exp):
    """Split an expression body on top-level spaces.

    Spaces inside parentheses are ignored, so each returned chunk is
    either the operator token or one complete (possibly parenthesised)
    sub-expression.  Chunks after the first keep their leading space;
    downstream consumers (float(), str.find()) tolerate it.
    """
    depth = 0
    breaks = [0]
    for idx, ch in enumerate(exp):
        if ch == ' ' and depth == 0:
            breaks.append(idx)
        elif ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
    return [exp[start:stop] for start, stop in zip(breaks, breaks[1:] + [None])]
# --------------------
# FUNC : refresh_lambda
# RETURN: None
# DESC : Sets inp and n parameters of lambda functions to use the data in evaluation of trees.
# --------------------
def refresh_lambda(inp, n):
    """Rebind the data-dependent GP operations to a new input vector.

    The "data", "diff" and "avg" entries of the global `operations` table
    capture `inp` and `n` through default arguments, so they must be
    re-installed before evaluating a tree on a different input vector.
    """
    global operations
    operations.update({
        # inp[|floor(x)| mod n]: protected indexed access into the vector.
        "data": lambda p, inp=inp, n=n: inp[abs(math.floor(p[0].evaluate()))%n] if n != 0 else 0,
        # Difference of two protected indexed accesses.
        "diff": lambda p, inp=inp, n=n: inp[abs(math.floor(p[0].evaluate()))%n] - inp[abs(math.floor(p[1].evaluate()))%n] if n != 0 else 0,
        # Mean of inp[min(i,j):max(i,j)], or 0 when the indices coincide.
        "avg" : lambda p, inp=inp, n=n: (lambda x, y, inp: (lambda i, j, inp: sum(inp[i:j])/(j-i))(min(x,y),max(x,y),inp) if x != y else 0)(abs(math.floor(p[0].evaluate()))%n, abs(math.floor(p[1].evaluate()))%n,inp) if n != 0 else 0
    })
# --------------------
# FUNC : read_file
# RETURN: Two lists of Floats
# DESC : Reads file and partitions data into two sets. One to evaluate and one to measure error.
# --------------------
def read_file(filename):
    """Load tab-separated training data.

    Each line holds the feature values followed by one target value.
    Returns (x, y): x is the list of feature rows, y the list of targets.
    """
    with open(filename) as handle:
        rows = [line.split('\t') for line in handle.readlines()]
    x = [[float(cell.strip()) for cell in row[:-1]] for row in rows]
    y = [float(row[-1].strip()) for row in rows]
    return x, y
# --------------------
# FUNC : fitness
# RETURN: Float
# DESC : Calculates the fitness of a tree using mean squared error.
# --------------------
def fitness(tree, x, y, m, n):
    """Mean squared error of `tree` over the m training samples.

    Returns +inf when evaluation overflows.
    """
    try:
        total = 0
        for row in range(0, m):
            # Bind the data lambdas to this sample before evaluating.
            refresh_lambda(x[row], n)
            total += ((y[row] - tree.evaluate()) ** 2) / m
        return total
    except OverflowError:
        return float('inf')
# --------------------
# FUNC : output_exp
# RETURN: String
# DESC : Converts Node tree into string expression.
# --------------------
def output_exp(tree):
    """Render a Node tree back into its s-expression string."""
    if tree.op == "const":
        return str(tree.vals[0])
    rendered_children = ' '.join(output_exp(child) for child in tree.vals)
    return '(' + tree.op + ' ' + rendered_children + ')'
# --------------------
# FUNC : generate_exp
# RETURN: Node object
# DESC : Runs the genetic algorithm which generates a Node tree expression.
# --------------------
def generate_exp(pop_size, n, m, x, y, time_budget, max_depth=5, mutation_rate=0.5, elite=0.1, tourn_size=8, test_pop=None):
    """Run the genetic algorithm and return the fittest tree found.

    :param pop_size: individuals per generation (the -lambda flag)
    :param n: input vector dimension
    :param m: number of training samples
    :param x: training feature rows
    :param y: training targets
    :param time_budget: wall-clock budget in seconds
    :param max_depth: soft depth limit (enforced via sort_tree's penalty)
    :param mutation_rate: probability of mutating an offspring
    :param elite: elite fraction of the population carried over unchanged
    :param tourn_size: tournament size for parent selection
    :param test_pop: optional pre-built initial population
    """
    elite = int(elite * pop_size)
    start = time.time()
    # Idiom fix: compare against None with `is`, not `==` (the old check
    # could misfire on objects overriding __eq__).
    if test_pop is None:
        population = []
        for _ in range(pop_size):
            new_tree = gen_random_tree(n, max_depth)
            new_tree.fitness = fitness(new_tree, x, y, m, n)
            population.append(new_tree)
    else:
        population = test_pop
    # Evolve generation after generation until the budget is exhausted.
    while time.time() < start + time_budget:
        population = gen_new_pop(copy.deepcopy(population), pop_size, mutation_rate, elite, max_depth, tourn_size, x, y, m, n)
    population.sort(key=lambda tree: tree.fitness)
    return population[0]
# --------------------
# FUNC : gen_random_tree
# RETURN: Node object
# DESC : Generates a random Node tree.
# --------------------
def gen_random_tree(n, max_depth):
    """Generate a random expression tree of height bounded by ~max_depth.

    Near the depth limit the tree terminates with either a constant leaf
    (30%) or a data-access node wrapping a constant index (70%).
    """
    if max_depth <= 2:
        # Draw order matters for RNG reproducibility: uniform() first.
        if random.uniform(0, 1) < 0.3:
            return Node("const", [random.randint(0, n - 1)], 1, 1)
        index_leaf = Node("const", [random.randint(0, n - 1)], 1, 1)
        return Node("data", [index_leaf], 2, 2)
    chosen_op = random.choice(list(op_branches.keys()))
    children = []
    subtree_size = 0
    subtree_depth = 0
    for _ in range(op_branches[chosen_op]):
        child = gen_random_tree(n, max_depth - 1)
        children.append(child)
        subtree_size += child.size
        subtree_depth = max(subtree_depth, child.depth)
    return Node(chosen_op, children, subtree_size + 1, subtree_depth + 1)
# --------------------
# FUNC : sort_tree
# RETURN: Float
# DESC : Used to sort objects using fitness and depth to prevent bloating.
# --------------------
def sort_tree(s, max_depth):
    """Fitness sort key that penalises over-deep (bloated) trees.

    Trees within the depth budget sort by raw fitness; deeper trees have
    their fitness inflated by a factor of 1.3 per excess level.
    """
    excess = s.depth - max_depth
    return s.fitness if excess <= 0 else s.fitness * (1.3 ** excess)
# --------------------
# FUNC : select
# RETURN: Two Node objects
# DESC : Runs tournament selection on a given list of nodes.
# --------------------
def select(trees, max_depth, k=2):
    """Tournament selection: sample k trees, return copies of the best two."""
    contenders = random.sample(trees, k=k)
    # Bloat-aware ranking via sort_tree (lower key = better).
    contenders.sort(key=lambda t: sort_tree(t, max_depth))
    return copy.deepcopy(contenders[0]), copy.deepcopy(contenders[1])
# --------------------
# FUNC : crossover
# RETURN: Two Node objects
# DESC : Takes two Node trees, selects a random branch from each and switches them.
# --------------------
def crossover(p1, p2):
    """Exchange one random subtree between two parents.

    Returns both offspring (the parent objects are modified in place).
    """
    branch_a, path_a = choose_branch(p1)
    branch_b, path_b = choose_branch(p2)
    p1 = replace_branch(p1, path_a, branch_b)
    p2 = replace_branch(p2, path_b, branch_a)
    return p1, p2
# --------------------
# FUNC : choose_branch
# RETURN: Node object and List of Ints
# DESC : Selects a random branch from a given tree with equal probability. Returns branch and path to branch in original tree.
# --------------------
def choose_branch(tree, path=None):
    """Pick a uniformly random subtree of `tree`.

    Each node of the tree is chosen with probability 1/tree.size.
    Returns (subtree, path) where path is the list of child indices
    leading from the root to the chosen subtree.

    :param path: indices accumulated so far.  Bug fix: the old signature
        used the mutable default `path=[]`, shared across calls; a None
        sentinel is used instead.
    """
    if path is None:
        path = []
    choice = random.uniform(0, 1)
    prob_inc = 1 / tree.size
    if choice <= prob_inc:
        return tree, path
    prob_count = prob_inc
    for i in range(0, len(tree.vals)):
        prob_count += tree.vals[i].size * prob_inc
        if choice <= prob_count:
            return choose_branch(tree.vals[i], path + [i])
    # Bug fix: floating-point round-off can leave `choice` just above the
    # accumulated total, in which case the old code fell off the end and
    # returned None.  Fall back to the last child (or this node for a leaf).
    if tree.vals:
        return choose_branch(tree.vals[-1], path + [len(tree.vals) - 1])
    return tree, path
# --------------------
# FUNC : replace_branch
# RETURN: Node objects
# DESC : Replaces a branch on a given Node tree, using the given path to the branch and given replacement tree.
# --------------------
def replace_branch(tree, path, replace):
    """Replace the subtree at `path` within `tree` with `replace`.

    Size and depth bookkeeping is refreshed on every node along the path;
    the (possibly new) root is returned.
    """
    if path == []:
        return replace
    ind = path[0]
    tree.vals[ind] = replace_branch(tree.vals[ind], path[1:], replace)
    # Recompute depth from all children.  Bug fix: the old code only ever
    # grew the depth, so a smaller replacement left a stale value.
    tree.depth = max(child.depth for child in tree.vals) + 1
    # Bug fix: the node itself counts too -- the old sum omitted the +1,
    # undercounting sizes and skewing choose_branch's selection probabilities.
    tree.size = 1 + sum(child.size for child in tree.vals)
    return tree
# --------------------
# FUNC : mutate
# RETURN: Node object
# DESC : Selects a random branch, generates a random tree, and replaces the branch with the random tree.
# --------------------
def mutate(tree, n, max_depth):
    """Point mutation: replace one randomly chosen branch of *tree* with a
    freshly generated random subtree.

    The new subtree's depth budget is ``max_depth - len(path)`` so the
    mutated tree stays within the overall depth limit.
    """
    _, path = choose_branch(tree)
    replacement = gen_random_tree(n, max_depth - len(path))
    return replace_branch(tree, path, replacement)
# | |
char_bw, _ = self.char_lstm_backward(utils.reverse_sequence(chars_embeds.transpose(0, 1), gpu=self.use_gpu))
bw = char_bw[-1, :, :]
fw = char_fw[-1, :, :]
tok_rep = torch.cat([word_embeds[word_idx].unsqueeze(0), fw, bw], 1)
tok_rep = torch.tanh(self.tok_2_embed(tok_rep))
else:
tok_rep = word_embeds[word_idx].unsqueeze(0)
if self.use_bert and self.idx2word[word] not in ['<ROOT>', '<unaligned>', '<eof>']:
bert_embed = torch.from_numpy(bert_embedding[word_idx]).float()
if self.use_gpu:
bert_embed = bert_embed.cuda()
tok_rep = torch.cat([tok_rep, bert_embed.unsqueeze(0)], 1)
tok_rep = self.pretrained_2_embed(tok_rep)
if word_idx == 0:
token_embedding = tok_rep
elif sentence_array[word_idx] != 1:
token_embedding = torch.cat([token_embedding, tok_rep], 0)
bufferint = []
stackint = []
latentint = []
# add tokens to buffer
i = len(sentence_array) - 1
for idx, token in zip(reversed(sentence_array), reversed(tokens)):
if self.idx2word[idx] != '<eof>':
tok_embed = token_embedding[i].unsqueeze(0)
if self.idx2word[idx] == '<unaligned>' and self.parse_unaligned:
latent.append((tok_embed, token))
latentint.append(idx)
else:
buffer.push(tok_embed, token)
bufferint.append(idx if token != '<ROOT>' else -1)
i -= 1
predict_actions = []
predict_labels = []
predict_labelsA = []
predict_predicates = []
# vars for testing valid actions
is_entity = set()
has_root = False
is_edge = set()
is_confirmed = set()
is_functionword = set(bufferint)
is_functionword.discard(-1)
swapped_words = {}
lemmas = None
if self.use_attention:
forward_output, _ = self.forward_lstm(token_embedding.unsqueeze(1))
backward_output, _ = self.backward_lstm(utils.reverse_sequence(token_embedding, gpu=self.use_gpu).unsqueeze(1))
backward_output = utils.reverse_sequence(backward_output, gpu=self.use_gpu)
while not (len(bufferint) == 0 and len(stackint) == 0):
label = ''
labelA = ''
predicate = ''
extra = []
if self.use_attention:
last_state = torch.cat([action.output(), stack.output()], 1)
x = torch.cat([forward_output, backward_output], 2).squeeze(1)
y = torch.matmul(self.attention_weights, last_state.squeeze(0))
attention = torch.softmax(torch.matmul(x, y), dim=0)
attention_output = torch.matmul(x.transpose(0, 1), attention).unsqueeze(0)
attention_output = torch.tanh(self.attention_ff1_1(attention_output))
extra += [attention_output]
if self.use_function_words_all:
extra += [functionwords.output()]
lstms_output = [stack.output(), buffer.output(), action.output()] + extra
if not (self.weight_inputs or self.attend_inputs):
lstms_output = torch.cat(lstms_output, 1)
if self.attend_inputs:
stack_state = torch.tanh(self.prevent_overfitting(self.dropout(stack.output())))
future_label_loss = []
if mode == 'train':
valid_actions = list(self.action2idx.values())
else:
extra = functionwords.output() if self.use_function_words_rels else None
if self.weight_inputs:
inputs = self.weight_vectors(self.label_attention, lstms_output)
elif self.attend_inputs:
inputs = self.weight_vectors(self.label_attention(stack_state).squeeze(), lstms_output)
else:
inputs = lstms_output
label_embedding, label, future_label_loss, correct = self.predict_with_softmax(
self.label_softmax1, self.label_softmax2, inputs, labelsO, self.labelsO,
labelO_embeds, self.idx2labelO, 'predict', future_label_loss, action_count, extra=extra)
valid_actions = self.get_possible_actions(stackint, bufferint, latentint, has_root, is_entity, is_edge,
label, is_confirmed, swapped_words)
if self.weight_inputs:
inputs = self.weight_vectors(self.action_attention, lstms_output)
elif self.attend_inputs:
inputs = self.weight_vectors(self.action_attention(stack_state).squeeze(), lstms_output)
else:
inputs = lstms_output
act_embedding, real_action, action_losses, correct = self.predict_with_softmax(self.action_softmax1,
self.action_softmax2,
inputs, actions,
valid_actions, action_embeds,
self.idx2action, mode,
action_losses, action_count,
self.action_confusion_matrix)
if mode == 'train':
self.action_acc.add(correct)
# Perform Action
action.push(act_embedding, real_action)
# SHIFT
if real_action.startswith('SH'):
buffer0 = buffer.pop()
if len(bufferint) == 0 or not buffer0:
# CLOSE
bufferint = []
stackint = []
predict_actions.append(real_action)
predict_labels.append(label)
predict_labelsA.append(labelA)
predict_predicates.append(predicate)
break
s0 = bufferint.pop()
stackint.append(s0)
tok_buffer_embedding, buffer_token = buffer0
stack.push(tok_buffer_embedding, buffer_token)
# REDUCE
elif real_action.startswith('RE'):
s0 = stackint.pop()
tok_stack0_embedding, stack0_token = stack.pop()
if self.use_function_words and s0 in is_functionword:
functionwords.push(tok_stack0_embedding, stack0_token)
# UNSHIFT
elif real_action.startswith('UN'):
s0 = stackint.pop()
s1 = stackint.pop()
bufferint.append(s1)
stackint.append(s0)
tok_stack0_embedding, stack0_token = stack.pop()
tok_stack1_embedding, stack1_token = stack.pop()
stack.push(tok_stack0_embedding, stack0_token)
buffer.push(tok_stack1_embedding, stack1_token)
if s1 not in swapped_words:
swapped_words[s1] = []
swapped_words[s1].append(s0)
# MERGE
elif real_action.startswith('ME'):
s0 = stackint.pop()
tok_stack0_embedding, stack0_token = stack.pop()
tok_stack1_embedding, stack1_token = stack.pop()
head_embedding = torch.tanh(
self.merge_composition(torch.cat((tok_stack0_embedding, tok_stack1_embedding), dim=1)))
stack.push(head_embedding, stack1_token)
is_functionword.discard(stackint[-1])
# DEPENDENT
elif real_action.startswith('DE'):
tok_stack0_embedding, stack0_token = stack.pop()
head_embedding = torch.tanh(
self.dep_composition(torch.cat((tok_stack0_embedding, act_embedding), dim=1)))
stack.push(head_embedding, stack0_token)
is_functionword.discard(stackint[-1])
# RA
elif real_action.startswith('RA'):
if mode == 'train' or mode == 'sample':
extra = functionwords.output() if self.use_function_words_rels else None
if self.weight_inputs:
inputs = self.weight_vectors(self.label_attention, lstms_output)
elif self.attend_inputs:
inputs = self.weight_vectors(self.label_attention(stack_state).squeeze(), lstms_output)
else:
inputs = lstms_output
label_embedding, label, label_losses, correct = self.predict_with_softmax(
self.label_softmax1, self.label_softmax2, inputs, labelsO, self.labelsO,
labelO_embeds, self.idx2labelO, mode, label_losses, action_count, self.label_confusion_matrix,
extra=extra)
self.label_acc.add(correct)
tok_stack0_embedding, stack0_token = stack.pop()
tok_stack1_embedding, stack1_token = stack.pop()
head_embedding = torch.tanh(self.arc_composition_head(
torch.cat((tok_stack1_embedding, label_embedding, tok_stack0_embedding), dim=1)))
dep_embedding = tok_stack0_embedding
stack.push(head_embedding, stack1_token)
stack.push(dep_embedding, stack0_token)
if label == 'root':
has_root = True
is_edge.add((stackint[-2], label, stackint[-1]))
if label.startswith('ARG') and (not label.endswith('of')):
is_edge.add((stackint[-2], label))
is_functionword.discard(stackint[-1])
is_functionword.discard(stackint[-2])
# LA
elif real_action.startswith('LA'):
if mode == 'train' or mode == 'sample':
extra = functionwords.output() if self.use_function_words_rels else None
if self.weight_inputs:
inputs = self.weight_vectors(self.label_attention, lstms_output)
elif self.attend_inputs:
inputs = self.weight_vectors(self.label_attention(stack_state).squeeze(), lstms_output)
else:
inputs = lstms_output
label_embedding, label, label_losses, correct = self.predict_with_softmax(
self.label_softmax1, self.label_softmax2, inputs, labelsO, self.labelsO,
labelO_embeds, self.idx2labelO, mode, label_losses, action_count, self.label_confusion_matrix,
extra=extra)
self.label_acc.add(correct)
tok_stack0_embedding, stack0_token = stack.pop()
tok_stack1_embedding, stack1_token = stack.pop()
head_embedding = torch.tanh(self.arc_composition_head(
torch.cat((tok_stack0_embedding, label_embedding, tok_stack1_embedding), dim=1)))
dep_embedding = tok_stack1_embedding
stack.push(dep_embedding, stack1_token)
stack.push(head_embedding, stack0_token)
if label == 'root':
has_root = True
is_edge.add((stackint[-1], label, stackint[-2]))
if label.startswith('ARG') and (not label.endswith('of')):
is_edge.add((stackint[-1], label))
is_functionword.discard(stackint[-1])
is_functionword.discard(stackint[-2])
# PRED
elif real_action.startswith('PRED'):
tok = stack.last()[1]
pred_softmax1 = self.pred_softmax1
pred_softmax2 = self.pred_softmax2
if self.weight_inputs or self.attend_inputs:
pred_attention = self.pred_attention
if self.parse_unaligned and tok == '<unaligned>':
pred_softmax1 = self.pred_softmax1_unaligned
pred_softmax2 = self.pred_softmax2_unaligned
if self.weight_inputs or self.attend_inputs:
pred_attention = self.pred_attention_unaligned
if self.weight_inputs:
inputs = self.weight_vectors(pred_attention, lstms_output)
elif self.attend_inputs:
inputs = self.weight_vectors(pred_attention(stack_state).squeeze(), lstms_output)
else:
inputs = lstms_output
possible_predicates = []
if tok in self.possible_predicates:
for p in self.possible_predicates[tok]:
if p in self.pred2idx:
possible_predicates.append(self.pred2idx[p])
if mode == 'train' and preds[action_count].item() not in possible_predicates:
possible_predicates = self.preds
if not possible_predicates or stackint[-1] == self.word2idx['<unk>']:
pred_embedding = self.pred_unk_embed
# FIXME: Temporary fix for tok not in tokens killing threads
if self.lemmatizer is not None and tok in tokens:
if not lemmas:
lemmas = self.lemmatizer(tokens)
lemma = lemmas[tokens.index(tok)].lemma_
predicate = lemma
else:
predicate = tok
else:
pred_embedding, predicate, pred_losses, correct = self.predict_with_softmax(
pred_softmax1, pred_softmax2, inputs, preds, possible_predicates,
pred_embeds, self.idx2pred, mode, pred_losses, action_count)
if mode == 'train':
self.pred_acc.add(correct)
tok_stack0_embedding, stack0_token = stack.pop()
pred_embedding = torch.tanh(
self.pred_composition(torch.cat((pred_embedding, tok_stack0_embedding), dim=1)))
stack.push(pred_embedding, predicate)
is_confirmed.add(stackint[-1])
is_functionword.discard(stackint[-1])
# ADDNODE
elif real_action.startswith('AD'):
if self.weight_inputs:
inputs = self.weight_vectors(self.labelA_attention, lstms_output)
elif self.attend_inputs:
inputs = self.weight_vectors(self.labelA_attention(stack_state).squeeze(), lstms_output)
else:
inputs = lstms_output
labelA_embedding, labelA, labelA_losses, correct = self.predict_with_softmax(
self.labelA_softmax1, self.labelA_softmax2, inputs, labelsA, self.labelsA,
labelA_embeds, self.idx2labelA, mode, labelA_losses, action_count)
if mode == 'train':
self.labelA_acc.add(correct)
tok_stack0_embedding, stack0_token = stack.pop()
head_embedding = torch.tanh(
self.addnode_composition(torch.cat((tok_stack0_embedding, labelA_embedding), dim=1)))
stack.push(head_embedding, stack0_token)
is_entity.add(stackint[-1])
is_functionword.discard(stackint[-1])
# INTRODUCE
elif real_action.startswith('IN'):
tok_latent_embedding, latent_token = latent.pop()
stack.push(tok_latent_embedding, latent_token)
item = latentint.pop()
stackint.append(item)
predict_actions.append(real_action)
predict_labels.append(label)
predict_labelsA.append(labelA)
predict_predicates.append(predicate)
action_count += 1
if mode == 'train' and (
actions[action_count].item() == 0 or (actions[action_count].item() == 1 and len(bufferint) == 0)):
bufferint = []
stackint = []
if action_count > 500:
bufferint = []
stackint = []
for l in [action_losses, label_losses, labelA_losses, pred_losses]:
total_losses.extend(l)
if len(total_losses) > 0:
total_losses = -torch.sum(torch.stack(total_losses))
else:
total_losses = -1
losses_per_component = []
for i, loss in enumerate([action_losses, label_losses, labelA_losses, pred_losses]):
if len(loss) > 0:
losses_per_component.append(-torch.sum(torch.stack(loss)).item())
else:
losses_per_component.append(0)
self.action_loss += losses_per_component[0]
self.label_loss += losses_per_component[1]
self.labelA_loss += losses_per_component[2]
self.pred_loss += losses_per_component[3]
return total_losses, predict_actions, predict_labels, predict_labelsA, predict_predicates
# return embedding, string and update losses (if it is in training mode)
def predict_with_softmax(self, softmax1, softmax2, lstms_output, tensor, elements, embeds, idx2map, mode,
                         losses, action_count, confusion_matrix=None, extra=None):
    """Score the candidate *elements* with a two-layer head and pick one.

    *lstms_output* is passed through dropout and the first linear layer
    (tanh-activated), then the second layer; the resulting logits are
    restricted to the ids listed in *elements*.  Selection is argmax,
    except in 'sample' mode where an index is drawn from the categorical
    distribution over the (log-)logits.

    Returns ``(embedding, string, losses, correct)``:
    - 'train' mode: the gold item ``tensor[action_count]`` supplies the
      returned embedding/string, and ``correct`` flags whether the
      prediction matched it;
    - 'predict'/'sample' mode: the chosen item's embedding is looked up
      via *embeds* (a module here, vs. a pre-gathered tensor in train
      mode) and ``correct`` is always False.

    *losses* is appended to in place and also returned.
    """
    if extra is not None:
        # Optional extra feature vector (e.g. function-word stack state) is
        # concatenated before the first layer.
        hidden_output = torch.tanh(softmax1(self.dropout(torch.cat((lstms_output, extra), dim=1))))
    else:
        hidden_output = torch.tanh(softmax1(self.dropout(lstms_output)))
    # Restrict scoring to the candidate ids (moved to GPU when configured).
    if self.use_gpu is True:
        logits = softmax2(hidden_output)[0][torch.LongTensor(elements).cuda()]
    else:
        logits = softmax2(hidden_output)[0][torch.LongTensor(elements)]
    # Map element id -> position within the restricted logits vector.
    tbl = {a: i for i, a in enumerate(elements)}
    log_probs = torch.nn.functional.log_softmax(logits, dim=0)
    # if mode is sample then dont pick the argmax
    idx = 0
    if mode != 'sample':
        idx = log_probs.argmax().item()
    else:
        log_dist = log_probs
        if random.randint(1, 20) == 1:
            # Occasionally (1 in 20) halve the logits — temperature 2, i.e. a
            # flatter distribution; presumably to encourage exploration when
            # sampling — TODO confirm intent.
            log_dist = torch.mul(log_dist, 0.5)
        dist = torch.distributions.categorical.Categorical(logits=log_dist)
        idx = dist.sample()
    # NOTE(review): the predicted log-prob is appended unconditionally, so in
    # 'train' mode both the predicted AND the gold log-probs end up in
    # *losses* — confirm this double contribution is intended.
    losses.append(log_probs[idx])
    predict = elements[idx]
    pred_string = idx2map[predict]
    if mode == 'train':
        if log_probs is not None:
            losses.append(log_probs[tbl[tensor[action_count].item()]])
        gold_string = idx2map[tensor[action_count].item()]
        embedding = embeds[action_count].unsqueeze(0)
        if confusion_matrix:
            confusion_matrix.add(gold_string, pred_string)
        return embedding, gold_string, losses, (pred_string == gold_string)
    elif mode == 'predict' or mode == 'sample':
        predict_tensor = torch.from_numpy(np.array([predict])).cuda() if self.use_gpu else torch.from_numpy(
            np.array([predict]))
        # Embedding lookup for the predicted id, with embedding dropout.
        embeddings = self.dropout_emb(embeds(predict_tensor))
        embedding = embeddings[0].unsqueeze(0)
        return embedding, pred_string, losses, False
def weight_vectors(self, weights, vecs):
    """Return the weighted sum of the vectors in *vecs* as a (1, dim) tensor.

    After warm-up, *weights* is normalised with a softmax and rescaled by
    ``self.state_size`` before use; during warm-up the raw weights apply.

    Fix: the original wrote the scaled vectors back into *vecs* in place
    (``vecs[i] = torch.mul(...)``).  The caller reuses the very same
    ``lstms_output`` list for the action, label, labelA and pred heads, so
    each successive call compounded the previous head's scaling.  This
    version leaves the caller's list untouched.
    """
    if not self.warm_up:
        weights = self.state_size * F.softmax(weights, dim=0)
    weighted = [torch.mul(weights[i], vec) for i, vec in enumerate(vecs)]
    return torch.cat(weighted, 0).sum(dim=0).unsqueeze(0)
def get_possible_actions(self, stackint, bufferint, latentint, has_root, is_entity, is_edge, label, is_confirmed,
swapped_words):
valid_actions = []
for k, v in self.action2idx.items():
if k.startswith('SH') or k.startswith('CL'):
if len(bufferint) == 0 and len(stackint) > 5:
continue
valid_actions.append(v)
elif k.startswith('RE'):
if len(stackint) >= 1:
valid_actions.append(v)
elif k.startswith('PR'):
if len(stackint) >= 1 and stackint[-1] not in is_confirmed and stackint[-1] != -1:
valid_actions.append(v)
elif k.startswith('UN'):
if len(stackint) >= 2:
stack0 = stackint[-1]
stack1 = stackint[-2]
if stack0 in swapped_words and stack1 in swapped_words[stack0]:
continue
if stack1 | |
""" Cisco_IOS_XR_sysadmin_entity_mib
This module contains a collection of YANG
definitions for Cisco IOS\-XR SysAdmin configuration.
Copyright(c) 2015\-2017 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class PhysicalClass(Enum):
    """
    PhysicalClass (Enum Class)

    Enumeration of physical entity classes; the names and numeric values
    mirror the ENTITY-MIB ``PhysicalClass`` textual convention.

    .. data:: other = 1
    .. data:: unknown = 2
    .. data:: chassis = 3
    .. data:: backplane = 4
    .. data:: container = 5
    .. data:: powerSupply = 6
    .. data:: fan = 7
    .. data:: sensor = 8
    .. data:: module = 9
    .. data:: port = 10
    .. data:: stack = 11
    """

    other = Enum.YLeaf(1, "other")
    unknown = Enum.YLeaf(2, "unknown")
    chassis = Enum.YLeaf(3, "chassis")
    backplane = Enum.YLeaf(4, "backplane")
    container = Enum.YLeaf(5, "container")
    powerSupply = Enum.YLeaf(6, "powerSupply")
    fan = Enum.YLeaf(7, "fan")
    sensor = Enum.YLeaf(8, "sensor")
    module = Enum.YLeaf(9, "module")
    port = Enum.YLeaf(10, "port")
    stack = Enum.YLeaf(11, "stack")
class ENTITYMIB(Entity):
"""
.. attribute:: entitygeneral
**type**\: :py:class:`Entitygeneral <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_entity_mib.ENTITYMIB.Entitygeneral>`
.. attribute:: entphysicaltable
**type**\: :py:class:`Entphysicaltable <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_entity_mib.ENTITYMIB.Entphysicaltable>`
.. attribute:: entlogicaltable
**type**\: :py:class:`Entlogicaltable <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_entity_mib.ENTITYMIB.Entlogicaltable>`
.. attribute:: entlpmappingtable
**type**\: :py:class:`Entlpmappingtable <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_entity_mib.ENTITYMIB.Entlpmappingtable>`
.. attribute:: entaliasmappingtable
**type**\: :py:class:`Entaliasmappingtable <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_entity_mib.ENTITYMIB.Entaliasmappingtable>`
.. attribute:: entphysicalcontainstable
**type**\: :py:class:`Entphysicalcontainstable <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_entity_mib.ENTITYMIB.Entphysicalcontainstable>`
"""
_prefix = 'ENTITY_MIB'
_revision = '2017-04-12'
def __init__(self):
    """Build the top-level ENTITY-MIB container and attach its six child tables."""
    super(ENTITYMIB, self).__init__()
    self._top_entity = None

    self.yang_name = "ENTITY-MIB"
    self.yang_parent_name = "Cisco-IOS-XR-sysadmin-entity-mib"
    self.is_top_level_class = True
    self.has_list_ancestor = False
    self.ylist_key_names = []
    # Maps each YANG child container name to its (python attribute, class) pair.
    self._child_container_classes = OrderedDict([("entityGeneral", ("entitygeneral", ENTITYMIB.Entitygeneral)), ("entPhysicalTable", ("entphysicaltable", ENTITYMIB.Entphysicaltable)), ("entLogicalTable", ("entlogicaltable", ENTITYMIB.Entlogicaltable)), ("entLPMappingTable", ("entlpmappingtable", ENTITYMIB.Entlpmappingtable)), ("entAliasMappingTable", ("entaliasmappingtable", ENTITYMIB.Entaliasmappingtable)), ("entPhysicalContainsTable", ("entphysicalcontainstable", ENTITYMIB.Entphysicalcontainstable))])
    self._child_list_classes = OrderedDict([])
    self._leafs = OrderedDict()

    # Instantiate each child container, parent it, and register both its
    # python attribute name and YANG name.
    self.entitygeneral = ENTITYMIB.Entitygeneral()
    self.entitygeneral.parent = self
    self._children_name_map["entitygeneral"] = "entityGeneral"
    self._children_yang_names.add("entityGeneral")

    self.entphysicaltable = ENTITYMIB.Entphysicaltable()
    self.entphysicaltable.parent = self
    self._children_name_map["entphysicaltable"] = "entPhysicalTable"
    self._children_yang_names.add("entPhysicalTable")

    self.entlogicaltable = ENTITYMIB.Entlogicaltable()
    self.entlogicaltable.parent = self
    self._children_name_map["entlogicaltable"] = "entLogicalTable"
    self._children_yang_names.add("entLogicalTable")

    self.entlpmappingtable = ENTITYMIB.Entlpmappingtable()
    self.entlpmappingtable.parent = self
    self._children_name_map["entlpmappingtable"] = "entLPMappingTable"
    self._children_yang_names.add("entLPMappingTable")

    self.entaliasmappingtable = ENTITYMIB.Entaliasmappingtable()
    self.entaliasmappingtable.parent = self
    self._children_name_map["entaliasmappingtable"] = "entAliasMappingTable"
    self._children_yang_names.add("entAliasMappingTable")

    self.entphysicalcontainstable = ENTITYMIB.Entphysicalcontainstable()
    self.entphysicalcontainstable.parent = self
    self._children_name_map["entphysicalcontainstable"] = "entPhysicalContainsTable"
    self._children_yang_names.add("entPhysicalContainsTable")
    self._segment_path = lambda: "Cisco-IOS-XR-sysadmin-entity-mib:ENTITY-MIB"
class Entitygeneral(Entity):
    """
    YDK model binding for the ENTITY-MIB ``entityGeneral`` container.

    .. attribute:: entlastchangetime

    	**type**\: int

    	**range:** 0..4294967295

    """

    _prefix = 'ENTITY_MIB'
    _revision = '2017-04-12'

    def __init__(self):
        super(ENTITYMIB.Entitygeneral, self).__init__()

        self.yang_name = "entityGeneral"
        self.yang_parent_name = "ENTITY-MIB"
        self.is_top_level_class = False
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_container_classes = OrderedDict([])
        self._child_list_classes = OrderedDict([])
        self._leafs = OrderedDict([
            ('entlastchangetime', YLeaf(YType.uint32, 'entLastChangeTime')),
        ])
        # entLastChangeTime leaf value (uint32 timeticks); None until set.
        self.entlastchangetime = None
        self._segment_path = lambda: "entityGeneral"
        self._absolute_path = lambda: "Cisco-IOS-XR-sysadmin-entity-mib:ENTITY-MIB/%s" % self._segment_path()

    def __setattr__(self, name, value):
        # Route attribute writes through YDK's validation/bookkeeping.
        self._perform_setattr(ENTITYMIB.Entitygeneral, ['entlastchangetime'], name, value)
class Entphysicaltable(Entity):
    """
    YDK model binding for the ENTITY-MIB ``entPhysicalTable`` container.

    .. attribute:: entphysicalentry

    	**type**\: list of  		 :py:class:`Entphysicalentry <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_entity_mib.ENTITYMIB.Entphysicaltable.Entphysicalentry>`

    """

    _prefix = 'ENTITY_MIB'
    _revision = '2017-04-12'

    def __init__(self):
        super(ENTITYMIB.Entphysicaltable, self).__init__()

        self.yang_name = "entPhysicalTable"
        self.yang_parent_name = "ENTITY-MIB"
        self.is_top_level_class = False
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_container_classes = OrderedDict([])
        self._child_list_classes = OrderedDict([("entPhysicalEntry", ("entphysicalentry", ENTITYMIB.Entphysicaltable.Entphysicalentry))])
        self._leafs = OrderedDict()

        # YANG list of entPhysicalEntry rows.
        self.entphysicalentry = YList(self)
        self._segment_path = lambda: "entPhysicalTable"
        self._absolute_path = lambda: "Cisco-IOS-XR-sysadmin-entity-mib:ENTITY-MIB/%s" % self._segment_path()

    def __setattr__(self, name, value):
        # Route attribute writes through YDK's validation/bookkeeping.
        self._perform_setattr(ENTITYMIB.Entphysicaltable, [], name, value)

    class Entphysicalentry(Entity):
        """
        One row of the entPhysicalTable, keyed by entPhysicalIndex.

        .. attribute:: entphysicalindex  (key)

        	**type**\: int

        	**range:** 1..2147483647

        .. attribute:: entphysicaldescr

        	**type**\: str

        	**length:** 0..255

        .. attribute:: entphysicalvendortype

        	**type**\: str

        	**pattern:** (([0\-1](\\.[1\-3]?[0\-9]))\|(2\\.(0\|([1\-9]\\d\*))))(\\.(0\|([1\-9]\\d\*)))\*

        .. attribute:: entphysicalcontainedin

        	**type**\: int

        	**range:** 0..2147483647

        .. attribute:: entphysicalclass

        	**type**\: :py:class:`PhysicalClass <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_entity_mib.PhysicalClass>`

        .. attribute:: entphysicalparentrelpos

        	**type**\: int

        	**range:** \-1..2147483647

        .. attribute:: entphysicalname

        	**type**\: str

        	**length:** 0..255

        .. attribute:: entphysicalhardwarerev

        	**type**\: str

        	**length:** 0..255

        .. attribute:: entphysicalfirmwarerev

        	**type**\: str

        	**length:** 0..255

        .. attribute:: entphysicalsoftwarerev

        	**type**\: str

        	**length:** 0..255

        .. attribute:: entphysicalserialnum

        	**type**\: str

        	**length:** 0..32

        .. attribute:: entphysicalmfgname

        	**type**\: str

        	**length:** 0..255

        .. attribute:: entphysicalmodelname

        	**type**\: str

        	**length:** 0..255

        .. attribute:: entphysicalalias

        	**type**\: str

        	**length:** 0..32

        .. attribute:: entphysicalassetid

        	**type**\: str

        	**length:** 0..32

        .. attribute:: entphysicalisfru

        	**type**\: :py:class:`TruthValue <ydk.models.cisco_ios_xr.SNMPv2_TC.TruthValue>`

        """

        _prefix = 'ENTITY_MIB'
        _revision = '2017-04-12'

        def __init__(self):
            super(ENTITYMIB.Entphysicaltable.Entphysicalentry, self).__init__()

            self.yang_name = "entPhysicalEntry"
            self.yang_parent_name = "entPhysicalTable"
            self.is_top_level_class = False
            self.has_list_ancestor = False
            self.ylist_key_names = ['entphysicalindex']
            self._child_container_classes = OrderedDict([])
            self._child_list_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('entphysicalindex', YLeaf(YType.int32, 'entPhysicalIndex')),
                ('entphysicaldescr', YLeaf(YType.str, 'entPhysicalDescr')),
                ('entphysicalvendortype', YLeaf(YType.str, 'entPhysicalVendorType')),
                ('entphysicalcontainedin', YLeaf(YType.int32, 'entPhysicalContainedIn')),
                ('entphysicalclass', YLeaf(YType.enumeration, 'entPhysicalClass')),
                ('entphysicalparentrelpos', YLeaf(YType.int32, 'entPhysicalParentRelPos')),
                ('entphysicalname', YLeaf(YType.str, 'entPhysicalName')),
                ('entphysicalhardwarerev', YLeaf(YType.str, 'entPhysicalHardwareRev')),
                ('entphysicalfirmwarerev', YLeaf(YType.str, 'entPhysicalFirmwareRev')),
                ('entphysicalsoftwarerev', YLeaf(YType.str, 'entPhysicalSoftwareRev')),
                ('entphysicalserialnum', YLeaf(YType.str, 'entPhysicalSerialNum')),
                ('entphysicalmfgname', YLeaf(YType.str, 'entPhysicalMfgName')),
                ('entphysicalmodelname', YLeaf(YType.str, 'entPhysicalModelName')),
                ('entphysicalalias', YLeaf(YType.str, 'entPhysicalAlias')),
                ('entphysicalassetid', YLeaf(YType.str, 'entPhysicalAssetID')),
                ('entphysicalisfru', YLeaf(YType.enumeration, 'entPhysicalIsFRU')),
            ])
            # Leaf values are None until populated from/for the device.
            self.entphysicalindex = None
            self.entphysicaldescr = None
            self.entphysicalvendortype = None
            self.entphysicalcontainedin = None
            self.entphysicalclass = None
            self.entphysicalparentrelpos = None
            self.entphysicalname = None
            self.entphysicalhardwarerev = None
            self.entphysicalfirmwarerev = None
            self.entphysicalsoftwarerev = None
            self.entphysicalserialnum = None
            self.entphysicalmfgname = None
            self.entphysicalmodelname = None
            self.entphysicalalias = None
            self.entphysicalassetid = None
            self.entphysicalisfru = None
            # Segment path embeds the list key, e.g. entPhysicalEntry[entPhysicalIndex='1'].
            self._segment_path = lambda: "entPhysicalEntry" + "[entPhysicalIndex='" + str(self.entphysicalindex) + "']"
            self._absolute_path = lambda: "Cisco-IOS-XR-sysadmin-entity-mib:ENTITY-MIB/entPhysicalTable/%s" % self._segment_path()

        def __setattr__(self, name, value):
            # Route attribute writes through YDK's validation/bookkeeping.
            self._perform_setattr(ENTITYMIB.Entphysicaltable.Entphysicalentry, ['entphysicalindex', 'entphysicaldescr', 'entphysicalvendortype', 'entphysicalcontainedin', 'entphysicalclass', 'entphysicalparentrelpos', 'entphysicalname', 'entphysicalhardwarerev', 'entphysicalfirmwarerev', 'entphysicalsoftwarerev', 'entphysicalserialnum', 'entphysicalmfgname', 'entphysicalmodelname', 'entphysicalalias', 'entphysicalassetid', 'entphysicalisfru'], name, value)
class Entlogicaltable(Entity):
    """
    YDK model binding for the ENTITY-MIB ``entLogicalTable`` container.

    .. attribute:: entlogicalentry

    	**type**\: list of  		 :py:class:`Entlogicalentry <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_entity_mib.ENTITYMIB.Entlogicaltable.Entlogicalentry>`

    """

    _prefix = 'ENTITY_MIB'
    _revision = '2017-04-12'

    def __init__(self):
        super(ENTITYMIB.Entlogicaltable, self).__init__()

        self.yang_name = "entLogicalTable"
        self.yang_parent_name = "ENTITY-MIB"
        self.is_top_level_class = False
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_container_classes = OrderedDict([])
        self._child_list_classes = OrderedDict([("entLogicalEntry", ("entlogicalentry", ENTITYMIB.Entlogicaltable.Entlogicalentry))])
        self._leafs = OrderedDict()

        # YANG list of entLogicalEntry rows.
        self.entlogicalentry = YList(self)
        self._segment_path = lambda: "entLogicalTable"
        self._absolute_path = lambda: "Cisco-IOS-XR-sysadmin-entity-mib:ENTITY-MIB/%s" % self._segment_path()

    def __setattr__(self, name, value):
        # Route attribute writes through YDK's validation/bookkeeping.
        self._perform_setattr(ENTITYMIB.Entlogicaltable, [], name, value)

    class Entlogicalentry(Entity):
        """
        One row of the entLogicalTable, keyed by entLogicalIndex.

        .. attribute:: entlogicalindex  (key)

        	**type**\: int

        	**range:** 1..2147483647

        .. attribute:: entlogicaldescr

        	**type**\: str

        	**length:** 0..255

        .. attribute:: entlogicaltype

        	**type**\: str

        	**pattern:** (([0\-1](\\.[1\-3]?[0\-9]))\|(2\\.(0\|([1\-9]\\d\*))))(\\.(0\|([1\-9]\\d\*)))\*

        .. attribute:: entlogicalcommunity

        	**type**\: str

        	**pattern:** (([0\-9a\-fA\-F]){2}(\:([0\-9a\-fA\-F]){2})\*)?

        .. attribute:: entlogicaltaddress

        	**type**\: str

        	**pattern:** (\\d\*(.\\d\*)\*)?

        .. attribute:: entlogicaltdomain

        	**type**\: str

        	**pattern:** (([0\-1](\\.[1\-3]?[0\-9]))\|(2\\.(0\|([1\-9]\\d\*))))(\\.(0\|([1\-9]\\d\*)))\*

        .. attribute:: entlogicalcontextengineid

        	**type**\: str

        	**pattern:** (([0\-9a\-fA\-F]){2}(\:([0\-9a\-fA\-F]){2})\*)?

        .. attribute:: entlogicalcontextname

        	**type**\: str

        	**length:** 0..255

        """

        _prefix = 'ENTITY_MIB'
        _revision = '2017-04-12'

        def __init__(self):
            super(ENTITYMIB.Entlogicaltable.Entlogicalentry, self).__init__()

            self.yang_name = "entLogicalEntry"
            self.yang_parent_name = "entLogicalTable"
            self.is_top_level_class = False
            self.has_list_ancestor = False
            self.ylist_key_names = ['entlogicalindex']
            self._child_container_classes = OrderedDict([])
            self._child_list_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('entlogicalindex', YLeaf(YType.int32, 'entLogicalIndex')),
                ('entlogicaldescr', YLeaf(YType.str, 'entLogicalDescr')),
                ('entlogicaltype', YLeaf(YType.str, 'entLogicalType')),
                ('entlogicalcommunity', YLeaf(YType.str, 'entLogicalCommunity')),
                ('entlogicaltaddress', YLeaf(YType.str, 'entLogicalTAddress')),
                ('entlogicaltdomain', YLeaf(YType.str, 'entLogicalTDomain')),
                ('entlogicalcontextengineid', YLeaf(YType.str, 'entLogicalContextEngineID')),
                ('entlogicalcontextname', YLeaf(YType.str, 'entLogicalContextName')),
            ])
            # Leaf values are None until populated from/for the device.
            self.entlogicalindex = None
            self.entlogicaldescr = None
            self.entlogicaltype = None
            self.entlogicalcommunity = None
            self.entlogicaltaddress = None
            self.entlogicaltdomain = None
            self.entlogicalcontextengineid = None
            self.entlogicalcontextname = None
            # Segment path embeds the list key, e.g. entLogicalEntry[entLogicalIndex='1'].
            self._segment_path = lambda: "entLogicalEntry" + "[entLogicalIndex='" + str(self.entlogicalindex) + "']"
            self._absolute_path = lambda: "Cisco-IOS-XR-sysadmin-entity-mib:ENTITY-MIB/entLogicalTable/%s" % self._segment_path()

        def __setattr__(self, name, value):
            # Route attribute writes through YDK's validation/bookkeeping.
            self._perform_setattr(ENTITYMIB.Entlogicaltable.Entlogicalentry, ['entlogicalindex', 'entlogicaldescr', 'entlogicaltype', 'entlogicalcommunity', 'entlogicaltaddress', 'entlogicaltdomain', 'entlogicalcontextengineid', 'entlogicalcontextname'], name, value)
class Entlpmappingtable(Entity):
"""
.. attribute:: entlpmappingentry
**type**\: list of :py:class:`Entlpmappingentry <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_entity_mib.ENTITYMIB.Entlpmappingtable.Entlpmappingentry>`
"""
_prefix = 'ENTITY_MIB'
_revision = '2017-04-12'
def __init__(self):
super(ENTITYMIB.Entlpmappingtable, self).__init__()
self.yang_name = "entLPMappingTable"
self.yang_parent_name = "ENTITY-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("entLPMappingEntry", ("entlpmappingentry", ENTITYMIB.Entlpmappingtable.Entlpmappingentry))])
self._leafs = OrderedDict()
self.entlpmappingentry = YList(self)
self._segment_path = lambda: "entLPMappingTable"
self._absolute_path = lambda: "Cisco-IOS-XR-sysadmin-entity-mib:ENTITY-MIB/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(ENTITYMIB.Entlpmappingtable, [], name, value)
class Entlpmappingentry(Entity):
"""
.. attribute:: entlogicalindex (key)
**type**\: int
**range:** 1..2147483647
.. attribute:: entlpphysicalindex (key)
**type**\: int
**range:** 1..2147483647
"""
_prefix = 'ENTITY_MIB'
_revision = '2017-04-12'
def __init__(self):
super(ENTITYMIB.Entlpmappingtable.Entlpmappingentry, self).__init__()
self.yang_name = "entLPMappingEntry"
self.yang_parent_name = "entLPMappingTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['entlogicalindex','entlpphysicalindex']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('entlogicalindex', YLeaf(YType.int32, 'entLogicalIndex')),
('entlpphysicalindex', YLeaf(YType.int32, 'entLPPhysicalIndex')),
])
self.entlogicalindex = None
self.entlpphysicalindex = None
self._segment_path = lambda: "entLPMappingEntry" | |
"Mono",
"1378-9": "Nanticoke",
"1380-5": "Narragansett",
"1382-1": "Navajo",
"1383-9": "<NAME>",
"1384-7": "<NAME>",
"1385-4": "<NAME>",
"1387-0": "<NAME>",
"1389-6": "Nomalaki",
"1391-2": "Northwest Tribes",
"1392-0": "Alsea",
"1393-8": "Celilo",
"1394-6": "Columbia",
"1395-3": "Kalapuya",
"1396-1": "Molala",
"1397-9": "Talakamish",
"1398-7": "Tenino",
"1399-5": "Tillamook",
"1400-1": "Wenatchee",
"1401-9": "Yahooskin",
"1403-5": "Omaha",
"1405-0": "<NAME>",
"1407-6": "Osage",
"1409-2": "Otoe-Missouria",
"1411-8": "Ottawa",
"1412-6": "Burt Lake Ottawa",
"1413-4": "Michigan Ottawa",
"1414-2": "Oklahoma Ottawa",
"1416-7": "Paiute",
"1417-5": "Bishop",
"1418-3": "Bridgeport",
"1419-1": "Burns Paiute",
"1420-9": "Cedarville",
"1421-7": "Fort Bidwell",
"1422-5": "Fort Independence",
"1423-3": "Kaibab",
"1424-1": "Las Vegas",
"1425-8": "Lone Pine",
"1426-6": "Lovelock",
"1427-4": "Malheur Paiute",
"1428-2": "Moapa",
"1429-0": "Northern Paiute",
"1430-8": "Owens Valley",
"1431-6": "Pyramid Lake",
"1432-4": "San Juan Southern Paiute",
"1433-2": "Southern Paiute",
"1434-0": "Summit Lake",
"1435-7": "Utu Utu Gwaitu Paiute",
"1436-5": "Walker River",
"1437-3": "Yerington Paiute",
"1439-9": "Pamunkey",
"1441-5": "Passamaquoddy",
"1442-3": "Indian Township",
"1443-1": "Pleasant Point Passamaquoddy",
"1445-6": "Pawnee",
"1446-4": "Oklahoma Pawnee",
"1448-0": "Penobscot",
"1450-6": "Peoria",
"1451-4": "Oklahoma Peoria",
"1453-0": "Pequot",
"1454-8": "Marshantucket Pequot",
"1456-3": "Pima",
"1457-1": "Gila River Pima-Maricopa",
"1458-9": "Salt River Pima-Maricopa",
"1460-5": "Piscataway",
"1462-1": "Pit River",
"1464-7": "Pomo",
"1465-4": "Central Pomo",
"1466-2": "Dry Creek",
"1467-0": "Eastern Pomo",
"1468-8": "Kashia",
"1469-6": "Northern Pomo",
"1470-4": "Scotts Valley",
"1471-2": "Stonyford",
"1472-0": "Sulphur Bank",
"1474-6": "Ponca",
"1475-3": "Nebraska Ponca",
"1476-1": "Oklahoma Ponca",
"1478-7": "Potawatomi",
"1479-5": "Citizen Band Potawatomi",
"1480-3": "Forest County",
"1481-1": "Hannahville",
"1482-9": "Huron Potawatomi",
"1483-7": "Pokagon Potawatomi",
"1484-5": "Prairie Band",
"1485-2": "Wisconsin Potawatomi",
"1487-8": "Powhatan",
"1489-4": "Pueblo",
"1490-2": "Acoma",
"1491-0": "Arizona Tewa",
"1492-8": "Cochiti",
"1493-6": "Hopi",
"1494-4": "Isleta",
"1495-1": "Jemez",
"1496-9": "Keres",
"1497-7": "Laguna",
"1498-5": "Nambe",
"1499-3": "Picuris",
"1500-8": "Piro",
"1501-6": "Pojoaque",
"1502-4": "San Felipe",
"1503-2": "San Ildefonso",
"1504-0": "San Juan Pueblo",
"1505-7": "San Juan De",
"1506-5": "San Juan",
"1507-3": "Sandia",
"1508-1": "Santa Ana",
"1509-9": "Santa Clara",
"1510-7": "Santo Domingo",
"1511-5": "Taos",
"1512-3": "Tesuque",
"1513-1": "Tewa",
"1514-9": "Tigua",
"1515-6": "Zia",
"1516-4": "Zuni",
"1518-0": "Puget Sound Salish",
"1519-8": "Duwamish",
"1520-6": "Kikiallus",
"1521-4": "Lower Skagit",
"1522-2": "Muckleshoot",
"1523-0": "Nisqually",
"1524-8": "Nooksack",
"1525-5": "Port Madison",
"1526-3": "Puyallup",
"1527-1": "Samish",
"1528-9": "Sauk-Suiattle",
"1529-7": "Skokomish",
"1530-5": "Skykomish",
"1531-3": "Snohomish",
"1532-1": "Snoqualmie",
"1533-9": "Squaxin Island",
"1534-7": "Steilacoom",
"1535-4": "Stillaguamish",
"1536-2": "Suquamish",
"1537-0": "Swinomish",
"1538-8": "Tulalip",
"1539-6": "Upper Skagit",
"1541-2": "Quapaw",
"1543-8": "Quinault",
"1545-3": "Rappahannock",
"1547-9": "Reno-Sparks",
"1549-5": "Round Valley",
"1551-1": "Sac and Fox",
"1552-9": "Iowa Sac and Fox",
"1553-7": "Missouri Sac and Fox",
"1554-5": "Oklahoma Sac and Fox",
"1556-0": "Salinan",
"1558-6": "Salish",
"1560-2": "Salish and Kootenai",
"1562-8": "Schaghticoke",
"1564-4": "Scott Valley",
"1566-9": "Seminole",
"1567-7": "Big Cypress",
"1568-5": "Brighton",
"1569-3": "Florida Seminole",
"1570-1": "Hollywood Seminole",
"1571-9": "Oklahoma Seminole",
"1573-5": "Serrano",
"1574-3": "San Manual",
"1576-8": "Shasta",
"1578-4": "Shawnee",
"1579-2": "Absentee Shawnee",
"1580-0": "Eastern Shawnee",
"1582-6": "Shinnecock",
"1584-2": "Shoalwater Bay",
"1586-7": "Shoshone",
"1587-5": "Battle Mountain",
"1588-3": "Duckwater",
"1589-1": "Elko",
"1590-9": "Ely",
"1591-7": "Goshute",
"1592-5": "Panamint",
"1593-3": "Ruby Valley",
"1594-1": "Skull Valley",
"1595-8": "South Fork Shoshone",
"1596-6": "Te-Moak Western Shoshone",
"1597-4": "<NAME>",
"1598-2": "Washakie",
"1599-0": "Wind River Shoshone",
"1600-6": "Yomba",
"1602-2": "Shoshone Paiute",
"1603-0": "Duck Valley",
"1604-8": "Fallon",
"1605-5": "<NAME>",
"1607-1": "Siletz",
"1609-7": "Sioux",
"1610-5": "Blackfoot Sioux",
"1611-3": "Brule Sioux",
"1612-1": "Cheyenne River Sioux",
"1613-9": "Crow Creek Sioux",
"1614-7": "Dakota Sioux",
"1615-4": "Flandreau Santee",
"1616-2": "Fort Peck",
"1617-0": "Lake Traverse Sioux",
"1618-8": "Lower Brule Sioux",
"1619-6": "Lower Sioux",
"1620-4": "Mdewakanton Sioux",
"1621-2": "Miniconjou",
"1622-0": "Oglala Sioux",
"1623-8": "Pine Ridge Sioux",
"1624-6": "Pipestone Sioux",
"1625-3": "Prairie Island Sioux",
"1626-1": "Prior Lake Sioux",
"1627-9": "Rosebud Sioux",
"1628-7": "Sans Arc Sioux",
"1629-5": "Santee Sioux",
"1630-3": "Sisseton-Wahpeton",
"1631-1": "Sisseton Sioux",
"1632-9": "Spirit Lake Sioux",
"1633-7": "Standing Rock Sioux",
"1634-5": "Teton Sioux",
"1635-2": "Two Kettle Sioux",
"1636-0": "Upper Sioux",
"1637-8": "Wahpekute Sioux",
"1638-6": "Wahpeton Sioux",
"1639-4": "Wazhaza Sioux",
"1640-2": "Yankton Sioux",
"1641-0": "Yanktonai Sioux",
"1643-6": "Siuslaw",
"1645-1": "Spokane",
"1647-7": "Stewart",
"1649-3": "Stockbridge",
"1651-9": "Susanville",
"1653-5": "<NAME>",
"1654-3": "Ak-Chin",
"1655-0": "<NAME>",
"1656-8": "<NAME>",
"1657-6": "Sells",
"1659-2": "Tolowa",
"1661-8": "Tonkawa",
"1663-4": "Tygh",
"1665-9": "Umatilla",
"1667-5": "Umpqua",
"1668-3": "Cow Creek Umpqua",
"1670-9": "Ute",
"1671-7": "<NAME>",
"1672-5": "Uintah Ute",
"1673-3": "Ute Mountain Ute",
"1675-8": "Wailaki",
"1677-4": "Walla-Walla",
"1679-0": "Wampanoag",
"1680-8": "Gay Head Wampanoag",
"1681-6": "<NAME>",
"1683-2": "Warm Springs",
"1685-7": "Wascopum",
"1687-3": "Washoe",
"1688-1": "Alpine",
"1689-9": "Carson",
"1690-7": "Dresslerville",
"1692-3": "Wichita",
"1694-9": "Wind River",
"1696-4": "Winnebago",
"1697-2": "Ho-chunk",
"1698-0": "Nebraska Winnebago",
"1700-4": "Winnemucca",
"1702-0": "Wintun",
"1704-6": "Wiyot",
"1705-3": "Table Bluff",
"1707-9": "Yakama",
"1709-5": "Yakama Cowlitz",
"1711-1": "Yaqui",
"1712-9": "Bar<NAME>",
"1713-7": "Pascua Yaqui",
"1715-2": "Yavapai Apache",
"1717-8": "Yokuts",
"1718-6": "Chukchansi",
"1719-4": "Tachi",
"1720-2": "Tule River",
"1722-8": "Yuchi",
"1724-4": "Yuman",
"1725-1": "Cocopah",
"1726-9": "Havasupai",
"1727-7": "Hualapai",
"1728-5": "Maricopa",
"1729-3": "Mohave",
"1730-1": "Quechan",
"1731-9": "Yavapai",
"1732-7": "Yurok",
"1733-5": "Coast Yurok",
"1735-0": "Alaska Native",
"1737-6": "Alaska Indian",
"1739-2": "Alaskan Athabascan",
"1740-0": "Ahtna",
"1811-9": "Southeast Alaska",
"1813-5": "Tlingit-Haida",
"1814-3": "Angoon",
"1815-0": "Central Council of Tlingit and Haida Tribes",
"1816-8": "Chilkat",
"1817-6": "Chilkoot",
"1818-4": "Craig",
"1819-2": "Douglas",
"1820-0": "Haida",
"1821-8": "Hoonah",
"1822-6": "Hydaburg",
"1823-4": "Kake",
"1824-2": "Kasaan",
"1825-9": "Kenaitze",
"1826-7": "Ketchikan",
"1827-5": "Klawock",
"1828-3": "Pelican",
"1829-1": "Petersburg",
"1830-9": "Saxman",
"1831-7": "Sitka",
"1832-5": "Tenakee Springs",
"1833-3": "Tlingit",
"1834-1": "Wrangell",
"1835-8": "Yakutat",
"1837-4": "Tsimshian",
"1838-2": "Metlakatla",
"1840-8": "Eskimo",
"1842-4": "Greenland Eskimo",
"1844-0": "Inupiat Eskimo",
"1845-7": "Ambler",
"1846-5": "Anaktuvuk",
"1847-3": "Anaktuvuk Pass",
"1848-1": "Arctic Slope Inupiat",
"1849-9": "Arctic Slope Corporation",
"1850-7": "Atqasuk",
"1851-5": "Barrow",
"1852-3": "Bering Straits Inupiat",
"1853-1": "Brevig Mission",
"1854-9": "Buckland",
"1855-6": "Chinik",
"1856-4": "Council",
"1857-2": "Deering",
"1858-0": "Elim",
"1859-8": "Golovin",
"1860-6": "<NAME>",
"1861-4": "Inupiaq",
"1862-2": "Kaktovik",
"1863-0": "Kawerak",
"1864-8": "Kiana",
"1865-5": "Kivalina",
"1866-3": "Kobuk",
"1867-1": "Kotzebue",
"1868-9": "Koyuk",
"1869-7": "Kwiguk",
"1870-5": "<NAME>",
"1871-3": "<NAME>",
"1872-1": "Noatak",
"1873-9": "Nome",
"1874-7": "Noorvik",
"1875-4": "Nuiqsut",
"1876-2": "Point Hope",
"1877-0": "Point Lay",
"1878-8": "Selawik",
"1879-6": "Shaktoolik",
"1880-4": "Shishmaref",
"1881-2": "Shungnak",
"1882-0": "Solomon",
"1883-8": "Teller",
"1884-6": "Unalakleet",
"1885-3": "Wainwright",
"1886-1": "Wales",
"1887-9": "White Mountain",
"1888-7": "White Mountain Inupiat",
"1889-5": "<NAME>",
"1891-1": "<NAME>",
"1892-9": "Gambell",
"1893-7": "Savoonga",
"1894-5": "<NAME>",
"1896-0": "<NAME>",
"1897-8": "Akiachak",
"1898-6": "Akiak",
"1899-4": "Alakanuk",
"1900-0": "Aleknagik",
"1901-8": "Andreafsky",
"1902-6": "Aniak",
"1903-4": "Atmautluak",
"1904-2": "Bethel",
"1905-9": "<NAME>",
"1906-7": "Bristol Bay Yupik",
"1907-5": "<NAME>",
"1908-3": "Chefornak",
"1909-1": "Chevak",
"1910-9": "Chuathbaluk",
"1911-7": "Clark's Point",
"1912-5": "Crooked Creek",
"1913-3": "Dillingham",
"1914-1": "Eek",
"1915-8": "Ekuk",
"1916-6": "Ekwok",
"1917-4": "Emmonak",
"1918-2": "Goodnews Bay",
"1919-0": "Hooper Bay",
"1920-8": "Iqurmuit (Russian Mission)",
"1921-6": "Kalskag",
"1922-4": "Kasigluk",
"1923-2": "Kipnuk",
"1924-0": "Koliganek",
"1925-7": "Kongiganak",
"1926-5": "Kotlik",
"1927-3": "Kwethluk",
"1928-1": "Kwigillingok",
"1929-9": "Levelock",
"1930-7": "Lower Kalskag",
"1931-5": "Manokotak",
"1932-3": "Marshall",
"1933-1": "Mekoryuk",
"1934-9": "Mountain Village",
"1935-6": "Naknek",
"1936-4": "Napaumute",
"1937-2": "Napakiak",
"1938-0": "Napaskiak",
"1939-8": "Newhalen",
"1940-6": "New Stuyahok",
"1941-4": "Newtok",
"1942-2": "Nightmute",
"1943-0": "Nunapitchukv",
"1944-8": "Oscarville",
"1945-5": "Pilot Station",
"1946-3": "Pitkas Point",
"1947-1": "Platinum",
"1948-9": "Portage Creek",
"1949-7": "Quinhagak",
"1950-5": "Red Devil",
"1951-3": "St. Michael",
"1952-1": "<NAME>",
"1953-9": "Sheldon's Point",
"1954-7": "Sleetmute",
"1955-4": "Stebbins",
"1956-2": "Togiak",
"1957-0": "Toksook",
"1958-8": "Tulukskak",
"1959-6": "Tuntutuliak",
"1960-4": "Tununak",
"1961-2": "<NAME>",
"1962-0": "Georgetown",
"1963-8": "St. Mary's",
"1964-6": "Umkumiate",
"1966-1": "Aleut",
"1968-7": "Alutiiq Aleut",
"1969-5": "Tatitlek",
"1970-3": "Ugashik",
"1972-9": "Bristol Bay Aleut",
"1973-7": "Chignik",
"1974-5": "Chignik Lake",
"1975-2": "Egegik",
"1976-0": "Igiugig",
"1977-8": "Ivanof Bay",
"1978-6": "King Salmon",
"1979-4": "Kokhanok",
"1980-2": "Perryville",
"1981-0": "Pilot Point",
"1982-8": "Port Heiden",
"1984-4": "Chugach Aleut",
"1985-1": "Chenega",
"1986-9": "Chugach Corporation",
"1987-7": "English Bay",
"1988-5": "Port Graham",
"1990-1": "Eyak",
"1992-7": "Koniag Aleut",
"1993-5": "Akhiok",
"1994-3": "Agdaagux",
"1995-0": "Karluk",
"1996-8": "Kodiak",
"1997-6": "Larsen Bay",
"1998-4": "Old Harbor",
"1999-2": "Ouzinkie",
"2000-8": "Port Lions",
"2002-4": "Sugpiaq",
"2004-0": "Suqpigaq",
"2006-5": "Unangan Aleut",
"2007-3": "Akutan",
"2008-1": "Aleut Corporation",
"2009-9": "Aleutian",
"2010-7": "Aleutian Islander",
"2011-5": "Atka",
"2012-3": "Belkofski",
"2013-1": "<NAME>",
"2014-9": "King Cove",
"2015-6": "False Pass",
"2016-4": "<NAME>",
"2017-2": "Nikolski",
"2018-0": "<NAME>",
"2019-8": "<NAME>",
"2020-6": "Qawalangin",
"2021-4": "St. George",
"2022-2": "St. Paul",
"2023-0": "Sand Point",
"2024-8": "South Naknek",
"2025-5": "Unalaska",
"2026-3": "Unga",
"2028-9": "Asian",
"2029-7": "Asian Indian",
"2030-5": "Bangladeshi",
"2031-3": "Bhutanese",
"2032-1": "Burmese",
"2033-9": "Cambodian",
"2034-7": "Chinese",
"2035-4": "Taiwanese",
"2036-2": "Filipino",
"2037-0": "Hmong",
"2038-8": "Indonesian",
"2039-6": "Japanese",
"2040-4": "Korean",
"2041-2": "Laotian",
"2042-0": "Malaysian",
"2043-8": "Okinawan",
"2044-6": "Pakistani",
"2045-3": "Sri Lankan",
"2046-1": "Thai",
"2047-9": "Vietnamese",
"2048-7": "Iwo Jiman",
"2049-5": "Maldivian",
"2050-3": "Nepalese",
"2051-1": "Singaporean",
"2052-9": "Madagascar",
"2054-5": "Black or African American",
"2056-0": "Black",
"2058-6": "African American",
"2060-2": "African",
"2061-0": "Botswanan",
"2062-8": "Ethiopian",
"2063-6": "Liberian",
"2064-4": "Namibian",
"2065-1": "Nigerian",
"2066-9": "Zairean",
"2067-7": "Bahamian",
"2068-5": "Barbadian",
"2069-3": "Dominican",
"2070-1": "Dominica Islander",
"2071-9": "Haitian",
| |
of this FederatedConnection.
The username on the service account.
:param serviceaccountuser: The serviceaccountuser of this FederatedConnection.
:type: str
"""
self._attrs["serviceaccountuser"] = serviceaccountuser
def to_dict(self):
    """Return the model's attributes as a plain dict, dropping None entries."""
    filtered = {}
    for key, value in self._attrs.items():
        if value is not None:
            filtered[key] = value
    return filtered
class FederatedConnectionInput(SSCModel):
    """Request model describing a federated connection to be created."""

    @staticmethod
    def _from_dict(model: dict) -> "FederatedConnectionInput":
        """Build an instance directly from a raw attribute dict (no validation)."""
        obj = FederatedConnectionInput.__new__(FederatedConnectionInput)
        obj._attrs = model
        return obj

    def __init__(self, hostnameip: "str" = None, name: "str" = None, port: "float" = None, serviceaccountpassword: "str" = None, serviceaccountuser: "str" = None, **extra):
        """FederatedConnectionInput"""
        self._attrs = dict()
        # Store only the parameters that were actually supplied.
        for key, value in (
            ("hostnameip", hostnameip),
            ("name", name),
            ("port", port),
            ("serviceaccountpassword", serviceaccountpassword),
            ("serviceaccountuser", serviceaccountuser),
        ):
            if value is not None:
                self._attrs[key] = value
        # Unknown keyword arguments are kept verbatim for forward compatibility.
        self._attrs.update(extra)

    @property
    def hostnameip(self) -> "str":
        """The remote hostname to connect to."""
        return self._attrs.get("hostnameip")

    @hostnameip.setter
    def hostnameip(self, hostnameip: "str"):
        """Set the remote hostname to connect to.

        :param hostnameip: The hostnameip of this FederatedConnectionInput.
        :type: str
        """
        self._attrs["hostnameip"] = hostnameip

    @property
    def name(self) -> "str":
        """The name of the federated connection."""
        return self._attrs.get("name")

    @name.setter
    def name(self, name: "str"):
        """Set the name of the federated connection.

        :param name: The name of this FederatedConnectionInput.
        :type: str
        """
        self._attrs["name"] = name

    @property
    def port(self) -> "float":
        """The remote port number."""
        return self._attrs.get("port")

    @port.setter
    def port(self, port: "float"):
        """Set the remote port number.

        :param port: The port of this FederatedConnectionInput.
        :type: float
        """
        self._attrs["port"] = port

    @property
    def serviceaccountpassword(self) -> "str":
        """The password of the service account."""
        return self._attrs.get("serviceaccountpassword")

    @serviceaccountpassword.setter
    def serviceaccountpassword(self, serviceaccountpassword: "str"):
        """Set the password of the service account.

        :param serviceaccountpassword: The serviceaccountpassword of this FederatedConnectionInput.
        :type: str
        """
        self._attrs["serviceaccountpassword"] = serviceaccountpassword

    @property
    def serviceaccountuser(self) -> "str":
        """The username on the service account."""
        return self._attrs.get("serviceaccountuser")

    @serviceaccountuser.setter
    def serviceaccountuser(self, serviceaccountuser: "str"):
        """Set the username on the service account.

        :param serviceaccountuser: The serviceaccountuser of this FederatedConnectionInput.
        :type: str
        """
        self._attrs["serviceaccountuser"] = serviceaccountuser

    def to_dict(self):
        """Return the attributes as a plain dict, omitting None values."""
        return {key: value for key, value in self._attrs.items() if value is not None}
class FederatedDataset(Dataset):
@staticmethod
def _from_dict(model: dict) -> "FederatedDataset":
    """Build a FederatedDataset directly from a raw attribute dict (no validation)."""
    obj = FederatedDataset.__new__(FederatedDataset)
    obj._attrs = model
    return obj
def __init__(self, created: "str", createdby: "str", id: "str", modified: "str", modifiedby: "str", name: "str", owner: "str", resourcename: "str", appclientidcreatedby: "str" = None, appclientidmodifiedby: "str" = None, description: "str" = None, federated_connection: "str" = None, federated_dataset: "str" = None, federated_dataset_kind: "str" = None, namespace: "str" = None, summary: "str" = None, title: "str" = None, **extra):
    """FederatedDataset"""
    self._attrs = dict()
    # Copy every supplied value under its wire-format key, preserving the
    # insertion order of the generated code.
    for key, value in (
        ("created", created),
        ("createdby", createdby),
        ("id", id),
        ("modified", modified),
        ("modifiedby", modifiedby),
        ("name", name),
        ("owner", owner),
        ("resourcename", resourcename),
        ("appclientidcreatedby", appclientidcreatedby),
        ("appclientidmodifiedby", appclientidmodifiedby),
        ("description", description),
        ("federatedConnection", federated_connection),
        ("federatedDataset", federated_dataset),
        ("federatedDatasetKind", federated_dataset_kind),
    ):
        if value is not None:
            self._attrs[key] = value
    # The dataset kind is fixed for this model.
    self._attrs["kind"] = "federated"
    for key, value in (("namespace", namespace), ("summary", summary), ("title", title)):
        if value is not None:
            self._attrs[key] = value
    # Unknown keyword arguments are kept verbatim for forward compatibility.
    self._attrs.update(extra)
@property
def created(self) -> "str":
    """ Gets the created of this FederatedDataset.
    The date and time object was created.
    """
    return self._attrs.get("created")

@created.setter
def created(self, created: "str"):
    """Sets the created of this FederatedDataset.
    The date and time object was created.
    :param created: The created of this FederatedDataset.
    :type: str
    """
    # Required field: a None value is rejected rather than silently stored.
    if created is None:
        raise ValueError("Invalid value for `created`, must not be `None`")
    self._attrs["created"] = created

@property
def createdby(self) -> "str":
    """ Gets the createdby of this FederatedDataset.
    The name of the user who created the object. This value is obtained from the bearer token and may not be changed.
    """
    return self._attrs.get("createdby")

@createdby.setter
def createdby(self, createdby: "str"):
    """Sets the createdby of this FederatedDataset.
    The name of the user who created the object. This value is obtained from the bearer token and may not be changed.
    :param createdby: The createdby of this FederatedDataset.
    :type: str
    """
    # Required field.
    if createdby is None:
        raise ValueError("Invalid value for `createdby`, must not be `None`")
    self._attrs["createdby"] = createdby

@property
def id(self) -> "str":
    """ Gets the id of this FederatedDataset.
    A unique dataset ID.
    """
    return self._attrs.get("id")

@id.setter
def id(self, id: "str"):
    """Sets the id of this FederatedDataset.
    A unique dataset ID.
    :param id: The id of this FederatedDataset.
    :type: str
    """
    # Required field.
    if id is None:
        raise ValueError("Invalid value for `id`, must not be `None`")
    self._attrs["id"] = id

@property
def modified(self) -> "str":
    """ Gets the modified of this FederatedDataset.
    The date and time object was modified.
    """
    return self._attrs.get("modified")

@modified.setter
def modified(self, modified: "str"):
    """Sets the modified of this FederatedDataset.
    The date and time object was modified.
    :param modified: The modified of this FederatedDataset.
    :type: str
    """
    # Required field.
    if modified is None:
        raise ValueError("Invalid value for `modified`, must not be `None`")
    self._attrs["modified"] = modified

@property
def modifiedby(self) -> "str":
    """ Gets the modifiedby of this FederatedDataset.
    The name of the user who most recently modified the object.
    """
    return self._attrs.get("modifiedby")

@modifiedby.setter
def modifiedby(self, modifiedby: "str"):
    """Sets the modifiedby of this FederatedDataset.
    The name of the user who most recently modified the object.
    :param modifiedby: The modifiedby of this FederatedDataset.
    :type: str
    """
    # Required field.
    if modifiedby is None:
        raise ValueError("Invalid value for `modifiedby`, must not be `None`")
    self._attrs["modifiedby"] = modifiedby

@property
def name(self) -> "str":
    """ Gets the name of this FederatedDataset.
    The dataset name. Dataset names must be unique within each module.
    """
    return self._attrs.get("name")

@name.setter
def name(self, name: "str"):
    """Sets the name of this FederatedDataset.
    The dataset name. Dataset names must be unique within each module.
    :param name: The name of this FederatedDataset.
    :type: str
    """
    # Required field.
    if name is None:
        raise ValueError("Invalid value for `name`, must not be `None`")
    self._attrs["name"] = name

@property
def owner(self) -> "str":
    """ Gets the owner of this FederatedDataset.
    The name of the object's owner.
    """
    return self._attrs.get("owner")

@owner.setter
def owner(self, owner: "str"):
    """Sets the owner of this FederatedDataset.
    The name of the object's owner.
    :param owner: The owner of this FederatedDataset.
    :type: str
    """
    # Required field.
    if owner is None:
        raise ValueError("Invalid value for `owner`, must not be `None`")
    self._attrs["owner"] = owner

@property
def resourcename(self) -> "str":
    """ Gets the resourcename of this FederatedDataset.
    The dataset name qualified by the module name.
    """
    return self._attrs.get("resourcename")

@resourcename.setter
def resourcename(self, resourcename: "str"):
    """Sets the resourcename of this FederatedDataset.
    The dataset name qualified by the module name.
    :param resourcename: The resourcename of this FederatedDataset.
    :type: str
    """
    # Required field.
    if resourcename is None:
        raise ValueError("Invalid value for `resourcename`, must not be `None`")
    self._attrs["resourcename"] = resourcename

@property
def appclientidcreatedby(self) -> "str":
    """ Gets the appclientidcreatedby of this FederatedDataset.
    AppClientId of the creator app of the dataset.
    """
    return self._attrs.get("appclientidcreatedby")

@appclientidcreatedby.setter
def appclientidcreatedby(self, appclientidcreatedby: "str"):
    """Sets the appclientidcreatedby of this FederatedDataset.
    AppClientId of the creator app of the dataset.
    :param appclientidcreatedby: The appclientidcreatedby of this FederatedDataset.
    :type: str
    """
    # Optional field: None is allowed.
    self._attrs["appclientidcreatedby"] = appclientidcreatedby

@property
def appclientidmodifiedby(self) -> "str":
    """ Gets the appclientidmodifiedby of this FederatedDataset.
    AppClientId of the modifier app of the dataset.
    """
    return self._attrs.get("appclientidmodifiedby")
@appclientidmodifiedby.setter
def appclientidmodifiedby(self, appclientidmodifiedby: "str"):
"""Sets the appclientidmodifiedby of this FederatedDataset.
AppClinetId of the modifier app of the dataset.
| |
bone.bone.use_local_location = False
drivers = self.GetDrivers(bone.name,'location')
if axisFlip != None:
print(axisFlip)
uVec[axisFlip] *= -1
#if axisFlip == 0:
# ux = -ux
#elif axisFlip == 1:
# uy = -uy
#elif axisFlip == 2:
# uz = -uz
self.AddPositionDriver(prop, axisOrder[0], bone, drivers, 0, uVec[0])
self.AddPositionDriver(prop, axisOrder[1], bone, drivers, 1, uVec[1])
self.AddPositionDriver(prop, axisOrder[2], bone, drivers, 2, uVec[2])
return
def SetScale(self, prop, boneName, axisOrder=None):
    """Hook up scale drivers for `prop` on the named pose bone (and, when the
    name contains the '?' side wildcard, on its opposite-side counterpart).

    :param prop: Name of the custom property that drives the scale channels.
    :param boneName: Pose-bone name; '?' acts as a side wildcard.
    :param axisOrder: Optional axis permutation; defaults to [0, 1, 2].
    """
    # Fix: the signature used the mutable default `axisOrder=[0,1,2]`, which
    # is shared between calls; use a None sentinel and build a fresh list.
    if axisOrder is None:
        axisOrder = [0, 1, 2]

    # Convert the requested axis order into Blender bone space.
    axisMat = get_axis_order_matrix(axisOrder).to_4x4()
    axisMat = compat.convert_cm_to_bl_bone_rotation(axisMat)
    axisOrder = get_matrix_axis_order(axisMat)

    # Wire the drivers for the primary bone, then repeat for the left side
    # when the name carries the '?' wildcard (deduplicates the original
    # copy-pasted second branch).
    bones = [self.GetPoseBone(boneName)]
    if '?' in boneName:
        bones.append(self.GetPoseBone(boneName, True))
    for bone in bones:
        if not bone:
            continue
        drivers = self.GetDrivers(bone.name, 'scale')
        self.AddScaleDriver(prop, axisOrder[0], bone, drivers, 2)
        self.AddScaleDriver(prop, axisOrder[1], bone, drivers, 1)
        self.AddScaleDriver(prop, axisOrder[2], bone, drivers, 0)
    return
@compat.BlRegister()
class CNV_OT_add_cm3d2_body_sliders(bpy.types.Operator):
    bl_idname = 'object.add_cm3d2_body_sliders'
    bl_label = "Add CM3D2 Body Sliders"
    bl_description = "Adds drivers to armature to enable body sliders."
    bl_options = {'REGISTER', 'UNDO'}

    # Fix: Blender 2.8+ only registers operator properties declared with the
    # annotation (':') syntax. 'scale' used plain assignment ('=') while the
    # two properties below already used annotations; made consistent so the
    # property registers and shows in the options dialog.
    scale : bpy.props.FloatProperty(name="Scale", default=5, min=0.1, max=100, soft_min=0.1, soft_max=100, step=100, precision=1, description="The amount by which the mesh is scaled when imported. Recommended that you use the same when at the time of export.")
    is_fix_thigh : bpy.props.BoolProperty(name="Fix Thigh" , default=False, description="Fix twist bone values for the thighs in motor-cycle pose")
    is_drive_shape_keys: bpy.props.BoolProperty(name="Drive Shape Keys", default=True, description="Connect sliders to mesh children's shape keys" )
@classmethod
def poll(cls, context):
    """Enable the operator only when the active object is an armature
    (or is editing one) that contains a 'Bip01' bone."""
    active = context.object
    armature = active.data if active else None
    has_arm = armature and isinstance(armature, bpy.types.Armature) and ("Bip01" in armature.bones)
    can_edit = (active and active.data == armature) or (armature and armature.is_editmode)
    return has_arm and can_edit
def invoke(self, context, event):
    """Seed the scale from the add-on preferences, then open the options dialog."""
    prefs = common.preferences()
    self.scale = prefs.scale
    return context.window_manager.invoke_props_dialog(self)
def draw(self, context):
    """Lay out the operator options shown in the invoke dialog."""
    layout = self.layout
    layout.prop(self, 'scale')
    # 'is_fix_thigh' is intentionally not exposed in the dialog.
    layout.prop(self, 'is_drive_shape_keys')
def driveShapeKey(self, shapekey, data, prop, expression, set_min=None, set_max=None):
    """Attach a scripted driver to a shape key's value.

    :param shapekey: Shape key to drive; no-op when falsy.
    :param data: ID-owned datablock exposing the custom property `prop`.
    :param prop: Name of the driving custom property.
    :param expression: Driver expression evaluated with `prop` as a variable.
    :param set_min: Optional new slider minimum for the shape key.
    :param set_max: Optional new slider maximum for the shape key.
    """
    if not shapekey:
        return
    driver = shapekey.driver_add('value').driver
    driver.type = 'SCRIPTED'
    # Reuse an existing driver variable if one is already present.
    driver_var = driver.variables.new() if len(driver.variables) < 1 else driver.variables[0]
    driver_var.type = 'SINGLE_PROP'
    driver_var.name = prop
    driver_target = driver_var.targets[0]
    driver_target.id_type = 'OBJECT'
    driver_target.id = data.id_data
    driver_target.data_path = data.path_from_id(prop)
    driver.expression = expression
    # Fix: compare against None so explicit 0 / 0.0 limits are honoured
    # (the original truthiness test silently skipped them).
    if set_min is not None:
        shapekey.slider_min = set_min
    if set_max is not None:
        shapekey.slider_max = set_max
def driveTwistBone(self, bone, prop='rotation_euler', axis=0, expression=""):
    """Attach a scripted, self-referencing driver to one channel of a pose bone."""
    if bone:
        driver = bone.driver_add(prop, axis).driver
        driver.type = 'SCRIPTED'
        driver.use_self = True
        driver.expression = expression
def execute(self, context):
ob = context.object
arm = ob.data
pre_mode = ob.mode
#if pre_mode != 'EDIT':
# override = context.copy()
# override['active_object'] = ob
# bpy.ops.object.mode_set(override, mode='EDIT')
morph = ob.cm3d2_bone_morph
sliders = ob.cm3d2_wide_slider
morph.scale = self.scale
sliders.scale = self.scale
#BoneMorph.SetPosition("KubiScl", "Bip01 Neck" , 0.95, 1 , 1 , 1.05, 1 , 1 )
#BoneMorph.SetPosition("KubiScl", "Bip01 Head" , 0.8 , 1 , 1 , 1.2 , 1 , 1 )
#BoneMorph.SetScale ("UdeScl" , "Bip01 ? UpperArm" , 0.85, 1 , 1 , 1.15, 1 , 1 )
#BoneMorph.SetScale ("EyeSclX", "Eyepos_L" , 1 , 1 , 0.92, 1 , 1 , 1.08)
#BoneMorph.SetScale ("EyeSclX", "Eyepos_R" , 1 , 1 , 0.92, 1 , 1 , 1.08)
#BoneMorph.SetScale ("EyeSclY", "Eyepos_L" , 1 , 0.92, 1 , 1 , 1.08, 1 )
#BoneMorph.SetScale ("EyeSclY", "Eyepos_R" , 1 , 0.92, 1 , 1 , 1.08, 1 )
#BoneMorph.SetPosition("EyePosX", "Eyepos_R" , 1 , 1 , 0.9 , 1 , 1 , 1.1 )
#BoneMorph.SetPosition("EyePosX", "Eyepos_L" , 1 , 1 , 0.9 , 1 , 1 , 1.1 )
#BoneMorph.SetPosition("EyePosY", "Eyepos_R" , 1 , 0.93, 1 , 1 , 1.07, 1 )
#BoneMorph.SetPosition("EyePosY", "Eyepos_L" , 1 , 0.93, 1 , 1 , 1.07, 1 )
#BoneMorph.SetScale ("HeadX" , "Bip01 Head" , 1 , 0.9 , 0.8 , 1 , 1.1 , 1.2 )
#BoneMorph.SetScale ("HeadY" , "Bip01 Head" , 0.8 , 0.9 , 1 , 1.2 , 1.1 , 1 )
#BoneMorph.SetPosition("DouPer" , "Bip01 Spine" , 1 , 1 , 0.94, 1 , 1 , 1.06)
#BoneMorph.SetPosition("DouPer" , "Bip01 Spine0a" , 0.88, 1 , 1 , 1.12, 1 , 1 )
#BoneMorph.SetPosition("DouPer" , "Bip01 Spine1" , 0.88, 1 , 1 , 1.12, 1 , 1 )
#BoneMorph.SetPosition("DouPer" , "Bip01 Spine1a" , 0.88, 1 , 1 , 1.12, 1 , 1 )
#BoneMorph.SetPosition("DouPer" , "Bip01 Neck" , 1.03, 1 , 1 , 0.97, 1 , 1 )
#BoneMorph.SetPosition("DouPer" , "Bip01 ? Calf" , 0.87, 1 , 1 , 1.13, 1 , 1 )
#BoneMorph.SetPosition("DouPer" , "Bip01 ? Foot" , 0.87, 1 , 1 , 1.13, 1 , 1 )
#BoneMorph.SetScale ("DouPer" , "Bip01 ? Thigh_SCL_", 0.87, 1 , 1 , 1.13, 1 , 1 )
#BoneMorph.SetScale ("DouPer" , "momotwist_?" , 0.87, 1 , 1 , 1.13, 1 , 1 )
#BoneMorph.SetScale ("DouPer" , "Bip01 ? Calf_SCL_" , 0.87, 1 , 1 , 1.13, 1 , 1 )
#BoneMorph.SetScale ("DouPer" , "Bip01 ? UpperArm" , 0.98, 1 , 1 , 1.02, 1 , 1 )
#BoneMorph.SetPosition("sintyou", "Bip01 Spine" , 1 , 1 , 0.85, 1 , 1 , 1.15)
#BoneMorph.SetPosition("sintyou", "Bip01 Spine0a" , 0.88, 1 , 1 , 1.12, 1 , 1 )
#BoneMorph.SetPosition("sintyou", "Bip01 Spine1" , 0.88, 1 , 1 , 1.12, 1 , 1 )
#BoneMorph.SetPosition("sintyou", "Bip01 Spine1a" , 0.88, 1 , 1 , 1.12, 1 , 1 )
#BoneMorph.SetPosition("sintyou", "Bip01 Neck" , 0.97, 1 , 1 , 1.03, 1 , 1 )
#BoneMorph.SetPosition("sintyou", "Bip01 Head" , 0.9 , 1 , 1 , 1.1 , 1 , 1 )
#BoneMorph.SetPosition("sintyou", "Bip01 ? Calf" , 0.87, 1 , 1 , 1.13, 1 , 1 )
#BoneMorph.SetPosition("sintyou", "Bip01 ? Foot" , 0.87, 1 , 1 , 1.13, 1 , 1 )
#BoneMorph.SetScale ("sintyou", "Bip01 ? UpperArm" , 0.9 , 1 , 1 , 1.1 , 1 , 1 )
#BoneMorph.SetScale ("sintyou", "Bip01 ? Thigh_SCL_", 0.87, 1 , 1 , 1.13, 1 , 1 )
#BoneMorph.SetScale ("sintyou", "momotwist_?" , 0.87, 1 , 1 , 1.13, 1 , 1 )
#BoneMorph.SetScale ("sintyou", "Bip01 ? Calf_SCL_" , 0.87, 1 , 1 , 1.13, 1 , 1 )
#BoneMorph.SetScale ("koshi" , "Bip01 Pelvis_SCL_" , 1 , 0.8 , 0.92, 1 , 1.2 , 1.08)
#BoneMorph.SetScale ("koshi" , "Bip01 Spine_SCL_" , 1 , 1 , 1 , 1 , 1 , 1 )
#BoneMorph.SetScale ("koshi" , "Hip_?" , 1 , 0.96, 0.9 , 1 , 1.04, 1.1 )
#BoneMorph.SetScale ("koshi" , "Skirt" , 1 , 0.85, 0.88, 1 , 1.2 , 1.12)
#BoneMorph.SetPosition("kata" , "Bip01 ? Clavicle" , 0.98, 1 , 0.5 , 1.02, 1 , 1.5 )
#BoneMorph.SetScale ("kata" , "Bip01 Spine1a_SCL_", 1 , 1 , 0.95, 1 , 1 , 1.05)
#BoneMorph.SetScale ("west" , "Bip01 Spine_SCL_" , 1 , 0.95, 0.9 , 1 , 1.05, 1.1 )
#BoneMorph.SetScale ("west" , "Bip01 Spine0a_SCL_", 1 , 0.85, 0.7 , 1 , 1.15, 1.3 )
#BoneMorph.SetScale ("west" , "Bip01 Spine1_SCL_" , 1 , 0.9 , 0.85, 1 , 1.1 , 1.15)
#BoneMorph.SetScale ("west" , "Bip01 Spine1a_SCL_", 1 , 0.95, 0.95, 1 , 1.05, 1.05)
#BoneMorph.SetScale ("west" , "Skirt" , 1 , 0.92, 0.88, 1 , 1.08, 1.12)
morph.SetPosition("KubiScl", "Bip01 Neck" , 1.05, 1 , 1 )
morph.SetPosition("KubiScl", "Bip01 Head" , 1.2 , 1 , 1 )
morph.SetScale ("UdeScl" , "Bip01 ? UpperArm" , 1.15, 1 , 1 )
morph.SetScale ("HeadX" , "Bip01 Head" , 1 , 1.1 , 1.2 )
morph.SetScale ("HeadY" , "Bip01 Head" , 1.2 , 1.1 , 1 )
morph.SetPosition("sintyou", "Bip01 Spine" , 1 , 1 , 1.15)
morph.SetPosition("sintyou", "Bip01 Spine0a" , 1.12, 1 , 1 )
morph.SetPosition("sintyou", "Bip01 Spine1" , 1.12, 1 , 1 )
morph.SetPosition("sintyou", "Bip01 Spine1a" , 1.12, 1 , 1 )
morph.SetPosition("sintyou", "Bip01 Neck" , 1.03, 1 , 1 )
morph.SetPosition("sintyou", "Bip01 Head" , 1.1 , 1 , 1 )
morph.SetPosition("sintyou", "Bip01 ? Calf" , 1.13, 1 , 1 )
| |
<reponame>tsadakane/TIGRE
"""Three-dimensional Shepp-Logan head phantom
Variations of three-dimensional Shepp-Logan head phantom.
Copyright
=========
3-clause BSD License
Copyright 2021 SADAKANE, Tomoyuki
https://github.com/tsadakane/sl3d
This code is inspired by the following MATLAB codes:
* <NAME> (2021). 3D Shepp-Logan phantom (https://www.mathworks.com/matlabcentral/fileexchange/9416-3d-shepp-logan-phantom), MATLAB Central File Exchange. Retrieved April 29, 2021.
* <NAME> (2021). 3D Shepp-Logan Phantom (https://www.mathworks.com/matlabcentral/fileexchange/50974-3d-shepp-logan-phantom), MATLAB Central File Exchange. Retrieved April 29, 2021.
"""
import numpy as np
#%%
def shepp_logan_3d(size_out=128, phantom_type="yu-ye-wang", get_ellipsoids=False):
    """Three-dimensional Shepp-Logan head phantom
    Variations of three-dimensional Shepp-Logan head phantom.
    Parameters
    ==========
    phantom_type: str
        One of {"kak-slaney", "yu-ye-wang", "toft-schabel"}, optional
        Default is "yu-ye-wang"
        The type of phantom.
    size_out : int or list whose length is three, optional
        Default is [128, 128, 128]
        The number of voxels of phantom. [nVoxelZ, nVoxelY, nVoxelX]
    get_ellipsoids: bool
        Default is False.
        If True, returns the parameters of the ellipsoids as well as the phantom image.
    Returns
    =======
    If get_ellipsoids is True:
        (img_phantom, ellipsoids)
        img_phantom: numpy ndarray
        ellipsoids: list of parameters that defines ellipsoids
    else:
        img_phantom: numpy ndarray
    Notes
    =====
    For any given voxel in the output image, the voxel's value is equal to the
    sum of the additive intensity values of all ellipsoids that the voxel is a
    part of. If a voxel is not part of any ellipsoid, its value is 0.
    The additive intensity value A for an ellipsoid can be positive or
    negative; if it is negative, the ellipsoid will be darker than the
    surrounding voxels.
    Note that, depending on the values of A, some voxels may have values
    outside the range [0,1].
    The voxel value at (x, y, z) is img[z, y, x].
    """
    (ellipsoids, nVoxel, formula) = _parse_inputs(size_out, phantom_type)
    nVoxelX = nVoxel[2]
    nVoxelY = nVoxel[1]
    nVoxelZ = nVoxel[0]
    img_phantom = np.zeros(nVoxel, dtype=np.float32)
    # The phantom lives in the normalized cube [-1, 1]^3.
    range_x = np.linspace(-1, +1, nVoxelX)
    range_y = np.linspace(-1, +1, nVoxelY)
    range_z = np.linspace(-1, +1, nVoxelZ)
    mesh_z, mesh_y, mesh_x = np.meshgrid(range_z, range_y, range_x, indexing="ij")
    # Flatten the voxel grid so each ellipsoid membership test is a single
    # vectorized expression over all voxels.
    mesh_x = mesh_x.reshape(-1)
    mesh_y = mesh_y.reshape(-1)
    mesh_z = mesh_z.reshape(-1)
    coord = np.vstack([mesh_z, mesh_y, mesh_x])
    img_phantom = img_phantom.reshape(-1)
    for ellipsoid in ellipsoids:
        asq = ellipsoid[0] ** 2  # a^2
        bsq = ellipsoid[1] ** 2  # b^2
        csq = ellipsoid[2] ** 2  # c^2
        x0 = ellipsoid[3]  # x offset
        y0 = ellipsoid[4]  # y offset
        z0 = ellipsoid[5]  # z offset
        phi1 = ellipsoid[6] * np.pi / 180  # 1st Euler angle in radians (rotation about z-axis)
        phi2 = ellipsoid[7] * np.pi / 180  # 2nd Euler angle in radians (rotation about x'-axis)
        phi3 = ellipsoid[8] * np.pi / 180  # 3rd Euler angle in radians (rotation about z"-axis)
        A = ellipsoid[9]  # Amplitude change for this ellipsoid
        c1 = np.cos(phi1)
        s1 = np.sin(phi1)
        c2 = np.cos(phi2)
        s2 = np.sin(phi2)
        c3 = np.cos(phi3)
        s3 = np.sin(phi3)
        # Euler rotation matrix
        alpha = [  #     Z                       Y                        X
            [c2, -s2 * c1, s2 * s1],  # Z
            [c3 * s2, -s3 * s1 + c3 * c2 * c1, -s3 * c1 - c3 * c2 * s1],  # Y
            [s3 * s2, c3 * s1 + s3 * c2 * c1, c3 * c1 - s3 * c2 * s1],  # X
        ]
        # `formula` selects the geometry-transform convention of the phantom
        # variant (see _parse_inputs): 0 rotates each ellipsoid about its own
        # center; 1 rotates the offsets together with the coordinates.
        if formula == 0:
            # Move the ellipsoid to the origin first, and rotate...
            coord_rot = np.dot(alpha, coord - np.array([[z0], [y0], [x0]]))
            idx = np.argwhere(
                (coord_rot[2, :]) ** 2 / asq
                + (coord_rot[1, :]) ** 2 / bsq
                + (coord_rot[0, :]) ** 2 / csq
                <= 1
            )
            # Naive:
            # coord_rot = np.dot(alpha, coord-np.array([[z0], [y0], [x0]])) + np.array([[z0], [y0], [x0]])
            # idx = np.argwhere((coord_rot[2,:]-x0)**2/asq + (coord_rot[1,:]-y0)**2/bsq + (coord_rot[0,:]-z0)**2/csq <= 1)
        else:
            # (x0,y0,z0) rotates too!
            coord_rot = np.dot(alpha, coord)
            idx = np.argwhere(
                (coord_rot[2, :] - x0) ** 2 / asq
                + (coord_rot[1, :] - y0) ** 2 / bsq
                + (coord_rot[0, :] - z0) ** 2 / csq
                <= 1
            )
        # Intensities are additive where ellipsoids overlap.
        img_phantom[idx] += A
    img_phantom = img_phantom.reshape(nVoxel)
    if get_ellipsoids:
        return img_phantom, ellipsoids
    else:
        return img_phantom
def _parse_inputs(size_out, phantom_type):
    """
    Parse and validate the inputs of `shepp_logan_3d`.

    Returns:
        tuple (ellipsoids, nVoxel, formula)
        * ellipsoids is the m-by-10 array which defines m ellipsoids,
          where m is 10 in the cases of the variants implemented in this file.
        * nVoxel is the 3 array which defines the number of voxels
        * formula selects the geometry-transform convention: 0 rotates each
          ellipsoid about its own center, 1 (toft-schabel) rotates the
          center offsets together with the coordinates.
    Parameters:
        phantom_type: One of {"kak-slaney", "yu-ye-wang", "toft-schabel"}
        size_out: An int or 3-vector.
            * int : the phantom voxel will be isotropic.
            * 3-vector: the size of the phantom image [nZ, nY, nX]
    """
    if isinstance(size_out, int):
        nVoxel = [size_out, size_out, size_out]
    elif isinstance(size_out, (list, tuple)) and len(size_out) == 3:
        nVoxel = [size_out[0], size_out[1], size_out[2]]
    # BUGFIX: the original condition was `type(size_out) == np.array`, which
    # is always False because `np.array` is a factory function, not a type,
    # so ndarray inputs silently fell through to the 128^3 default.
    elif isinstance(size_out, np.ndarray) and np.size(size_out) == 3:
        flat = size_out.reshape(-1)
        nVoxel = [flat[0], flat[1], flat[2]]
    else:
        # Unsupported input: fall back to the documented default size.
        nVoxel = [128, 128, 128]
    if phantom_type == "kak-slaney":
        ellipsoids = kak_slaney()
        formula = 0
    elif phantom_type == "yu-ye-wang":
        ellipsoids = yu_ye_wang()
        formula = 0
    elif phantom_type == "toft-schabel":
        ellipsoids = toft_schabel()
        formula = 1
    else:
        print(f"Unknown type {phantom_type}. yu-ye-wang is used.")
        ellipsoids = yu_ye_wang()
        formula = 0
    return (ellipsoids, nVoxel, formula)
###################################
# Definetions of Head phantoms: #
###################################
def kak_slaney():
    """
    The 3D Shepp-Logan head phantom. A is the relative density of water.

    Returns a (10, 10) float64 ndarray; each row is one ellipsoid given as
    [a, b, c, x0, y0, z0, phi1, phi2, phi3, A] (semi-axes, center offsets,
    Euler angles in degrees, additive intensity).
    Ref:
        [1] Kak AC, <NAME>, Principles of Computerized Tomographic Imaging, 1988. p.102
            http://www.slaney.org/pct/pct-errata.html
    """
    #      a       b      c       x0      y0      z0    phi1 phi2 phi3   A
    # -------------------------------------------------------------------------
    ells = [
        [0.6900, 0.920, 0.900, 0.000, 0.000, 0.000, 0, 0, 0, 2.00],
        [0.6624, 0.874, 0.880, 0.000, 0.000, 0.000, 0, 0, 0, -0.98],
        [0.4100, 0.160, 0.210, -0.220, 0.000, -0.250, 108, 0, 0, -0.02],
        [0.3100, 0.110, 0.220, 0.220, 0.000, -0.250, 72, 0, 0, -0.02],
        [0.2100, 0.250, 0.500, 0.000, 0.350, -0.250, 0, 0, 0, 0.02],
        [0.0460, 0.046, 0.046, 0.000, 0.100, -0.250, 0, 0, 0, 0.02],
        [0.0460, 0.023, 0.020, -0.080, -0.650, -0.250, 0, 0, 0, 0.01],
        [0.0460, 0.023, 0.020, 0.060, -0.650, -0.250, 90, 0, 0, 0.01],
        [0.0560, 0.040, 0.100, 0.060, -0.105, 0.625, 90, 0, 0, 0.02],
        [0.0560, 0.056, 0.100, 0.000, 0.100, 0.625, 0, 0, 0, -0.02],
    ]
    # `np.asarray(np.matrix(ells))` produced the same (10, 10) float array,
    # but np.matrix is deprecated; build the ndarray directly instead.
    return np.array(ells, dtype=np.float64)
def yu_ye_wang():
    """
    A variant of the Kak-Slaney phantom in which the contrast is improved for better
    visual perception.
    Ref:
        [2] <NAME>, <NAME>, <NAME>, Katsevich-Type Algorithms for Variable Radius Spiral Cone-Beam CT
            Proceedings of the SPIE, Volume 5535, p. 550-557 (2004)
    """
    # Identical geometry to the Kak-Slaney phantom; only the additive
    # intensities (column 9, "A") are swapped for higher-contrast values.
    contrast = [1.00, -0.80, -0.2, -0.2, 0.2, 0.2, 0.1, 0.1, 0.2, -0.2]
    ellipsoids = kak_slaney()
    ellipsoids[:, 9] = np.array(contrast)
    return ellipsoids
def toft_schabel():
"""
The geometry of this phantom is based on the 2D phantom shown in [3] and [4].
(Maybe this is the original Shepp-Logan head phantom?)
In [5], the intensities of the Shepp-Logan are modified
to yield higher contrast in the image.
It is known as 'Modified Shepp-Logan' of the `phantom` function of "Image Processing Toolbox" for MATLAB
In [6], it is extended to the 3D version. The parameters are as below.
The formula of geometry transfom for this option is the same as of [6] to reproduce the result,
while for other options, kak-slaney and yu-ye-wang, it is different.
Ref:
| |
= 'stopped'
elif isinstance(status, int):
status = 'stopped[%s]' % exitcodedict.get(status, status)
return '<%s(%s, %s%s)>' % (
type(self).__name__,
self._name,
status,
self.daemon and ' daemon' or ''
)
    def join(self, timeout=None):
        """
        Wait cooperatively until child process terminates or timeout occurs.

        :arg timeout: ``None`` (default) or a time in seconds. The method
            simply returns upon timeout expiration. The state of the process
            has to be identified via ``is_alive()``.
        """
        self._check_closed()
        # join() may only be called from the process that created the child.
        assert self._parent_pid == os.getpid(), "I'm not parent of this child."
        assert self._popen is not None, 'Can only join a started process.'
        if not WINDOWS:
            # Resemble multiprocessing's join() method while replacing
            # `self._popen.wait(timeout)` with
            # `self._returnevent.wait(timeout)`
            self._returnevent.wait(timeout)
            if self._popen.returncode is not None:
                if hasattr(multiprocessing.process, '_children'):
                    # This is for Python 3.4 and beyond.
                    kids = multiprocessing.process._children
                else:
                    # For Python 2.6, 2.7, 3.3.
                    kids = multiprocessing.process._current_process._children
                # De-register the terminated child from multiprocessing's
                # bookkeeping, as its own join() would have done.
                kids.discard(self)
                return
        # Windows (or returncode not yet set): poll cooperatively so other
        # greenlets keep running while we wait.
        with gevent.Timeout(timeout, False):
            while self.is_alive():
                # This frequency seems reasonable, but that's not 100 % certain.
                gevent.sleep(0.01)
        # Clean up after child as designed by Process class (non-blocking).
        super(_GProcess, self).join(timeout=0)
class _GIPCHandle(object):
"""
The ``_GIPCHandle`` class implements common features of read and write
handles. ``_GIPCHandle`` instances are created via :func:`pipe`.
.. todo::
Implement destructor?
http://eli.thegreenplace.net/2009/06/12/
safely-using-destructors-in-python/
"""
    def __init__(self):
        """Initialize handle state and register the handle in the
        module-global list of valid handles."""
        global _all_handles
        # Generate label of text/unicode type from three random bytes.
        self._id = codecs.encode(os.urandom(3), "hex_codec").decode("ascii")
        # Remember the creating process; `_validate()` rejects use of this
        # handle from any other PID until `_set_legit_process()` is called.
        self._legit_pid = os.getpid()
        self._make_nonblocking()
        # Define lock for synchronizing access to this handle within the current
        # process. Note that a `gevent.lock.Semaphore` instance lives on the
        # heap of the current process and cannot be used to synchronize access
        # across multiple processes. That is, this lock is only meaningful in
        # the current process. This is especially important to consider when the
        # platform supports fork()ing.
        self._lock = gevent.lock.Semaphore(value=1)
        self._closed = False
        _all_handles.append(self)
def _make_nonblocking(self):
if hasattr(gevent.os, 'make_nonblocking'):
# On POSIX-compliant systems, the file descriptor flags are
# inherited after forking, i.e. it is sufficient to make fd
# nonblocking only once.
gevent.os.make_nonblocking(self._fd)
    def close(self):
        """Close underlying file descriptor and de-register handle from further
        usage. Is called on context exit.

        Raises:
            - :exc:`GIPCError`
            - :exc:`GIPCClosed`
            - :exc:`GIPCLocked`
        """
        global _all_handles
        # Raises GIPCClosed/GIPCError when already closed or called from a
        # foreign process.
        self._validate()
        # Non-blocking acquire: refuse to close while an I/O operation holds
        # the lock rather than silently waiting for it to finish.
        if not self._lock.acquire(blocking=False):
            raise GIPCLocked(
                "Can't close handle %s: locked for I/O operation." % self)
        log.debug("Invalidating %s ...", self)
        if self._fd is not None:
            os.close(self._fd)
            self._fd = None
        if self in _all_handles:
            # Remove the handle from the global list of valid handles.
            _all_handles.remove(self)
        self._closed = True
        self._lock.release()
    def _set_legit_process(self):
        """Adopt this handle in the current process so that `_validate()`
        accepts subsequent I/O operations from it."""
        log.debug("Legitimate %s for current process.", self)
        self._legit_pid = os.getpid()
def _validate(self):
"""Raise exception if this handle is closed or not registered to be
used in the current process.
Intended to be called before every operation on `self._fd`.
Reveals wrong usage of this module in the context of multiple
processes. Might prevent tedious debugging sessions. Has little
performance impact.
"""
if self._closed:
raise GIPCClosed(
"GIPCHandle has been closed before.")
if os.getpid() != self._legit_pid:
raise GIPCError(
"GIPCHandle %s not registered for current process %s." % (
self, os.getpid()))
    def _winapi_childhandle_prepare_transfer(self):
        """Prepare file descriptor for transfer to child process on Windows.

        What follows now is an overview for the process of transferring a
        Windows pipe handle to a child process, for different Python versions
        (explanation / background can be found below):

        Python versions < 3.4:
            1)   In the parent, get WinAPI handle from C file descriptor via
                 msvcrt.get_osfhandle().
            2)   WinAPI call DuplicateHandle(... ,bInheritHandle=True) in
                 parent. Close old handle, let inheritable duplicate live on.
            2.5) multiprocessing internals invoke WinAPI call CreateProcess(...,
                 InheritHandles=True).
            3)   Close the duplicate in the parent.
            4)   Use msvcrt.open_osfhandle() in child for converting the Windows
                 pipe handle to a C file descriptor, and for setting the
                 (read/write)-only access flag. This file descriptor will be
                 used by user code.

        Python versions >= 3.4:
            1)   Same as above.
            2)   Store handle and process ID. Both are integers that will be
                 pickled to and used by the child.
            2.5) multiprocessing internals invoke WinAPI call
                 CreateProcess(..., InheritHandles=False).
            3)   Steal the Windows pipe handle from the parent process: in the
                 child use the parent's process ID for getting a WinAPI handle
                 to the parent process via WinAPI call OpenProcess(). Use option
                 PROCESS_DUP_HANDLE as desired access right. Invoke
                 DuplicateHandle() in the child and use the handle to the parent
                 process as source process handle. Use the
                 DUPLICATE_CLOSE_SOURCE and the DUPLICATE_SAME_ACCESS flags. The
                 result is a Windows pipe handle, "stolen" from the parent.
            4)   Same as above.

        Background:
        By default, file descriptors are not inherited by child processes on
        Windows. However, they can be made inheritable via calling the system
        function `DuplicateHandle` while setting `bInheritHandle` to True.
        From MSDN:
            bInheritHandle:
                A variable that indicates whether the handle is inheritable.
                If TRUE, the duplicate handle can be inherited by new processes
                created by the target process. If FALSE, the new handle cannot
                be inherited.
        The internals of Python's `subprocess` and `multiprocessing` make use of
        this. There is, however, no officially exposed Python API. Nevertheless,
        the function `multiprocessing.forking.duplicate` (in Python versions
        smaller than 3.4) and `multiprocessing.reduction.duplicate` (>= 3.4)
        seems to be safely usable. In all versions, `duplicate` is part of
        `multiprocessing.reduction`. As of 2015-07-20, the reduction module is
        part of multiprocessing in all Python versions from 2.6 to 3.5.
        The just outlined approach (DuplicateHandle() in parent, automatically
        inherit it in child) only works for Python versions smaller than 3.4:
        from Python 3.4 on, the child process is created with CreateProcess()'s
        `InheritHandles` attribute set to False (this was explicitly set to True
        in older Python versions). A different method needs to be used, referred
        to as "stealing" the handle: DuplicateHandle can be called in the child
        for retrieving the handle from the parent while using the
        _winapi.DUPLICATE_CLOSE_SOURCE flag, which automatically closes the
        handle in the parent. This method is used by
        `multiprocessing.popen_spawn_win32` and implemented in
        `multiprocessing.reduction.steal_handle`.

        Refs:
        https://msdn.microsoft.com/en-us/library/windows/desktop/ms684880.aspx
        https://msdn.microsoft.com/en-us/library/windows/desktop/ms684320.aspx
        https://msdn.microsoft.com/en-us/library/ks2530z6.aspx
        https://msdn.microsoft.com/en-us/library/bdts1c9x.aspx
        """
        if WINAPI_HANDLE_TRANSFER_STEAL:
            # Python >= 3.4 path: record WinAPI handle and own PID; the child
            # later "steals" the handle (see docstring, step 3).
            self._parent_winapihandle = msvcrt.get_osfhandle(self._fd)
            self._parent_pid = os.getpid()
            return
        # Get Windows file handle from C file descriptor.
        winapihandle = msvcrt.get_osfhandle(self._fd)
        # Duplicate file handle, rendering the duplicate inheritable by
        # processes created by the current process.
        self._inheritable_winapihandle = multiprocessing.reduction.duplicate(
            handle=winapihandle, inheritable=True)
        # Close "old" (non-inheritable) file descriptor.
        os.close(self._fd)
        # Mark file descriptor as "already closed".
        self._fd = None
    def _winapi_childhandle_after_createprocess_parent(self):
        """Called on Windows in the parent process after the CreateProcess()
        system call. This method is intended to revert the actions performed
        within `_winapi_childhandle_prepare_transfer()`. In particular, this
        method is intended to prepare a subsequent call to the handle's
        `close()` method.
        """
        if WINAPI_HANDLE_TRANSFER_STEAL:
            # Drop the transfer bookkeeping; the child now owns these values.
            del self._parent_winapihandle
            del self._parent_pid
            # Setting `_fd` to None prevents the subsequent `close()` method
            # invocation (triggered in `start_process()` after child creation)
            # from actually calling `os.close()` on the file descriptor. This
            # must be prevented because at this point the handle either already
            # is or will be "stolen" by the child via a direct WinAPI call using
            # the DUPLICATE_CLOSE_SOURCE option (and therefore become
            # auto-closed, here, in the parent). The relative timing is not
            # predictable. If the child process steals first, os.close() here
            # would result in `OSError: [Errno 9] Bad file descriptor`. If
            # os.close() is called on the handle in the parent before the child
            # can steal the handle, a `OSError: [WinError 6] The handle is
            # invalid` will be thrown in the child upon the stealing attempt.
            self._fd = None
            return
        # Get C file descriptor from Windows file handle, restoring the
        # (read/write)-only access flag, so close() can operate on it.
        self._fd = msvcrt.open_osfhandle(
            self._inheritable_winapihandle, self._fd_flag)
        del self._inheritable_winapihandle
def _winapi_childhandle_after_createprocess_child(self):
"""Called on Windows in the child process after the CreateProcess()
system call. This is required for making the handle usable in the child.
"""
if WINAPI_HANDLE_TRANSFER_STEAL:
# In this case the handle has not been inherited by the child
# process during CreateProcess(). Steal it from the parent.
new_winapihandle = multiprocessing.reduction.steal_handle(
self._parent_pid, self._parent_winapihandle)
del self._parent_winapihandle
del self._parent_pid
# Restore C file descriptor with (read/write)only flag.
self._fd = msvcrt.open_osfhandle(new_winapihandle, self._fd_flag)
return
# In this case the handle | |
elif reactNum == 3:
row["reactedType"] = "Wow"
elif reactNum == 4:
row["reactedType"] = "Heart"
elif reactNum == 5:
row["reactedType"] = "Angry"
elif reactNum == 6:
row["reactedType"] = "Haha"
else:
print("Invalid react Type")
return
try:
query = "INSERT INTO MAKES_GENERAL_REACT(post_id, user_id, reacted_type) VALUES(%(post_id)s, %(user_id)s, %(reactedType)s);"
cur.execute(query, row)
con.commit()
except Exception as e:
con.rollback()
print(e)
print("Error: Check your inputs.")
def addLikes():
    """Interactively record that a user likes a page (LIKES table)."""
    global cur
    # Prompt order matters: user first, then page (dict literals evaluate
    # their values in source order).
    params = {
        "user_id": input("Enter the user ID: "),
        "page_id": input("Enter the page ID of the page to like: "),
    }
    try:
        cur.execute(
            "INSERT INTO LIKES(page_id, user_id) VALUES(%(page_id)s, %(user_id)s);",
            params)
        con.commit()
    except Exception as e:
        con.rollback()
        print(e)
        print("Error: Check your inputs.")
def addUserToGroup():
    """Interactively add a user to a group (BELONGS_TO table)."""
    global cur
    params = {
        "user_id": input(
            "Enter the ID of the user who wants to join a group: "),
        "group_id": input(
            "Enter the ID of the group that the user wants to join: "),
    }
    try:
        cur.execute(
            "INSERT INTO BELONGS_TO VALUES(%(user_id)s, %(group_id)s);",
            params)
        con.commit()
    except Exception as e:
        con.rollback()
        print(e)
        print("Error: Check your inputs.")
def makeUserAdmin():
    """Promote a group member to admin (IS_ADMIN); refuses non-members."""
    global cur
    params = {
        "user_id": input(
            "Enter the ID of the user to make him an admin of a group: "),
        "group_id": input(
            "Enter the ID of the group for which user should be made an admin of: "),
    }
    # Only existing members may become admins.
    membership = "SELECT * FROM BELONGS_TO where group_id=%(group_id)s and user_id=%(user_id)s;"
    if isNonEmptyQuery(membership, params) == False:
        print("User doesn't belong to the group or invalid query.")
        return
    try:
        cur.execute(
            "INSERT INTO IS_ADMIN VALUES(%(user_id)s, %(group_id)s);",
            params)
        con.commit()
    except Exception as e:
        con.rollback()
        print(e)
        print("Error: Check your inputs.")
def makeUserModerator():
    """Promote a group member to moderator (IS_MODERATOR); refuses non-members."""
    global cur
    params = {
        "user_id": input(
            "Enter the ID of the user to make him an moderator of a group: "),
        "group_id": input(
            "Enter the ID of the group for which user should be made an moderator of: "),
    }
    # Only existing members may become moderators.
    membership = "SELECT * FROM BELONGS_TO where group_id=%(group_id)s and user_id=%(user_id)s;"
    if isNonEmptyQuery(membership, params) == False:
        print("User doesn't belong to the group or invalid query.")
        return
    try:
        cur.execute(
            "INSERT INTO IS_MODERATOR VALUES(%(user_id)s, %(group_id)s);",
            params)
        con.commit()
    except Exception as e:
        con.rollback()
        print(e)
        print("Error: Check your inputs.")
def makeReactionToAComment():
    """Record a user's reaction to a comment (MAKES_A_REACT), chosen from a menu."""
    global cur
    params = {}
    params["user_id"] = input("Enter the ID of the user who made the reaction: ")
    params["comment_id"] = input(
        "Enter the ID of the comment in which the reaction was made: ")
    print("Choose the react type by pressing the corresponding number")
    print("1 . Like")
    print("2 . Dislike")
    print("3 . Wow")
    print("4 . Heart")
    print("5 . Angry")
    print("6 . Haha")
    # Menu number -> enum value stored in the reacted_type column.
    react_names = {1: "Like", 2: "Dislike", 3: "Wow", 4: "Heart", 5: "Angry", 6: "Haha"}
    choice = 123  # sentinel outside the valid menu range
    try:
        choice = int(input())
    except:
        print("Invalid react Type")
    if choice not in react_names:
        print("Invalid react Type")
        return
    params["reactedType"] = react_names[choice]
    try:
        query = "INSERT INTO MAKES_A_REACT(comment_id, user_id, reacted_type) VALUES(%(comment_id)s, %(user_id)s, %(reactedType)s);"
        cur.execute(query, params)
        con.commit()
    except Exception as e:
        con.rollback()
        print(e)
        print("Error: Check your inputs.")
def mentionInComment():
    """Record that one user mentioned another inside a comment (MENTIONS)."""
    global cur
    params = {
        "comment_id": input("Enter the ID of the comment: "),
        "mentioner_id": input(
            "Enter the ID of the user who mentioned someone: "),
        "mentionee_id": input("Enter the ID of the user who got mentioned: "),
    }
    # The mention is only valid if the comment was written by the mentioner.
    if checkCommentIntegrity(params["comment_id"], params["mentioner_id"]) == False:
        print("Error: Comment was not created by the user who mentioned.")
        return
    try:
        cur.execute(
            "INSERT INTO MENTIONS VALUES(%(mentioner_id)s, %(mentionee_id)s, %(comment_id)s);",
            params)
        con.commit()
    except Exception as e:
        con.rollback()
        print(e)
        print("Error: Check your inputs.")
def addCommmentsRelations():
    """Attach an existing comment to its author and post (COMMENTS table)."""
    global cur
    params = {
        "comment_id": input("Enter the ID of the comment: "),
        "user_id": input("Enter the ID of the user who made the comment: "),
        "post_id": input(
            "Enter the ID of the post in which the comment is made: "),
    }
    try:
        cur.execute(
            "INSERT INTO COMMENTS VALUES(%(comment_id)s, %(user_id)s, %(post_id)s);",
            params)
        con.commit()
    except Exception as e:
        con.rollback()
        print(e)
        print("Error: Check your inputs.")
def addSendsSpecific(message_id):
    """Link an existing direct message to its sender and receiver (SENDS_SPECIFIC)."""
    global cur
    params = {
        "sender_id": input("Enter the ID of the sender: "),
        "receiver_id": input("Enter the ID of the receiver: "),
        "message_id": message_id,
    }
    try:
        cur.execute(
            "INSERT INTO SENDS_SPECIFIC VALUES(%(sender_id)s, %(receiver_id)s, %(message_id)s);",
            params)
        con.commit()
    except Exception as e:
        con.rollback()
        print(e)
        print("Error: Check your inputs.")
def addSendsGeneral(message_id):
    """Link an existing message to a group chat (SENDS_GENERAL); the sender
    must be a member of the group."""
    global cur
    params = {
        "sender_id": input("Enter the ID of the sender: "),
        "group_id": input("Enter the ID of the group: "),
        "message_id": message_id,
    }
    if checkIsMemberOfGroup(params["sender_id"], params["group_id"]) == False:
        print("Error: Sender is not a member of the group.")
        return
    try:
        cur.execute(
            "INSERT INTO SENDS_GENERAL VALUES(%(sender_id)s, %(group_id)s, %(message_id)s); ",
            params)
        con.commit()
    except Exception as e:
        con.rollback()
        print(e)
        print("Error: Check your inputs.")
def addResponds():
    """Record a user's reaction to a story (RESPONDS), chosen from a menu."""
    global cur
    params = {}
    params["reacter_id"] = input(
        "Enter the ID of the user who reacts to the story: ")
    params["story_id"] = input("Enter the ID of the story: ")
    print("Choose the react type by entering the corresponding number")
    print("1. Like")
    print("2. Dislike")
    print("3. Wow")
    print("4. Heart")
    print("5. Angry")
    print("6. Haha")
    # Menu number -> enum value stored in the reacted_type column.
    react_names = {1: "Like", 2: "Dislike", 3: "Wow", 4: "Heart", 5: "Angry", 6: "Haha"}
    choice = 123  # sentinel outside the valid menu range
    try:
        choice = int(input())
    except:
        print("Invalid react Type")
    if choice not in react_names:
        print("Invalid react Type")
        return
    params["reactedType"] = react_names[choice]
    try:
        query = "INSERT INTO RESPONDS VALUES(%(reacter_id)s, %(story_id)s,%(reactedType)s);"
        cur.execute(query, params)
        con.commit()
    except Exception as e:
        con.rollback()
        print(e)
        print("Error: Check your inputs.")
def addShares():
    """Share a post into a group (SHARES); the sharer must be a group member."""
    global cur
    params = {
        "user_id": input("Enter the ID of user: "),
        "group_id": input("Enter the ID of the group: "),
        "post_id": input("Enter the ID of the post to share: "),
    }
    if checkIsMemberOfGroup(params["user_id"], params["group_id"]) == False:
        print("Error: Sender is not a member of the group.")
        return
    try:
        cur.execute(
            "INSERT INTO SHARES VALUES(%(user_id)s, %(group_id)s, %(post_id)s);",
            params)
        con.commit()
    except Exception as e:
        con.rollback()
        print(e)
        print("Error: Check your inputs.")
def addIsTagged():
    """Tag a user in a post (IS_TAGGED table)."""
    global cur
    params = {
        "post_id": input("Enter the ID of the post: "),
        "user_id": input(
            "Enter the ID of the user who is tagged in the post: "),
    }
    try:
        cur.execute(
            "INSERT INTO IS_TAGGED VALUES(%(user_id)s, %(post_id)s); ",
            params)
        con.commit()
    except Exception as e:
        con.rollback()
        print(e)
        print("Error: Check your inputs.")
def showUserCreationOptions():
    """Interactive submenu for user-creation actions; '42' returns to the caller."""
    # Menu choice -> handler function.
    handlers = {'1': addUser, '2': addProfile, '3': addEducation}
    while True:
        sp.call('clear', shell=True)
        print("Choose an option from below: ")
        print("1. Create a new USER.")
        print("2. Create a profile for a user.")
        print("3. Add an educational qualification of the user.")
        print("42. Go back.")
        n = input("Enter: ")
        if n == '42':
            break
        handler = handlers.get(n)
        if handler is None:
            input("Invalid input, press enter to continue.")
            continue
        handler()
        input("Press enter to Continue.")
def showPostRelatedOptions():
    """Interactive submenu for post-related actions; '42' returns to the caller."""
    # Menu choice -> handler function.
    handlers = {
        '1': addPost,
        '2': addComment,
        '3': addCommmentsRelations,
        '4': addIsTagged,
        '5': mentionInComment,
        '6': addMakesGeneralReact,
        '7': makeReactionToAComment,
        '8': addShares,
    }
    while True:
        sp.call('clear', shell=True)
        print("Choose an option from below: ")
        print("1. Create a new post.")
        print("2. Create a new comment.")
        print("3. Add a comment to a post.")
        print("4. Tag someone in a post.")
        print("5. Mention someone in a comment.")
        print("6. React to a post.")
        print("7. React to a comment.")
        print("8. Share a post in a group.")
        print("42. Go back.")
        n = input("Enter: ")
        if n == '42':
            break
        handler = handlers.get(n)
        if handler is None:
            input("Invalid input, press enter to continue.")
            continue
        handler()
        input("Press enter to Continue.")
def showGroupRelatedOptions():
while(1):
tmp = sp.call('clear', shell=True)
print("Choose an option from below: ")
print("1. Create a new group.")
print("2. Make a new admin in a group.")
print("3. Make someone a new moderator in a group.")
print("4. Add a user to a group.")
print("5. Send a new general message in a group.")
print("6. Share a post in a group.")
print("42. Go back.")
n = input("Enter: ")
if n == '1':
addGroup()
elif n == '2':
makeUserAdmin()
elif n == '3':
makeUserModerator()
elif n == '4':
addUserToGroup()
elif | |
-np.log10(p_val), gene_name, ha="center", va="center", fontsize=8))
adjust_text(texts, arrowprops=dict(width=0.15, headwidth=0, color='gray', alpha=0.6), ax=ax)
# save the final result
path, plot_name = get_path_and_name_from_kwargs(name="volcano_{g1}_{g2}_annotation_{p}", g1=g1, g2=g2,
p=col_mapping[col].replace(' ', '_'), **kwargs)
save_plot_func(fig, path, plot_name, save_volcano_results, **kwargs)
# TODO scatter plot of significant genes
return fig, (ax, ax_unique_down, ax_unique_up)
@save_plot("pca_overview")
def save_pca_results(
    pca_data: pd.DataFrame, pca_fit: PCA = None, normalize: bool = True, intensity_label: str = "Intensity",
    color_map: Optional[dict] = None, show_suptitle: bool = True, **kwargs
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Saves image containing the pca results

    Parameters
    ----------
    pca_data:
        DataFrame containing transformed/dimensionally-reduced data with which PCA was performed
    pca_fit:
        PCA object that was fitted to normalized input data
    normalize:
        Boolean whether the transformed data should be normalized with the singular values before plotting
    intensity_label:
        text used as the figure suptitle (when show_suptitle is True)
    color_map:
        optional mapping from a top-level column value to a matplotlib color;
        overrides the automatically assigned "C0", "C1", ... cycle colors
    show_suptitle:
        Should the figure title be shown

    Returns
    -------
    tuple of the created figure and the n_components x n_components axes array
    """
    plt.close("all")
    # One subplot row/column per principal component present in the data.
    n_components = pca_data.shape[0]
    singular_values = np.ones(n_components)
    # Default color per top-level column group; user-supplied color_map wins.
    base_color_map = {value: f"C{i}" for i, value in enumerate(pca_data.columns.get_level_values(0).unique())}
    color_map = {} if color_map is None else color_map
    base_color_map.update(color_map)
    if normalize and pca_fit is None:
        warnings.warn("Normalizing not possible when pca_fit is None")
    elif normalize and pca_fit is not None:
        singular_values = pca_fit.singular_values_
    fig, axarr = plt.subplots(n_components, n_components, figsize=(14, 14))
    for row in range(n_components):
        row_pc = row + 1
        for col in range(n_components):
            col_pc = col + 1
            # Only the triangle with row > col is drawn: each PC pair once.
            if row > col:
                ax = axarr[col, row]
                ax.scatter(
                    pca_data.loc[f"PC_{row_pc}"] / singular_values[row],
                    pca_data.loc[f"PC_{col_pc}"] / singular_values[col],
                    c=[base_color_map.get(name, "blue") for name in pca_data.columns.get_level_values(0)])
                ax.set_xlabel(f"PC_{row_pc}")
                ax.set_ylabel(f"PC_{col_pc}")
    if show_suptitle:
        fig.suptitle(intensity_label, fontsize="xx-large")
    legend_elements = get_legend_elements(labels=pca_data.columns.get_level_values(0).unique(), color_map=base_color_map)
    fig.legend(handles=legend_elements, bbox_to_anchor=(1.02, 0.5), loc="center left", frameon=False, fontsize=20)
    fig.tight_layout(rect=[0, 0.03, 1, 0.95])
    return fig, axarr
@save_csvs({"protein_intensities": "{pathway}_protein_intensities",
            "significances": "{pathway}_pvalues"})
def save_pathway_analysis_results(
    protein_intensities: pd.DataFrame, significances: pd.DataFrame = None, pathway: str = "",
    show_suptitle: bool = False, threshold: float = 0.05, intensity_label: str = "Intensity", **kwargs
) -> Tuple[plt.Figure, plt.Axes]:
    f"""
    Saves per-protein intensity scatter plots for one pathway, once without
    annotations and, if significances are given, once with p-value markers.

    Parameters
    ----------
    protein_intensities
        DataFrame with one row per protein; the top column-index level
        determines the group each sample belongs to
    significances
        DataFrame of p-values per protein, indexed by pairs of group names;
        values at or below threshold are annotated in the plot
    pathway
        name of the pathway, used for the figure title and the file names
    show_suptitle
        should the pathway name be shown as figure title
    threshold
        largest p-value that still gets annotated
    intensity_label
        x axis label of each subplot
    kwargs
        {_get_path_and_name_kwargs_doc}

    Returns
    -------
    tuple of the created figure and axes array
    """
    plt.close("all")
    level_keys = list(protein_intensities.columns.get_level_values(0).unique())
    n_rows, n_cols = get_number_rows_cols_for_fig(protein_intensities.index)
    fig, axarr = plt.subplots(n_rows, n_cols, figsize=(n_cols * 4, int(n_rows * len(level_keys) / 1.5)))
    color_map = {value: f"C{i}" for i, value in enumerate(level_keys)}
    color_map.update(kwargs.get("color_map", {}))  # TODO move to function deceleration
    if show_suptitle:
        fig.suptitle(pathway)
    # One subplot per protein; samples are spread vertically by group.
    for protein, (pos, ax) in zip(protein_intensities.index, np.ndenumerate(axarr)):
        ax.scatter(protein_intensities.loc[protein], [level_keys.index(c) for c in protein_intensities.columns.get_level_values(0)],
                   c=[color_map[c] for c in protein_intensities.columns.get_level_values(0)])
        ax.set_title(protein)
        ax.set_ylim((-1, len(level_keys)))
        ax.set_yticks([i for i in range(len(level_keys))])
        ax.set_yticklabels(level_keys)
        ax.set_xlabel(intensity_label)
    fig.tight_layout(rect=[0, 0.03, 1, 0.95])
    # First save: plain version without significance annotations.
    path, plot_name = get_path_and_name_from_kwargs(name="{pathway}_no_labels", pathway=pathway, **kwargs)
    save_plot_func(fig, path, plot_name, save_pathway_analysis_results, **kwargs)
    if significances is not None:
        for protein, (pos, ax) in zip(protein_intensities.index, np.ndenumerate(axarr)):
            # adjust axis height based on number of significant differences
            to_annotate = significances.loc[protein]
            to_annotate = to_annotate[to_annotate <= threshold]
            xmin, xmax = ax.get_xbound()
            ax.set_xlim(right=xmax * (1 + to_annotate.shape[0] * 0.015))
            for i, (index, pval) in enumerate(to_annotate.items()):
                plot_annotate_line(ax, level_keys.index(index[0]), level_keys.index(index[1]), xmax * (1 + i * 0.015) - 0.005, pval)
        fig.tight_layout(rect=[0, 0.03, 1, 0.95])
        # Second save: annotated version under the plain pathway name.
        path, plot_name = get_path_and_name_from_kwargs(name="{pathway}", pathway=pathway, **kwargs)
        save_plot_func(fig, path, plot_name, save_pathway_analysis_results, **kwargs)
    return fig, axarr
@save_plot("boxplot")
def save_boxplot_results(
        protein_intensities: pd.DataFrame, intensity_label: str = "Intensity",
        plot: Optional[Tuple[plt.Figure, plt.Axes]] = None, vertical: bool = False, **kwargs
) -> Tuple[plt.Figure, plt.Axes]:
    f"""
    Boxplot of intensities, one box per column of ``protein_intensities``.

    Parameters
    ----------
    protein_intensities
        DataFrame where each column are the intensities to boxplot, column names will be used as labels
    intensity_label
        label of the intensity axis of the plot
    plot
        an existing (figure, axes) pair to draw into; when None a new figure is created
    vertical
        if True boxes are drawn vertically, otherwise horizontally
    kwargs
        {_get_path_and_name_kwargs_doc}
    """
    # TODO give colors to the different groups
    if plot is not None:
        fig, ax = plot
    else:
        plt.close("all")
        fig, ax = plt.subplots(figsize=(14, 1 + len(protein_intensities.columns) // 3))
    # indicate the overall median with a thin translucent line
    overall_median = np.nanmedian(protein_intensities.values.flatten())
    median_line = ax.axhline if vertical else ax.axvline
    median_line(overall_median, color="black", alpha=0.5, linewidth=1)
    # boxplot cannot handle NaN values, so drop them per column
    box_data = [
        protein_intensities.loc[:, col].dropna().tolist()
        for col in protein_intensities.columns
    ]
    ax.boxplot(box_data, vert=vertical, labels=protein_intensities.columns)
    if vertical:
        ax.set_ylabel(intensity_label)
    else:
        ax.set_xlabel(intensity_label)
    fig.tight_layout(rect=[0, 0.03, 1, 0.95])
    return fig, ax
@save_plot("rel_std_{experiment_name}")
def save_relative_std_results(
        intensities: pd.DataFrame, experiment_name: str, intensity_label: str = "Intensity",
        show_suptitle: bool = False, bins=(10, 20, 30), cmap: dict = None, **kwargs
) -> Tuple[plt.Figure, plt.Axes]:
    f"""
    Relative standard deviations of passed intensities with color marking based on the specified bins and color map

    Parameters
    ----------
    intensities
        DataFrame with experiment intensities to be plotted
    experiment_name
        name of the overall experiment
    intensity_label
        name of the intensities for the x label
    show_suptitle
        should figure suptitle be shown
    bins
        in which bins should the standard deviations be categorized
    cmap
        mapping for the digitized labels to a color

    kwargs
        {_get_path_and_name_kwargs_doc}

    Returns
    -------
    figure and axis of the plot
    """
    # TODO add percentage to absolute numbers
    # TODO see if code could be optimized
    plt.close("all")
    bins = np.array(bins)
    # bins are given in percent; in log2 space the thresholds must be transformed too
    if "Log_2" in intensity_label:
        bins = np.log2(bins)
    # default color per digitized bin index; user-provided cmap entries override these
    default_cm = {0: "navy", 1: "royalblue", 2: "skyblue", 3: "darkgray"}
    if cmap is not None:
        default_cm.update(cmap)
    # relative std (coefficient of variation) per protein, in percent
    relative_std_percent = intensities.std(axis=1) / intensities.mean(axis=1) * 100
    inds = np.digitize(relative_std_percent, bins).astype(int)
    # map each protein to the color of its bin; "black" for indices without a mapping
    plot_colors = pd.Series([default_cm.get(x, "black") for x in inds], index=relative_std_percent.index)
    color_counts = {color: (plot_colors == color).sum() for color in plot_colors.unique()}
    fig, ax = plt.subplots(1, 1, figsize=(14, 7))
    ax.scatter(intensities.mean(axis=1), relative_std_percent, c=plot_colors, marker="o", s=(2 * 72. / fig.dpi) ** 2,
               alpha=0.8)
    if show_suptitle:
        fig.suptitle(experiment_name)
    ax.set_xlabel(f"Mean {intensity_label}")
    ax.set_ylabel("Relative Standard deviation [%]")
    if "Log_2" not in intensity_label:
        ax.set_xscale('log')
    xmin, xmax = ax.get_xbound()
    cumulative_count = 0
    # draw one horizontal threshold line per bin, annotated with the cumulative
    # number of proteins that fall at or below that threshold
    for i, bin_ in enumerate(bins):
        cumulative_count += color_counts.get(default_cm[i], 0)
        ax.axhline(bin_, color=default_cm[i])
        ax.text(xmin, bin_, cumulative_count)
    fig.tight_layout(rect=[0, 0.03, 1, 0.95])
    return fig, ax
@save_plot("detected_counts")
def save_detection_counts_results(
        counts: pd.DataFrame, intensity_label: str = "Intensity", show_suptitle: bool = True, **kwargs
) -> Tuple[plt.Figure, plt.Axes]:
    f"""
    Per-experiment bar chart of in how many replicates proteins were detected

    Parameters
    ----------
    counts
        DataFrame containing the counts to be plotted
    intensity_label
        label of the dataframe
    show_suptitle
        should the figure title be shown
    kwargs
        {_get_path_and_name_kwargs_doc}

    Returns
    -------
    figure and axis of the plot
    """
    plt.close("all")
    n_rows, n_cols = get_number_rows_cols_for_fig(counts.columns)
    fig, axarr = plt.subplots(n_rows, n_cols, squeeze=True,
                              figsize=(5 * n_cols, 3 * n_rows))
    if show_suptitle:
        fig.suptitle(f"Detection counts from {intensity_label}")
    for (pos, ax), col in zip(np.ndenumerate(axarr), counts.columns):
        # missing entries carry no information, drop them before summing
        col_counts = counts.loc[:, col].dropna()
        ax.set_title(f"{col},\ntotal detected: {int(col_counts.sum())}")
        ax.barh(col_counts.index, col_counts, color="skyblue")
        # write each bar's value centered at half the maximum count
        for y_pos, bar_value in zip(col_counts.index, col_counts):
            ax.text(col_counts.max() / 2, y_pos, bar_value,
                    verticalalignment='center', horizontalalignment='center')
        ax.set_yticks(col_counts.index)
        ax.set_yticklabels([f"detected in {i} replicates" for i in col_counts.index])
        ax.set_xlabel("Counts")
    fig.tight_layout(rect=[0, 0.03, 1, 0.95])
    return fig, axarr
@save_plot("kde")
def save_kde_results(
        intensities: pd.DataFrame, quantile_range: Optional[np.ndarray] = None, n_points: int = 1000,
        cmap: Union[str, colors.Colormap] = "viridis", plot: Optional[Tuple[plt.Figure, plt.Axes]] = None,
        intensity_label: str = "Intensity", **kwargs
) -> Tuple[plt.Figure, plt.Axes]:
    f"""
    Kernel density estimate of each intensity column, colored by intensity quantile

    Parameters
    ----------
    intensities
        DataFrame of intensities; one KDE line is drawn per column
    quantile_range
        quantiles used for the color normalization, defaults to 0.05 .. 0.95 in steps of 0.05
    n_points
        number of x positions at which each density is evaluated
    cmap
        color map used to color the segments of each line
    plot
        an existing (figure, axes) pair to draw into; when None a new figure is created
    intensity_label
        label of the x axis of the plot
    kwargs
        {_get_path_and_name_kwargs_doc}

    Returns
    -------
    figure and axis of the plot
    """
    if plot is not None:
        fig, ax = plot
    else:
        plt.close("all")
        fig, ax = plt.subplots(1, 1)

    if quantile_range is None:
        quantile_range = np.arange(0.05, 1, 0.05)

    for col in intensities.columns:
        intensity_quantiles = intensities.loc[:, col].quantile(quantile_range)
        # fit only on non-NaN values, but evaluate slightly beyond the observed range
        kde_fit = gaussian_kde(intensities.loc[~pd.isna(intensities.loc[:, col]), col])
        x = np.linspace(intensities.loc[:, col].min() * 0.9, intensities.loc[:, col].max() * 1.1, n_points)
        y = kde_fit.evaluate(x)
        # Create a set of line segments so that we can color them individually
        # This creates the points as a N x 1 x 2 array so that we can stack points
        # together easily to get the segments. The segments array for line collection
        # needs to be (numlines) x (points per line) x 2 (for x and y)
        points = np.array([x, y]).T.reshape(-1, 1, 2)
        segments = np.concatenate([points[:-1], points[1:]], axis=1)
        # color each segment according to which intensity quantile its x value falls in
        norm = QuantileNormalize(quantiles=intensity_quantiles)
        lc = LineCollection(segments, cmap=cmap, norm=norm, alpha=0.5)
        # Set the values used for colormapping
        lc.set_array(x)
        lc.set_linewidth(1)
        ax.add_collection(lc)

    # labels and autoscaling only need to happen once, after all lines were added
    ax.set_ylabel("Density")
    ax.set_xlabel(intensity_label)
    ax.autoscale_view()

    fig.tight_layout(rect=[0, 0.03, 1, 0.95])
    return fig, ax
@save_plot("n_proteins_vs_quantile")
def save_n_proteins_vs_quantile_results(
quantiles: pd.DataFrame, n_proteins: pd.Series, nstd: int = 1, cmap: Union[str, colors.Colormap] = "viridis",
plot: Optional[Tuple[plt.Figure, plt.Axes]] = None, cbar_ax: Optional[plt.Axes] = None,
intensity_label: str = "Intensity", fill_between: bool = False, **kwargs
) -> Tuple[plt.Figure, Tuple[plt.Axes, plt.Axes]]:
f"""
Parameters
----------
quantiles
n_proteins
nstd
cmap
plot
cbar_ax
intensity_label
fill_between
kwargs
{_get_path_and_name_kwargs_doc}
Returns
-------
"""
if plot is not None:
fig, ax = plot
else:
plt.close("all")
fig, ax = plt.subplots(1, 1, figsize=(14, 7))
if not isinstance(cmap, colors.Colormap):
cmap = cm.get_cmap(cmap)
m = n_proteins.sort_values()
for quant in quantiles.index:
ax.scatter(quantiles.loc[quant, :], n_proteins, c=[cmap(quant)] * len(n_proteins), alpha=0.5)
popt, pcov = curve_fit(linear, n_proteins, quantiles.loc[quant, :])
fit = linear(m, *popt)
ax.plot(fit, m, color=cmap(quant))
if fill_between:
perr = np.sqrt(np.diag(pcov))
popt_up = popt + nstd * perr
popt_dw = popt - nstd * perr
| |
<gh_stars>100-1000
# Copyright Contributors to the OpenCue Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for classes related to hosts."""
import enum
import os
import time
from opencue import Cuebot
from opencue.compiled_proto import comment_pb2
from opencue.compiled_proto import host_pb2
import opencue.wrappers.comment
# pylint: disable=cyclic-import
import opencue.wrappers.proc
class Host(object):
"""This class contains the grpc implementation related to a Host."""
    class HardwareState(enum.IntEnum):
        """Enum representing the hardware state of the host.

        Values are taken directly from the host_pb2 proto module.
        """
        UP = host_pb2.UP
        DOWN = host_pb2.DOWN
        REBOOTING = host_pb2.REBOOTING
        REBOOT_WHEN_IDLE = host_pb2.REBOOT_WHEN_IDLE
        REPAIR = host_pb2.REPAIR
    class HostTagType(enum.IntEnum):
        """Enum representing the type of a host tag.

        Values are taken directly from the host_pb2 proto module.
        """
        MANUAL = host_pb2.MANUAL
        HARDWARE = host_pb2.HARDWARE
        ALLOC = host_pb2.ALLOC
        HOSTNAME = host_pb2.HOSTNAME
    class LockState(enum.IntEnum):
        """Enum representing whether the host is locked.

        Values are taken directly from the host_pb2 proto module.
        """
        OPEN = host_pb2.OPEN
        LOCKED = host_pb2.LOCKED
        NIMBY_LOCKED = host_pb2.NIMBY_LOCKED
    class ThreadMode(enum.IntEnum):
        """Enum representing the thread mode of the host.

        Values are taken directly from the host_pb2 proto module.
        """
        AUTO = host_pb2.AUTO
        ALL = host_pb2.ALL
        VARIABLE = host_pb2.VARIABLE
def __init__(self, host=None):
self.data = host
self.__id = host.id
self.stub = Cuebot.getStub('host')
def lock(self):
"""Locks the host so that it no longer accepts new frames"""
self.stub.Lock(host_pb2.HostLockRequest(host=self.data), timeout=Cuebot.Timeout)
def unlock(self):
"""Unlocks the host.
Cancels any actions that were waiting for all running frames to finish."""
self.stub.Unlock(host_pb2.HostUnlockRequest(host=self.data), timeout=Cuebot.Timeout)
def delete(self):
"""Deletes the host from the cuebot"""
self.stub.Delete(host_pb2.HostDeleteRequest(host=self.data), timeout=Cuebot.Timeout)
def getProcs(self):
"""Returns a list of procs under this host.
:rtype: list<opencue.wrappers.proc.Proc>
:return: A list of procs under this host
"""
response = self.stub.GetProcs(host_pb2.HostGetProcsRequest(host=self.data),
timeout=Cuebot.Timeout)
return [opencue.wrappers.proc.Proc(p) for p in response.procs.procs]
def redirectToJob(self, procs, job):
"""Unbooks and redirects the proc to the specified job. Optionally
kills the proc immediately.
:param procs: list<opencue.wrappers.proc.Proc>
:param job: job id
"""
self.stub.RedirectToJob(
host_pb2.HostRedirectToJobRequest(host=self.data,
proc_names=[proc.data.id for proc in procs],
job_id=job.data.id), timeout=Cuebot.Timeout)
def getRenderPartitions(self):
"""Returns a list of render partitions associated with this host
:rtype: list<renderPartition_pb2.RenderPartition>
:return: list of render partitions under this host
"""
response = self.stub.GetRenderPartitions(host_pb2.HostGetRenderPartitionsRequest(
host=self.data), timeout=Cuebot.Timeout)
partitionSeq = response.render_partitions
return partitionSeq.render_partitions
def rebootWhenIdle(self):
"""Sets the machine to reboot once idle.
The host will no longer accept new frames."""
self.stub.RebootWhenIdle(host_pb2.HostRebootWhenIdleRequest(host=self.data),
timeout=Cuebot.Timeout)
def reboot(self):
"""Causes the host to kill all running frames and reboot the machine."""
self.stub.Reboot(host_pb2.HostRebootRequest(host=self.data), timeout=Cuebot.Timeout)
def addTags(self, tags):
"""Adds tags to a host.
:type tags: list<str>
:param tags: The tags to add
"""
self.stub.AddTags(host_pb2.HostAddTagsRequest(host=self.data, tags=tags),
timeout=Cuebot.Timeout)
def removeTags(self, tags):
"""Removes tags from this host.
:type tags: list<str>
:param tags: The tags to remove
"""
self.stub.RemoveTags(host_pb2.HostRemoveTagsRequest(host=self.data, tags=tags),
timeout=Cuebot.Timeout)
def renameTag(self, oldTag, newTag):
"""Renames a tag.
:type oldTag: str
:param oldTag: old tag to rename
:type newTag: str
:param newTag: new name for the tag
"""
self.stub.RenameTag(
host_pb2.HostRenameTagRequest(host=self.data, old_tag=oldTag, new_tag=newTag),
timeout=Cuebot.Timeout)
def setAllocation(self, allocation):
"""Sets the host to the given allocation.
:type allocation: opencue.wrappers.allocation.Allocation
:param allocation: allocation to put the host under
"""
self.stub.SetAllocation(
host_pb2.HostSetAllocationRequest(host=self.data, allocation_id=allocation.id()),
timeout=Cuebot.Timeout)
def addComment(self, subject, message):
"""Appends a comment to the host's comment list.
:type subject: str
:param subject: Subject data
:type message: str
:param message: Message data
"""
comment = comment_pb2.Comment(
user=os.getenv("USER", "unknown"),
subject=subject,
message=message or " ",
timestamp=0
)
self.stub.AddComment(host_pb2.HostAddCommentRequest(host=self.data, new_comment=comment),
timeout=Cuebot.Timeout)
def getComments(self):
"""Returns the host's comment list.
:rtype: list<opencue.wrappers.comment.Comment>
:return: the comment list of the host
"""
response = self.stub.GetComments(host_pb2.HostGetCommentsRequest(host=self.data),
timeout=Cuebot.Timeout)
commentSeq = response.comments
return [opencue.wrappers.comment.Comment(c) for c in commentSeq.comments]
def setHardwareState(self, state):
"""Sets the host hardware state.
:type state: host_pb2.HardwareState
:param state: state to set host to
"""
self.stub.SetHardwareState(
host_pb2.HostSetHardwareStateRequest(host=self.data, state=state),
timeout=Cuebot.Timeout)
def setOs(self, osName):
"""Sets the host operating system.
:type osName: string
:param osName: os value to set host to
"""
self.stub.SetOs(host_pb2.HostSetOsRequest(host=self.data, os=osName),
timeout=Cuebot.Timeout)
def setThreadMode(self, mode):
"""Sets the host thread mode.
:type mode: host_pb2.ThreadMode
:param mode: ThreadMode to set host to
"""
self.stub.SetThreadMode(host_pb2.HostSetThreadModeRequest(host=self.data, mode=mode),
timeout=Cuebot.Timeout)
def id(self):
"""Returns the id of the host.
:rtype: str
:return: id of the host
"""
if not hasattr(self, "__id"):
self.__id = self.data.id
return self.__id
    def name(self):
        """Returns the name of the host.

        :rtype:  str
        :return: name of the host
        """
        return self.data.name
    def isNimbyEnabled(self):
        """Returns true if nimby is enabled.

        :rtype:  bool
        :return: True if nimby is enabled
        """
        return self.data.nimby_enabled
    def isUp(self):
        """Returns True if the host hardware state indicates the machine is up.

        :rtype:  bool
        :return: True if the host is up
        """
        return self.data.state == host_pb2.HardwareState.Value('UP')
    def isLocked(self):
        """Returns True if the host is locked.

        Note: NIMBY_LOCKED is a distinct state and does not count as LOCKED here.

        :rtype:  bool
        :return: True if the host is locked
        """
        return self.data.lock_state == host_pb2.LockState.Value('LOCKED')
    def isCommented(self):
        """Returns true if the host has a comment.

        :rtype:  bool
        :return: whether the host has a comment
        """
        return self.data.has_comment
    def cores(self):
        """Returns the total number of cores the host has.

        :rtype:  float
        :return: total number of host cores
        """
        return self.data.cores
def coresReserved(self):
"""Returns the number of cores the host has which are currently reserved.
:rtype: float
:return: number of cores reserved
"""
return self.data.cores - self.data.idle_ores
    def coresIdle(self):
        """Returns the number of cores the host currently has idle.

        :rtype:  float
        :return: number of cores idle
        """
        return self.data.idle_cores
    def mem(self):
        """Returns the amount of memory the host has in kb.

        :rtype:  int
        :return: amount of memory in kb
        """
        return self.data.memory
    def memReserved(self):
        """Returns the amount of memory the host has currently reserved.

        :rtype:  int
        :return: amount of memory reserved in kb
        """
        # reserved = allocatable memory minus the portion still idle
        return self.data.memory - self.data.idle_memory
    def memIdle(self):
        """Returns the amount of memory the host currently has idle.

        :rtype:  int
        :return: amount of idle memory in kb
        """
        return self.data.idle_memory
    def memUsed(self):
        """Returns the amount of memory the host currently has in use.

        :rtype:  int
        :return: amount of in-use memory in kb
        """
        # used = physical total minus free, as reported by the host
        return self.data.total_memory - self.data.free_memory
    def memTotal(self):
        """Returns the total amount of memory the host has.

        :rtype:  int
        :return: total amount of memory on host
        """
        return self.data.total_memory
    def memFree(self):
        """Returns the amount of memory the host currently has free.

        :rtype:  int
        :return: amount of free memory in kb
        """
        return self.data.free_memory
    def swapUsed(self):
        """Returns the amount of swap space the host has in use.

        :rtype:  int
        :return: amount of swap used in kb
        """
        return self.data.total_swap - self.data.free_swap
    def swapTotal(self):
        """Returns the total amount of swap space the host has.

        :rtype:  int
        :return: total amount of swap space in kb
        """
        return self.data.total_swap
    def swapFree(self):
        """Returns the amount of free swap space the host has.

        :rtype:  int
        :return: amount of free swap space in kb
        """
        return self.data.free_swap
    def mcpUsed(self):
        """Returns the amount of /mcp space the host is using.

        :rtype:  int
        :return: amount of mcp used in kb
        """
        # derived from the total/free accessors rather than a direct data field
        return self.mcpTotal() - self.mcpFree()
    def mcpTotal(self):
        """Returns the total amount of /mcp space the host has.

        :rtype:  int
        :return: total amount of mcp in kb
        """
        return self.data.total_mcp
    def mcpFree(self):
        """Returns the amount of free /mcp space the host has.

        :rtype:  int
        :return: amount of mcp free in kb
        """
        return self.data.free_mcp
    def load(self):
        """Returns the host load average.

        :rtype:  int
        :return: host load average * 100
        """
        return self.data.load
    def bootTime(self):
        """Returns the time the host was booted.

        :rtype:  int
        :return: host boot time as an epoch
        """
        return self.data.boot_time
    def pingTime(self):
        """Returns the last time the host sent a status report.

        :rtype:  int
        :return: last ping time as an epoch
        """
        return self.data.ping_time
    def pingLast(self):
        """Returns the number of seconds since the last time the host sent a status report.

        :rtype:  int
        :return: seconds since last ping
        """
        # compares local wall-clock time against the host-reported ping epoch
        return int(time.time() - self.pingTime())
    def tags(self):
        """Returns the tags the host has.

        :rtype:  list<str>
        :return: list of tags applied to the host
        """
        return self.data.tags
    def state(self):
        """Returns the hardware state of the host.

        :rtype:  host_pb2.HardwareState
        :return: the hardware state of the host
        """
        return self.data.state
def lockState(self):
"""Returns the lock state of the host.
:rtype: host_pb2.LockState
:return: | |
328, 32, 328, 41),
woosh.Token(woosh.OP, ',', 328, 41, 328, 42),
woosh.Token(woosh.NAME, 'self', 328, 43, 328, 47),
woosh.Token(woosh.OP, '.', 328, 47, 328, 48),
woosh.Token(woosh.NAME, '_name', 328, 48, 328, 53),
woosh.Token(woosh.OP, ',', 328, 53, 328, 54),
woosh.Token(woosh.OP, '**', 328, 55, 328, 57),
woosh.Token(woosh.NAME, 'self', 328, 57, 328, 61),
woosh.Token(woosh.OP, '.', 328, 61, 328, 62),
woosh.Token(woosh.NAME, '_kwargs', 328, 62, 328, 69),
woosh.Token(woosh.OP, ')', 328, 69, 328, 70),
woosh.Token(woosh.OP, ',', 328, 70, 328, 71),
woosh.Token(woosh.NAME, 'self', 328, 72, 328, 76),
woosh.Token(woosh.OP, '.', 328, 76, 328, 77),
woosh.Token(woosh.NAME, '_args', 328, 77, 328, 82),
woosh.Token(woosh.NEWLINE, '\r\n', 328, 82, 329, 0),
woosh.Token(woosh.COMMENT, '# In-place Operations *********************************************************#', 331, 0, 331, 80),
woosh.Token(woosh.DEDENT, '', 333, 0, 333, 0),
woosh.Token(woosh.DEDENT, '', 333, 0, 333, 0),
woosh.Token(woosh.DEDENT, '', 333, 0, 333, 0),
woosh.Token(woosh.NAME, 'def', 333, 0, 333, 3),
woosh.Token(woosh.NAME, 'iadd', 333, 4, 333, 8),
woosh.Token(woosh.OP, '(', 333, 8, 333, 9),
woosh.Token(woosh.NAME, 'a', 333, 9, 333, 10),
woosh.Token(woosh.OP, ',', 333, 10, 333, 11),
woosh.Token(woosh.NAME, 'b', 333, 12, 333, 13),
woosh.Token(woosh.OP, ')', 333, 13, 333, 14),
woosh.Token(woosh.OP, ':', 333, 14, 333, 15),
woosh.Token(woosh.NEWLINE, '\r\n', 333, 15, 334, 0),
woosh.Token(woosh.INDENT, ' ', 334, 0, 334, 4),
woosh.Token(woosh.STRING, '"Same as a += b."', 334, 4, 334, 21),
woosh.Token(woosh.NEWLINE, '\r\n', 334, 21, 335, 0),
woosh.Token(woosh.NAME, 'a', 335, 4, 335, 5),
woosh.Token(woosh.OP, '+=', 335, 6, 335, 8),
woosh.Token(woosh.NAME, 'b', 335, 9, 335, 10),
woosh.Token(woosh.NEWLINE, '\r\n', 335, 10, 336, 0),
woosh.Token(woosh.NAME, 'return', 336, 4, 336, 10),
woosh.Token(woosh.NAME, 'a', 336, 11, 336, 12),
woosh.Token(woosh.NEWLINE, '\r\n', 336, 12, 337, 0),
woosh.Token(woosh.DEDENT, '', 338, 0, 338, 0),
woosh.Token(woosh.NAME, 'def', 338, 0, 338, 3),
woosh.Token(woosh.NAME, 'iand', 338, 4, 338, 8),
woosh.Token(woosh.OP, '(', 338, 8, 338, 9),
woosh.Token(woosh.NAME, 'a', 338, 9, 338, 10),
woosh.Token(woosh.OP, ',', 338, 10, 338, 11),
woosh.Token(woosh.NAME, 'b', 338, 12, 338, 13),
woosh.Token(woosh.OP, ')', 338, 13, 338, 14),
woosh.Token(woosh.OP, ':', 338, 14, 338, 15),
woosh.Token(woosh.NEWLINE, '\r\n', 338, 15, 339, 0),
woosh.Token(woosh.INDENT, ' ', 339, 0, 339, 4),
woosh.Token(woosh.STRING, '"Same as a &= b."', 339, 4, 339, 21),
woosh.Token(woosh.NEWLINE, '\r\n', 339, 21, 340, 0),
woosh.Token(woosh.NAME, 'a', 340, 4, 340, 5),
woosh.Token(woosh.OP, '&=', 340, 6, 340, 8),
woosh.Token(woosh.NAME, 'b', 340, 9, 340, 10),
woosh.Token(woosh.NEWLINE, '\r\n', 340, 10, 341, 0),
woosh.Token(woosh.NAME, 'return', 341, 4, 341, 10),
woosh.Token(woosh.NAME, 'a', 341, 11, 341, 12),
woosh.Token(woosh.NEWLINE, '\r\n', 341, 12, 342, 0),
woosh.Token(woosh.DEDENT, '', 343, 0, 343, 0),
woosh.Token(woosh.NAME, 'def', 343, 0, 343, 3),
woosh.Token(woosh.NAME, 'iconcat', 343, 4, 343, 11),
woosh.Token(woosh.OP, '(', 343, 11, 343, 12),
woosh.Token(woosh.NAME, 'a', 343, 12, 343, 13),
woosh.Token(woosh.OP, ',', 343, 13, 343, 14),
woosh.Token(woosh.NAME, 'b', 343, 15, 343, 16),
woosh.Token(woosh.OP, ')', 343, 16, 343, 17),
woosh.Token(woosh.OP, ':', 343, 17, 343, 18),
woosh.Token(woosh.NEWLINE, '\r\n', 343, 18, 344, 0),
woosh.Token(woosh.INDENT, ' ', 344, 0, 344, 4),
woosh.Token(woosh.STRING, '"Same as a += b, for a and b sequences."', 344, 4, 344, 44),
woosh.Token(woosh.NEWLINE, '\r\n', 344, 44, 345, 0),
woosh.Token(woosh.NAME, 'if', 345, 4, 345, 6),
woosh.Token(woosh.NAME, 'not', 345, 7, 345, 10),
woosh.Token(woosh.NAME, 'hasattr', 345, 11, 345, 18),
woosh.Token(woosh.OP, '(', 345, 18, 345, 19),
woosh.Token(woosh.NAME, 'a', 345, 19, 345, 20),
woosh.Token(woosh.OP, ',', 345, 20, 345, 21),
woosh.Token(woosh.STRING, "'__getitem__'", 345, 22, 345, 35),
woosh.Token(woosh.OP, ')', 345, 35, 345, 36),
woosh.Token(woosh.OP, ':', 345, 36, 345, 37),
woosh.Token(woosh.NEWLINE, '\r\n', 345, 37, 346, 0),
woosh.Token(woosh.INDENT, ' ', 346, 0, 346, 8),
woosh.Token(woosh.NAME, 'msg', 346, 8, 346, 11),
woosh.Token(woosh.OP, '=', 346, 12, 346, 13),
woosh.Token(woosh.STRING, '"\'%s\' object can\'t be concatenated"', 346, 14, 346, 49),
woosh.Token(woosh.OP, '%', 346, 50, 346, 51),
woosh.Token(woosh.NAME, 'type', 346, 52, 346, 56),
woosh.Token(woosh.OP, '(', 346, 56, 346, 57),
woosh.Token(woosh.NAME, 'a', 346, 57, 346, 58),
woosh.Token(woosh.OP, ')', 346, 58, 346, 59),
woosh.Token(woosh.OP, '.', 346, 59, 346, 60),
woosh.Token(woosh.NAME, '__name__', 346, 60, 346, 68),
woosh.Token(woosh.NEWLINE, '\r\n', 346, 68, 347, 0),
woosh.Token(woosh.NAME, 'raise', 347, 8, 347, 13),
woosh.Token(woosh.NAME, 'TypeError', 347, 14, 347, 23),
woosh.Token(woosh.OP, '(', 347, 23, 347, 24),
woosh.Token(woosh.NAME, 'msg', 347, 24, 347, 27),
woosh.Token(woosh.OP, ')', 347, 27, 347, 28),
woosh.Token(woosh.NEWLINE, '\r\n', 347, 28, 348, 0),
woosh.Token(woosh.DEDENT, ' ', 348, 0, 348, 4),
woosh.Token(woosh.NAME, 'a', 348, 4, 348, 5),
woosh.Token(woosh.OP, '+=', 348, 6, 348, 8),
woosh.Token(woosh.NAME, 'b', 348, 9, 348, 10),
woosh.Token(woosh.NEWLINE, '\r\n', 348, 10, 349, 0),
woosh.Token(woosh.NAME, 'return', 349, 4, 349, 10),
woosh.Token(woosh.NAME, 'a', 349, 11, 349, 12),
woosh.Token(woosh.NEWLINE, '\r\n', 349, 12, 350, 0),
woosh.Token(woosh.DEDENT, '', 351, 0, 351, 0),
woosh.Token(woosh.NAME, 'def', 351, 0, 351, 3),
woosh.Token(woosh.NAME, 'ifloordiv', 351, 4, 351, 13),
woosh.Token(woosh.OP, '(', 351, 13, 351, 14),
woosh.Token(woosh.NAME, 'a', 351, 14, 351, 15),
woosh.Token(woosh.OP, ',', 351, 15, 351, 16),
woosh.Token(woosh.NAME, 'b', 351, 17, 351, 18),
woosh.Token(woosh.OP, ')', 351, 18, 351, 19),
woosh.Token(woosh.OP, ':', 351, 19, 351, 20),
woosh.Token(woosh.NEWLINE, '\r\n', 351, 20, 352, 0),
woosh.Token(woosh.INDENT, ' ', 352, 0, 352, 4),
woosh.Token(woosh.STRING, '"Same as a //= b."', 352, 4, 352, 22),
woosh.Token(woosh.NEWLINE, '\r\n', 352, 22, 353, 0),
woosh.Token(woosh.NAME, 'a', 353, 4, 353, 5),
woosh.Token(woosh.OP, '//=', 353, 6, 353, 9),
woosh.Token(woosh.NAME, 'b', 353, 10, 353, 11),
woosh.Token(woosh.NEWLINE, '\r\n', 353, 11, 354, 0),
woosh.Token(woosh.NAME, 'return', 354, 4, 354, 10),
woosh.Token(woosh.NAME, 'a', 354, 11, 354, 12),
woosh.Token(woosh.NEWLINE, '\r\n', 354, 12, 355, 0),
woosh.Token(woosh.DEDENT, '', 356, 0, 356, 0),
woosh.Token(woosh.NAME, 'def', 356, 0, 356, 3),
woosh.Token(woosh.NAME, 'ilshift', 356, 4, 356, 11),
woosh.Token(woosh.OP, '(', 356, 11, 356, 12),
woosh.Token(woosh.NAME, 'a', 356, 12, 356, 13),
woosh.Token(woosh.OP, ',', 356, 13, 356, 14),
woosh.Token(woosh.NAME, 'b', 356, 15, 356, 16),
woosh.Token(woosh.OP, ')', 356, 16, 356, 17),
woosh.Token(woosh.OP, ':', 356, 17, 356, 18),
woosh.Token(woosh.NEWLINE, '\r\n', 356, 18, 357, 0),
woosh.Token(woosh.INDENT, ' ', 357, 0, 357, 4),
woosh.Token(woosh.STRING, '"Same as a <<= b."', 357, 4, 357, 22),
woosh.Token(woosh.NEWLINE, '\r\n', 357, 22, 358, 0),
woosh.Token(woosh.NAME, 'a', 358, 4, 358, 5),
woosh.Token(woosh.OP, '<<=', 358, 6, 358, 9),
woosh.Token(woosh.NAME, 'b', 358, 10, 358, 11),
woosh.Token(woosh.NEWLINE, '\r\n', 358, 11, 359, 0),
woosh.Token(woosh.NAME, 'return', 359, 4, 359, 10),
woosh.Token(woosh.NAME, 'a', 359, 11, 359, 12),
woosh.Token(woosh.NEWLINE, '\r\n', 359, 12, 360, 0),
woosh.Token(woosh.DEDENT, '', 361, 0, 361, 0),
woosh.Token(woosh.NAME, 'def', 361, 0, 361, 3),
woosh.Token(woosh.NAME, 'imod', 361, 4, 361, 8),
woosh.Token(woosh.OP, '(', 361, 8, 361, 9),
woosh.Token(woosh.NAME, 'a', 361, 9, 361, 10),
woosh.Token(woosh.OP, ',', 361, 10, 361, 11),
woosh.Token(woosh.NAME, 'b', 361, 12, 361, 13),
woosh.Token(woosh.OP, ')', 361, 13, 361, 14),
woosh.Token(woosh.OP, ':', 361, 14, 361, 15),
woosh.Token(woosh.NEWLINE, '\r\n', 361, 15, 362, 0),
woosh.Token(woosh.INDENT, ' ', 362, 0, 362, 4),
woosh.Token(woosh.STRING, '"Same as a %= b."', 362, 4, 362, 21),
woosh.Token(woosh.NEWLINE, '\r\n', 362, 21, 363, 0),
woosh.Token(woosh.NAME, 'a', 363, 4, 363, 5),
woosh.Token(woosh.OP, '%=', 363, 6, 363, 8),
woosh.Token(woosh.NAME, 'b', 363, 9, 363, 10),
woosh.Token(woosh.NEWLINE, '\r\n', 363, 10, 364, 0),
woosh.Token(woosh.NAME, 'return', 364, 4, 364, 10),
woosh.Token(woosh.NAME, 'a', 364, 11, 364, 12),
woosh.Token(woosh.NEWLINE, '\r\n', 364, 12, 365, 0),
woosh.Token(woosh.DEDENT, '', 366, 0, 366, 0),
woosh.Token(woosh.NAME, 'def', 366, 0, 366, 3),
woosh.Token(woosh.NAME, 'imul', 366, 4, 366, 8),
woosh.Token(woosh.OP, '(', 366, 8, 366, 9),
woosh.Token(woosh.NAME, 'a', 366, 9, 366, 10),
woosh.Token(woosh.OP, ',', 366, 10, 366, 11),
woosh.Token(woosh.NAME, 'b', 366, 12, 366, 13),
woosh.Token(woosh.OP, ')', 366, 13, 366, 14),
woosh.Token(woosh.OP, ':', 366, 14, 366, 15),
woosh.Token(woosh.NEWLINE, '\r\n', 366, 15, 367, 0),
woosh.Token(woosh.INDENT, ' ', 367, 0, 367, 4),
woosh.Token(woosh.STRING, '"Same as a *= b."', 367, 4, 367, 21),
woosh.Token(woosh.NEWLINE, '\r\n', 367, 21, 368, 0),
woosh.Token(woosh.NAME, 'a', 368, 4, 368, 5),
woosh.Token(woosh.OP, '*=', 368, 6, 368, 8),
woosh.Token(woosh.NAME, 'b', 368, 9, 368, 10),
woosh.Token(woosh.NEWLINE, '\r\n', 368, 10, 369, 0),
woosh.Token(woosh.NAME, 'return', 369, 4, 369, 10),
woosh.Token(woosh.NAME, 'a', 369, 11, 369, 12),
woosh.Token(woosh.NEWLINE, '\r\n', 369, 12, 370, 0),
woosh.Token(woosh.DEDENT, '', 371, 0, 371, 0),
woosh.Token(woosh.NAME, 'def', 371, 0, 371, 3),
woosh.Token(woosh.NAME, 'imatmul', 371, 4, 371, 11),
woosh.Token(woosh.OP, '(', 371, 11, 371, 12),
woosh.Token(woosh.NAME, 'a', 371, 12, 371, 13),
woosh.Token(woosh.OP, ',', 371, 13, 371, 14),
woosh.Token(woosh.NAME, 'b', 371, 15, 371, 16),
woosh.Token(woosh.OP, ')', 371, 16, 371, 17),
woosh.Token(woosh.OP, ':', 371, 17, 371, 18),
woosh.Token(woosh.NEWLINE, '\r\n', 371, 18, 372, 0),
woosh.Token(woosh.INDENT, ' ', 372, 0, 372, 4),
woosh.Token(woosh.STRING, '"Same as a @= b."', 372, 4, 372, 21),
woosh.Token(woosh.NEWLINE, '\r\n', 372, 21, 373, 0),
woosh.Token(woosh.NAME, 'a', 373, 4, 373, 5),
woosh.Token(woosh.OP, '@=', 373, 6, 373, 8),
woosh.Token(woosh.NAME, 'b', 373, 9, 373, 10),
woosh.Token(woosh.NEWLINE, '\r\n', 373, 10, 374, 0),
woosh.Token(woosh.NAME, 'return', 374, 4, 374, 10),
woosh.Token(woosh.NAME, 'a', 374, 11, 374, 12),
woosh.Token(woosh.NEWLINE, '\r\n', 374, 12, 375, 0),
woosh.Token(woosh.DEDENT, '', 376, 0, 376, 0),
woosh.Token(woosh.NAME, 'def', 376, 0, 376, 3),
woosh.Token(woosh.NAME, 'ior', 376, 4, 376, 7),
woosh.Token(woosh.OP, '(', 376, 7, 376, 8),
woosh.Token(woosh.NAME, 'a', 376, 8, 376, 9),
woosh.Token(woosh.OP, ',', 376, 9, 376, 10),
woosh.Token(woosh.NAME, 'b', 376, 11, 376, 12),
woosh.Token(woosh.OP, ')', 376, 12, 376, 13),
woosh.Token(woosh.OP, ':', 376, 13, 376, 14),
woosh.Token(woosh.NEWLINE, '\r\n', 376, 14, 377, 0),
woosh.Token(woosh.INDENT, ' ', 377, 0, 377, 4),
woosh.Token(woosh.STRING, '"Same as a |= b."', 377, 4, 377, 21),
woosh.Token(woosh.NEWLINE, '\r\n', 377, 21, 378, 0),
woosh.Token(woosh.NAME, 'a', 378, 4, 378, 5),
woosh.Token(woosh.OP, '|=', 378, 6, 378, 8),
woosh.Token(woosh.NAME, 'b', 378, 9, 378, 10),
woosh.Token(woosh.NEWLINE, '\r\n', 378, 10, 379, 0),
woosh.Token(woosh.NAME, 'return', 379, 4, 379, 10),
woosh.Token(woosh.NAME, 'a', 379, 11, 379, 12),
woosh.Token(woosh.NEWLINE, '\r\n', 379, 12, 380, 0),
woosh.Token(woosh.DEDENT, '', 381, 0, 381, 0),
woosh.Token(woosh.NAME, 'def', 381, 0, 381, 3),
woosh.Token(woosh.NAME, 'ipow', 381, 4, 381, 8),
woosh.Token(woosh.OP, '(', 381, 8, 381, 9),
woosh.Token(woosh.NAME, 'a', 381, 9, 381, 10),
woosh.Token(woosh.OP, ',', 381, 10, 381, 11),
woosh.Token(woosh.NAME, 'b', 381, 12, 381, 13),
woosh.Token(woosh.OP, ')', 381, 13, 381, 14),
woosh.Token(woosh.OP, ':', 381, 14, 381, 15),
woosh.Token(woosh.NEWLINE, '\r\n', 381, 15, 382, 0),
woosh.Token(woosh.INDENT, ' ', 382, 0, 382, 4),
woosh.Token(woosh.STRING, '"Same as a **= b."', 382, 4, 382, 22),
woosh.Token(woosh.NEWLINE, '\r\n', 382, 22, 383, 0),
woosh.Token(woosh.NAME, 'a', 383, 4, 383, 5),
woosh.Token(woosh.OP, '**=', 383, 6, 383, 9),
woosh.Token(woosh.NAME, 'b', 383, | |
<filename>edb/edgeql/parser/grammar/sdl.py
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2019-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from edb.errors import EdgeQLSyntaxError
from edb.edgeql import ast as qlast
from edb.common import parsing
from . import expressions
from . import commondl
from . import tokens
from .precedence import * # NOQA
from .tokens import * # NOQA
from .commondl import * # NOQA
# Short aliases so the grammar classes below can use unqualified names.
Nonterm = expressions.Nonterm
OptSemicolons = commondl.OptSemicolons
ListNonterm = parsing.ListNonterm
# Helper that creates new nonterminal classes bound to this module.
# NOTE(review): "nontem" looks like a typo for "nonterm"; name left unchanged
# in case it is referenced from other modules.
sdl_nontem_helper = commondl.NewNontermHelper(__name__)
_new_nonterm = sdl_nontem_helper._new_nonterm
def _process_commands(block):
    """Partition the nodes of an SDL command block into declaration kwargs.

    Walks *block* (a list of qlast nodes produced by a *SDLCommandsBlock
    production) and groups the nodes by kind.  Returns a dict suitable
    for splatting into the various ``qlast.*Declaration`` constructors.

    Raises EdgeQLSyntaxError on duplicate FROM FUNCTION, FROM <code>,
    or ON TARGET DELETE clauses.
    """
    props = {}
    annotations = []
    fields = []
    properties = []
    links = []
    constraints = []
    indexes = []
    on_target_delete = None
    code = None
    language = qlast.Language.SQL
    from_expr = False
    from_function = None
    for node in block:
        if isinstance(node, qlast.SDLFunctionCode):
            if node.from_function:
                if from_function is not None:
                    raise EdgeQLSyntaxError(
                        'more than one FROM FUNCTION clause',
                        context=node.context)
                from_function = node.from_function
            elif node.code:
                if code is not None:
                    raise EdgeQLSyntaxError(
                        'more than one FROM <code> clause',
                        context=node.context)
                code = node.code
                language = node.language
            else:
                # FROM SQL EXPRESSION
                from_expr = True
        elif isinstance(node, qlast.Annotation):
            annotations.append(node)
        elif isinstance(node, qlast.Field):
            fields.append(node)
        elif isinstance(node, qlast.Property):
            properties.append(node)
        elif isinstance(node, qlast.Link):
            links.append(node)
        elif isinstance(node, qlast.Constraint):
            constraints.append(node)
        elif isinstance(node, qlast.IndexDeclaration):
            indexes.append(node)
        elif isinstance(node, qlast.SDLOnTargetDelete):
            if on_target_delete:
                raise EdgeQLSyntaxError(
                    # idiom fix: this literal had a useless f-prefix
                    # (no placeholders); the message text is unchanged.
                    "more than one 'on target delete' specification",
                    context=node.context)
            else:
                on_target_delete = node
    if from_expr or from_function or code:
        props['function_code'] = qlast.SDLFunctionCode(
            language=language,
            from_function=from_function,
            from_expr=from_expr,
            code=code,
        )
    # Only emit keys for non-empty groups so the declaration
    # constructors keep their own defaults otherwise.
    if annotations:
        props['annotations'] = annotations
    if fields:
        props['fields'] = fields
    if properties:
        props['properties'] = properties
    if links:
        props['links'] = links
    if constraints:
        props['constraints'] = constraints
    if indexes:
        props['indexes'] = indexes
    if on_target_delete:
        props['on_target_delete'] = on_target_delete
    return props
# top-level SDL statements
class SDLStatement(Nonterm):
    # NOTE(review): reduce_* method names appear to encode grammar
    # productions (the parser framework derives rules from them) —
    # do not rename these methods.
    def reduce_SDLBlockStatement(self, *kids):
        self.val = kids[0].val
    def reduce_SDLShortStatement_SEMICOLON(self, *kids):
        self.val = kids[0].val
# a list of SDL statements with optional semicolon separators
class SDLStatements(ListNonterm, element=SDLStatement,
                    separator=OptSemicolons):
    # List behavior is provided entirely by ListNonterm.
    pass
# These statements have a block
class SDLBlockStatement(Nonterm):
    # Declarations that carry a { ... } command block; all productions
    # simply pass the child declaration's AST through.
    def reduce_ScalarTypeDeclaration(self, *kids):
        self.val = kids[0].val
    def reduce_AnnotationDeclaration(self, *kids):
        self.val = kids[0].val
    def reduce_ObjectTypeDeclaration(self, *kids):
        self.val = kids[0].val
    def reduce_ViewDeclaration(self, *kids):
        self.val = kids[0].val
    def reduce_ConstraintDeclaration(self, *kids):
        self.val = kids[0].val
    def reduce_LinkDeclaration(self, *kids):
        self.val = kids[0].val
    def reduce_PropertyDeclaration(self, *kids):
        self.val = kids[0].val
    def reduce_FunctionDeclaration(self, *kids):
        self.val = kids[0].val
# these statements have no {} block
class SDLShortStatement(Nonterm):
    # Declarations without a { ... } block (terminated by ";").
    def reduce_IMPORT_ImportModuleList(self, *kids):
        self.val = qlast.Import(modules=kids[1].val)
    def reduce_IMPORT_LPAREN_ImportModuleList_RPAREN(self, *kids):
        self.val = qlast.Import(modules=kids[2].val)
    def reduce_ScalarTypeDeclarationShort(self, *kids):
        self.val = kids[0].val
    def reduce_AnnotationDeclarationShort(self, *kids):
        self.val = kids[0].val
    def reduce_ObjectTypeDeclarationShort(self, *kids):
        self.val = kids[0].val
    def reduce_ViewDeclarationShort(self, *kids):
        self.val = kids[0].val
    def reduce_ConstraintDeclarationShort(self, *kids):
        self.val = kids[0].val
    def reduce_LinkDeclarationShort(self, *kids):
        self.val = kids[0].val
    def reduce_PropertyDeclarationShort(self, *kids):
        self.val = kids[0].val
    def reduce_FunctionDeclarationShort(self, *kids):
        self.val = kids[0].val
    def reduce_IndexDeclaration(self, *kids):
        self.val = kids[0].val
class DotName(Nonterm):
    # A dotted module path, e.g. "std.math", built from ModuleName parts.
    def reduce_ModuleName(self, *kids):
        # kids[0].val is already an iterable of string parts; join it
        # directly — the previous generator expression
        # ('.'.join(part for part in kids[0].val)) was a redundant
        # identity pass over the same iterable.
        self.val = '.'.join(kids[0].val)
class ImportModule(Nonterm):
    # A single imported module, optionally aliased ("import a.b AS c").
    def reduce_DotName(self, *kids):
        self.val = qlast.ImportModule(module=kids[0].val)
    def reduce_DotName_AS_AnyIdentifier(self, *kids):
        self.val = qlast.ImportModule(module=kids[0].val,
                                      alias=kids[2].val)
class ImportModuleList(ListNonterm, element=ImportModule,
                       separator=tokens.T_COMMA):
    # Comma-separated list; behavior provided by ListNonterm.
    pass
class SDLProductionHelper:
    # Reusable reduce-method bodies.  These are injected as reduce_*
    # methods into the nonterminal classes generated by
    # sdl_commands_block() (via clsdict), so `self` is the generated
    # nonterminal instance, not an SDLProductionHelper.
    def _passthrough(self, *cmds):
        self.val = cmds[0].val
    def _singleton_list(self, cmd):
        self.val = [cmd.val]
    def _empty(self):
        self.val = []
    def _block(self, lbrace, sc1, cmdl, rbrace):
        self.val = [cmdl.val]
    def _block2(self, lbrace, sc1, cmdlist, sc2, rbrace):
        self.val = cmdlist.val
    def _block3(self, lbrace, sc1, cmdlist, sc2, cmd, rbrace):
        self.val = cmdlist.val + [cmd.val]
def sdl_commands_block(parent, *commands, opt=True):
    """Generate the family of nonterminals implementing an SDL command block.

    For the given *parent* prefix and the *commands* nonterminals this
    registers (via _new_nonterm, into this module's namespace):
    {parent}SDLCommandShort, {parent}SDLCommandBlock,
    {parent}SDLCommandFull, {parent}SDLCommandsList,
    {parent}SDLCommandsBlock and, when ``opt`` is False,
    {parent}SingleSDLCommandBlock.
    """
    if parent is None:
        parent = ''
    # SDLCommand := SDLCommand1 | SDLCommand2 ...
    #
    # All the "short" commands, ones that need a ";" are gathered as
    # SDLCommandShort.
    #
    # All the "block" commands, ones that have a "{...}" and don't
    # need a ";" are gathered as SDLCommandBlock.
    clsdict_b = {}
    clsdict_s = {}
    for command in commands:
        if command.__name__.endswith('Block'):
            clsdict_b[f'reduce_{command.__name__}'] = \
                SDLProductionHelper._passthrough
        else:
            clsdict_s[f'reduce_{command.__name__}'] = \
                SDLProductionHelper._passthrough
    cmd_s = _new_nonterm(f'{parent}SDLCommandShort', clsdict=clsdict_s)
    cmd_b = _new_nonterm(f'{parent}SDLCommandBlock', clsdict=clsdict_b)
    # Merged command which has minimal ";"
    #
    # SDLCommandFull := SDLCommandShort ; | SDLCommandBlock
    clsdict = {}
    clsdict[f'reduce_{cmd_s.__name__}_SEMICOLON'] = \
        SDLProductionHelper._passthrough
    clsdict[f'reduce_{cmd_b.__name__}'] = \
        SDLProductionHelper._passthrough
    cmd = _new_nonterm(f'{parent}SDLCommandFull', clsdict=clsdict)
    # SDLCommandsList := SDLCommandFull [; SDLCommandFull ...]
    cmdlist = _new_nonterm(f'{parent}SDLCommandsList', clsbases=(ListNonterm,),
                           clskwds=dict(element=cmd, separator=OptSemicolons))
    # Command block is tricky, but the inner commands must terminate
    # without a ";", if possible.
    #
    # SDLCommandsBlock :=
    #
    #   { [ ; ] SDLCommandFull }
    #   { [ ; ] SDLCommandsList [ ; ]} |
    #   { [ ; ] SDLCommandsList [ ; ] SDLCommandFull }
    clsdict = {}
    clsdict[f'reduce_LBRACE_OptSemicolons_{cmd_s.__name__}_RBRACE'] = \
        SDLProductionHelper._block
    # idiom fix: build each key as a single (implicitly concatenated)
    # f-string instead of "+"-joining fragments, one of which carried a
    # pointless f-prefix.  The resulting strings are identical.
    clsdict[f'reduce_LBRACE_OptSemicolons_{cmdlist.__name__}_'
            f'OptSemicolons_RBRACE'] = \
        SDLProductionHelper._block2
    clsdict[f'reduce_LBRACE_OptSemicolons_{cmdlist.__name__}_OptSemicolons_'
            f'{cmd_s.__name__}_RBRACE'] = \
        SDLProductionHelper._block3
    _new_nonterm(f'{parent}SDLCommandsBlock', clsdict=clsdict)
    if opt is False:
        #   | Command
        clsdict = {}
        clsdict[f'reduce_{cmd_s.__name__}'] = \
            SDLProductionHelper._singleton_list
        clsdict[f'reduce_{cmd_b.__name__}'] = \
            SDLProductionHelper._singleton_list
        # consistency: use an f-string like every other name above
        # (was: parent + 'SingleSDLCommandBlock').
        _new_nonterm(f'{parent}SingleSDLCommandBlock', clsdict=clsdict)
class SetField(Nonterm):
    # field := <expr>
    def reduce_ShortNodeName_ASSIGN_Expr(self, *kids):
        self.val = qlast.Field(name=kids[0].val, value=kids[2].val)
class SetAnnotation(Nonterm):
    # annotation name := <expr>
    def reduce_ANNOTATION_ShortNodeName_ASSIGN_Expr(self, *kids):
        self.val = qlast.Annotation(name=kids[1].val, value=kids[3].val)
# Generate the generic Create*SDLCommands* nonterminals
# (fields and annotations only).
sdl_commands_block(
    'Create',
    SetField,
    SetAnnotation)
#
# CREATE CONSTRAINT
#
class ConstraintDeclaration(Nonterm):
    # Abstract constraint definition carrying a { ... } command block.
    # The %reduce docstrings are parser productions — keep them intact.
    def reduce_CreateConstraint(self, *kids):
        r"""%reduce ABSTRACT CONSTRAINT ShortNodeName OptOnExpr \
                    OptExtendingSimple CreateSDLCommandsBlock"""
        self.val = qlast.ConstraintDeclaration(
            abstract=True,
            name=kids[2].val.name,
            subject=kids[3].val,
            extends=kids[4].val,
            **_process_commands(kids[5].val)
        )
    def reduce_CreateConstraint_CreateFunctionArgs(self, *kids):
        r"""%reduce ABSTRACT CONSTRAINT ShortNodeName CreateFunctionArgs \
                    OptOnExpr OptExtendingSimple CreateSDLCommandsBlock"""
        self.val = qlast.ConstraintDeclaration(
            abstract=True,
            name=kids[2].val.name,
            params=kids[3].val,
            subject=kids[4].val,
            extends=kids[5].val,
            **_process_commands(kids[6].val)
        )
class ConstraintDeclarationShort(Nonterm):
    # Abstract constraint definition without a command block.
    def reduce_CreateConstraint(self, *kids):
        r"""%reduce ABSTRACT CONSTRAINT ShortNodeName OptOnExpr \
                    OptExtendingSimple"""
        self.val = qlast.ConstraintDeclaration(
            abstract=True,
            name=kids[2].val.name,
            subject=kids[3].val,
            extends=kids[4].val,
        )
    def reduce_CreateConstraint_CreateFunctionArgs(self, *kids):
        r"""%reduce ABSTRACT CONSTRAINT ShortNodeName CreateFunctionArgs \
                    OptOnExpr OptExtendingSimple"""
        self.val = qlast.ConstraintDeclaration(
            abstract=True,
            name=kids[2].val.name,
            params=kids[3].val,
            subject=kids[4].val,
            extends=kids[5].val,
        )
class ConcreteConstraintBlock(Nonterm):
    # Concrete (possibly delegated) constraint with a command block.
    def reduce_CreateConstraint(self, *kids):
        r"""%reduce CONSTRAINT \
                    NodeName OptConcreteConstraintArgList OptOnExpr \
                    CreateSDLCommandsBlock"""
        self.val = qlast.Constraint(
            delegated=False,
            name=kids[1].val,
            args=kids[2].val,
            subject=kids[3].val,
            **_process_commands(kids[4].val)
        )
    def reduce_CreateDelegatedConstraint(self, *kids):
        r"""%reduce DELEGATED CONSTRAINT \
                    NodeName OptConcreteConstraintArgList OptOnExpr \
                    CreateSDLCommandsBlock"""
        self.val = qlast.Constraint(
            delegated=True,
            name=kids[2].val,
            args=kids[3].val,
            subject=kids[4].val,
            **_process_commands(kids[5].val)
        )
class ConcreteConstraintShort(Nonterm):
    # Concrete (possibly delegated) constraint without a command block.
    def reduce_CreateConstraint(self, *kids):
        r"""%reduce CONSTRAINT \
                    NodeName OptConcreteConstraintArgList OptOnExpr"""
        self.val = qlast.Constraint(
            delegated=False,
            name=kids[1].val,
            args=kids[2].val,
            subject=kids[3].val,
        )
    def reduce_CreateDelegatedConstraint(self, *kids):
        r"""%reduce DELEGATED CONSTRAINT \
                    NodeName OptConcreteConstraintArgList OptOnExpr"""
        self.val = qlast.Constraint(
            delegated=True,
            name=kids[2].val,
            args=kids[3].val,
            subject=kids[4].val,
        )
#
# CREATE SCALAR TYPE
#
# Generate CreateScalarType*SDLCommands* nonterminals: scalar type
# bodies additionally allow concrete constraints.
sdl_commands_block(
    'CreateScalarType',
    SetField,
    SetAnnotation,
    ConcreteConstraintBlock,
    ConcreteConstraintShort,
)
class ScalarTypeDeclaration(Nonterm):
    # Scalar type definition (abstract / final / plain) with a command block.
    def reduce_CreateAbstractScalarTypeStmt(self, *kids):
        r"""%reduce \
            ABSTRACT SCALAR TYPE ShortNodeName \
            OptExtending CreateScalarTypeSDLCommandsBlock \
        """
        self.val = qlast.ScalarTypeDeclaration(
            abstract=True,
            name=kids[3].val.name,
            extends=kids[4].val,
            **_process_commands(kids[5].val)
        )
    def reduce_CreateFinalScalarTypeStmt(self, *kids):
        r"""%reduce \
            FINAL SCALAR TYPE ShortNodeName \
            OptExtending CreateScalarTypeSDLCommandsBlock \
        """
        self.val = qlast.ScalarTypeDeclaration(
            final=True,
            name=kids[3].val.name,
            extends=kids[4].val,
            **_process_commands(kids[5].val)
        )
    def reduce_ScalarTypeDeclaration(self, *kids):
        r"""%reduce \
            SCALAR TYPE ShortNodeName \
            OptExtending CreateScalarTypeSDLCommandsBlock \
        """
        self.val = qlast.ScalarTypeDeclaration(
            name=kids[2].val.name,
            extends=kids[3].val,
            **_process_commands(kids[4].val)
        )
class ScalarTypeDeclarationShort(Nonterm):
    # Scalar type definition (abstract / final / plain) without a block.
    def reduce_CreateAbstractScalarTypeStmt(self, *kids):
        r"""%reduce \
            ABSTRACT SCALAR TYPE ShortNodeName \
            OptExtending \
        """
        self.val = qlast.ScalarTypeDeclaration(
            abstract=True,
            name=kids[3].val.name,
            extends=kids[4].val,
        )
    def reduce_CreateFinalScalarTypeStmt(self, *kids):
        r"""%reduce \
            FINAL SCALAR TYPE ShortNodeName \
            OptExtending \
        """
        self.val = qlast.ScalarTypeDeclaration(
            final=True,
            name=kids[3].val.name,
            extends=kids[4].val,
        )
    def reduce_ScalarTypeDeclaration(self, *kids):
        r"""%reduce \
            SCALAR TYPE ShortNodeName \
            OptExtending \
        """
        self.val = qlast.ScalarTypeDeclaration(
            name=kids[2].val.name,
            extends=kids[3].val,
        )
#
# CREATE ANNOTATION
#
class AnnotationDeclaration(Nonterm):
    # Abstract annotation definition with a { ... } command block.
    def reduce_CreateAnnotation(self, *kids):
        r"""%reduce ABSTRACT ANNOTATION ShortNodeName OptExtendingSimple \
                    CreateSDLCommandsBlock"""
        self.val = qlast.AnnotationDeclaration(
            abstract=True,
            name=kids[2].val.name,
            extends=kids[3].val,
            inheritable=False,
            **_process_commands(kids[4].val)
        )
    def reduce_CreateInheritableAnnotation(self, *kids):
        r"""%reduce ABSTRACT INHERITABLE ANNOTATION
            ShortNodeName OptExtendingSimple CreateSDLCommandsBlock"""
        self.val = qlast.AnnotationDeclaration(
            abstract=True,
            name=kids[3].val.name,
            extends=kids[4].val,
            inheritable=True,
            # BUGFIX: the command block is kids[5] (CreateSDLCommandsBlock);
            # the code previously passed kids[4] (OptExtendingSimple) to
            # _process_commands, so the block's fields/annotations were
            # silently dropped for inheritable annotations.
            **_process_commands(kids[5].val)
        )
class AnnotationDeclarationShort(Nonterm):
    # Abstract annotation definition without a command block.
    def reduce_CreateAnnotation(self, *kids):
        r"""%reduce ABSTRACT ANNOTATION ShortNodeName OptExtendingSimple"""
        self.val = qlast.AnnotationDeclaration(
            abstract=True,
            name=kids[2].val.name,
            extends=kids[3].val,
            inheritable=False,
        )
    def reduce_CreateInheritableAnnotation(self, *kids):
        r"""%reduce ABSTRACT INHERITABLE ANNOTATION
            ShortNodeName OptExtendingSimple"""
        self.val = qlast.AnnotationDeclaration(
            abstract=True,
            name=kids[3].val.name,
            extends=kids[4].val,
            inheritable=True,
        )
#
# CREATE INDEX
#
class IndexDeclaration(Nonterm):
    # index name on (<expr>)
    def reduce_INDEX_ShortNodeName_OnExpr(self, *kids):
        self.val = qlast.IndexDeclaration(
            name=kids[1].val,
            expression=kids[2].val
        )
#
# CREATE PROPERTY
#
class PropertyDeclaration(Nonterm):
    # Abstract property definition with a { ... } command block.
    def reduce_CreateProperty(self, *kids):
        r"""%reduce ABSTRACT PROPERTY ShortNodeName OptExtendingSimple \
                    CreateSDLCommandsBlock \
        """
        self.val = qlast.PropertyDeclaration(
            abstract=True,
            name=kids[2].val.name,
            extends=kids[3].val,
            **_process_commands(kids[4].val)
        )
class PropertyDeclarationShort(Nonterm):
    # Abstract property definition without a command block.
    def reduce_CreateProperty(self, *kids):
        r"""%reduce ABSTRACT PROPERTY ShortNodeName OptExtendingSimple"""
        self.val = qlast.PropertyDeclaration(
            abstract=True,
            name=kids[2].val.name,
            extends=kids[3].val,
        )
#
# CREATE LINK ... { CREATE PROPERTY
#
# Generate CreateConcreteProperty*SDLCommands* nonterminals: concrete
# property bodies allow fields, annotations and concrete constraints.
sdl_commands_block(
    'CreateConcreteProperty',
    SetField,
    SetAnnotation,
    ConcreteConstraintBlock,
    ConcreteConstraintShort,
)
class ConcretePropertyBlock(Nonterm):
def reduce_CreateRegularProperty(self, *kids):
"""%reduce
PROPERTY ShortNodeName OptExtendingSimple
ARROW FullTypeExpr CreateConcretePropertySDLCommandsBlock
"""
self.val = qlast.Property(
name=kids[1].val.name,
extends=kids[2].val,
target=qlast.get_targets(kids[4].val),
**_process_commands(kids[5].val)
)
def reduce_CreateQualifiedRegularProperty(self, *kids):
"""%reduce
PtrQuals PROPERTY ShortNodeName OptExtendingSimple
ARROW FullTypeExpr CreateConcretePropertySDLCommandsBlock
"""
self.val = qlast.Property(
name=kids[2].val.name,
extends=kids[3].val,
inherited=kids[0].val.inherited,
required=kids[0].val.required,
cardinality=kids[0].val.cardinality,
target=qlast.get_targets(kids[5].val),
**_process_commands(kids[6].val)
| |
ENTRY_AC, PUB_ID
FROM INTERPRO.SUPPLEMENTARY_REF
"""
)
for accession, pub_id in cur:
try:
e = entries[accession]
except KeyError:
continue
else:
e.literature[pub_id] = citations[pub_id]
# Cross-references
cur.execute(
"""
SELECT X.ENTRY_AC, X.AC, LOWER(D.DBSHORT)
FROM INTERPRO.ENTRY_XREF X
INNER JOIN INTERPRO.CV_DATABASE D ON X.DBCODE = D.DBCODE
"""
)
for accession, ref_id, ref_db in cur:
try:
e = entries[accession]
except KeyError:
continue
try:
e.cross_references[ref_db].append(ref_id)
except KeyError:
e.cross_references[ref_db] = [ref_id]
return list(entries.values())
def _get_signatures(cur: cx_Oracle.Cursor) -> List[Entry]:
    """Load member-database signatures as Entry objects.

    Fetches every signature except MobiDB-Lite, together with its type,
    source database and (for checked InterPro entries only) the entry it
    is integrated in, then attaches descriptions and literature
    references.

    :param cur: open Oracle cursor on the INTERPRO schema.
    :return: list of Entry objects, one per signature.
    """
    cur.execute(
        """
        SELECT
          M.METHOD_AC, M.NAME, M.DESCRIPTION, M.ABSTRACT, M.ABSTRACT_LONG,
          M.METHOD_DATE, ET.ABBREV, LOWER(DB.DBSHORT), E2M.ENTRY_AC
        FROM INTERPRO.METHOD M
        INNER JOIN INTERPRO.CV_ENTRY_TYPE ET
          ON M.SIG_TYPE = ET.CODE
        INNER JOIN INTERPRO.CV_DATABASE DB
          ON M.DBCODE = DB.DBCODE
        LEFT OUTER JOIN INTERPRO.ENTRY2METHOD E2M
          ON M.METHOD_AC = E2M.METHOD_AC
          AND E2M.ENTRY_AC IN (
            SELECT ENTRY_AC
            FROM INTERPRO.ENTRY
            WHERE CHECKED='Y'
          )
        WHERE M.DBCODE != 'g'  -- discarding MobiDB-Lite
        """
    )
    entries = {}
    for row in cur:
        accession = row[0]
        # NAME equal to the accession means there is no real short name.
        short_name = row[1] if row[1] != accession else None
        name = row[2]
        creation_date = row[5]
        entry_type = row[6]
        database = row[7]
        integrated_in = row[8]
        e = Entry(accession, entry_type, name, short_name, database)
        e.creation_date = creation_date
        e.integrated_in = integrated_in
        # Prefer the long abstract (CLOB, needs .read()) over the short one.
        if row[4]:
            text = row[4].read()
        elif row[3]:
            text = row[3]
        else:
            text = None
        if text is not None:
            # BUGFIX: this used text.lstrip("<p>").rstrip("</p>"), but
            # lstrip/rstrip strip *character sets*, not substrings, so
            # e.g. "<p>protein ...".lstrip("<p>") also ate the leading
            # "p" of "protein".  Strip the exact tags instead.
            if text.startswith("<p>"):
                text = text[len("<p>"):]
            if text.endswith("</p>"):
                text = text[:-len("</p>")]
            e.description.append(text)
        entries[accession] = e
    # Literature references
    citations = _get_citations(cur)
    cur.execute(
        """
        SELECT METHOD_AC, PUB_ID
        FROM INTERPRO.METHOD2PUB
        """
    )
    for accession, pub_id in cur:
        try:
            e = entries[accession]
        except KeyError:
            continue
        else:
            e.literature[pub_id] = citations[pub_id]
    return list(entries.values())
class EntryXrefs:
    """Cross-references accumulated for a single InterPro entry/signature."""

    def __init__(self):
        # One container per cross-reference category, all empty initially.
        self.ida = set()          # domain-architecture IDs
        self.matches = 0          # total number of matches
        self.proteins = set()     # (accession, identifier) pairs
        self.proteomes = set()
        self.structures = set()   # PDB IDs
        self.taxa = set()         # taxon IDs

    def asdict(self):
        """Return the cross-references as a plain dict keyed by category."""
        return dict(
            domain_architectures=self.ida,
            matches=self.matches,
            proteins=self.proteins,
            proteomes=self.proteomes,
            structures=self.structures,
            taxa=self.taxa,
        )
class Supermatch:
    # A merged region of overlapping InterPro entry matches on one protein.
    # `members` holds (entry accession, hierarchy root) pairs; `start`/`end`
    # is the envelope of all fragments.
    def __init__(self, acc: str, frags: Sequence[dict], root: Optional[str]):
        self.members = {(acc, root)}
        self.fragments = frags
        """
        frags is sorted by (start, end):
            - start of the first frag is guaranteed to be the leftmost one
            - end of the last frag is NOT guaranteed to be the rightmost one
                (e.g. [(5, 100), (6, 80)])
        """
        self.start = frags[0]["start"]
        self.end = max(f["end"] for f in frags)
    def __eq__(self, other) -> bool:
        return self.start == other.start and self.end == other.end
    def __ne__(self, other) -> bool:
        return not self == other
    def __lt__(self, other) -> bool:
        # NOTE(review): start OR end smaller — not a total order; it is
        # only used to sort supermatches before the merge loop.
        return self.start < other.start or self.end < other.end
    def __le__(self, other) -> bool:
        return self == other or self < other
    def __gt__(self, other) -> bool:
        return self.start > other.start or self.end > other.end
    def __ge__(self, other) -> bool:
        return self == other or self > other
    @property
    def entries(self):
        # Accessions only, hierarchy roots dropped.
        return [acc for acc, root in self.members]
    def stringify_fragments(self):
        # e.g. "5-100,120-180"
        return ','.join(["{start}-{end}".format(**f) for f in self.fragments])
    def overlaps(self, other, min_overlap: float) -> bool:
        # Returns True and MERGES `other` into `self` (members, envelope
        # and fragments) when both supermatches share a hierarchy root and
        # overlap by at least `min_overlap` of the shorter envelope.
        for acc1, root1 in self.members:
            for acc2, root2 in other.members:
                if root1 != root2:
                    return False
        # All members are in the same hierarchy
        overlap = min(self.end, other.end) - max(self.start, other.start) + 1
        shortest = min(self.end - self.start, other.end - other.start) + 1
        if overlap < shortest * min_overlap:
            # Supermatches do not significantly overlap
            return False
        # Merge supermatches
        self.members |= other.members
        self.start = min(self.start, other.start)
        self.end = max(self.end, other.end)
        # Merge fragments
        fragments = []
        for f1 in sorted(self.fragments+other.fragments, key=repr_fragment):
            start1 = f1["start"]
            end1 = f1["end"]
            for f2 in fragments:
                start2 = f2["start"]
                end2 = f2["end"]
                overlap = min(end1, end2) - max(start1, start2) + 1
                shortest = min(end1 - start1, end2 - start2) + 1
                if overlap >= shortest * min_overlap:
                    # Merge f1 into f2
                    f2["start"] = min(start1, start2)
                    f2["end"] = max(end1, end2)
                    break
            else:
                # f1 does not overlap with any other fragments
                fragments.append(f1)
        self.fragments = fragments
        return True
def _process_proteins(inqueue: Queue, entries: Mapping[str, Entry],
                      min_overlap: float, dt: DirectoryTree, outqueue: Queue):
    # Worker: consumes per-protein records from `inqueue` (until a None
    # sentinel) and accumulates per-entry cross-references, IDA domain
    # architectures, supermatch counts/intersections and InterPro->ENZYME /
    # Reactome mappings, dumping intermediate results to temporary files.
    # Puts a single summary tuple on `outqueue` when done.
    # NOTE(review): `min_overlap` is used as a fraction (see Supermatch),
    # so it was re-annotated from `bool` to `float`.
    xrefs = {}                  # temporary dict accession->xrefs
    xref_files = []             # files containing xrefs
    entries_with_xrefs = set()  # accession of entries having xrefs
    entry_counts = {}           # number of matches
    entry_intersections = {}    # number of overlapping matches
    interpro2enzyme = {}        # InterPro-ENZYME mapping
    interpro2reactome = {}      # InterPro-Reactome mapping
    ida_file = dt.mktemp()
    with DumpFile(ida_file, compress=True) as ida_df:
        i = 0
        for obj in iter(inqueue.get, None):
            uniprot_acc = obj[0]        # str
            protein_info = obj[1]       # dict
            matches = obj[2]            # dict
            proteome_id = obj[3]        # str or None
            pdb_entries = obj[4]        # dict
            enzymes = obj[5]            # set
            pathways = obj[6]           # set
            supermatches = []
            all_locations = []
            for entry_acc, locations in matches.items():
                entry = entries[entry_acc]
                if entry.database == "interpro":
                    # Adding EC / Reactome mapping
                    if enzymes:
                        try:
                            interpro2enzyme[entry_acc] |= enzymes
                        except KeyError:
                            interpro2enzyme[entry_acc] = enzymes.copy()
                    if pathways:
                        try:
                            interpro2reactome[entry_acc] |= pathways
                        except KeyError:
                            interpro2reactome[entry_acc] = pathways.copy()
                elif entry.database == "pfam":
                    # Storing matches for IDA
                    for loc in locations:
                        all_locations.append({
                            "pfam": entry_acc,
                            "interpro": entry.integrated_in,
                            # We do not consider fragmented locations
                            "start": loc["fragments"][0]["start"],
                            "end": max(f["end"] for f in loc["fragments"])
                        })
                # Adding cross-references (except IDA, still being calculated)
                try:
                    entry_xrefs = xrefs[entry_acc]
                except KeyError:
                    entry_xrefs = xrefs[entry_acc] = EntryXrefs()
                    entries_with_xrefs.add(entry_acc)
                entry_xrefs.matches += len(locations)
                entry_xrefs.proteins.add((
                    uniprot_acc,
                    protein_info["identifier"]
                ))
                if proteome_id:
                    entry_xrefs.proteomes.add(proteome_id)
                for pdb_id, chains in pdb_entries.items():
                    for chain_id, segments in chains.items():
                        if overlaps_pdb_chain(locations, segments):
                            entry_xrefs.structures.add(pdb_id)
                            break  # Skip other chains
                entry_xrefs.taxa.add(protein_info["taxid"])
                # Create a Supermatch for each integrated signature match
                if entry.integrated_in:
                    # Integrated member database signature
                    interpro_acc = entry.integrated_in
                    root = entries[interpro_acc].hierarchy["accession"]
                    for loc in locations:
                        sm = Supermatch(interpro_acc, loc["fragments"], root)
                        supermatches.append(sm)
            # Finishing IDA
            domains = []
            dom_members = set()
            for loc in sorted(all_locations, key=repr_fragment):
                if loc["interpro"]:
                    domains.append(f"{loc['pfam']}:{loc['interpro']}")
                    dom_members.add(loc["interpro"])
                else:
                    domains.append(loc["pfam"])
                dom_members.add(loc["pfam"])
            if domains:
                # Flush IDA
                dom_str = '-'.join(domains)
                dom_id = hashlib.sha1(dom_str.encode("utf-8")).hexdigest()
                ida_df.dump((uniprot_acc, dom_members, dom_str, dom_id))
                # Adding cross-references now
                for key in dom_members:
                    xrefs[key].ida.add(dom_id)
            # Merging overlapping supermatches
            merged = []
            for sm_to_merge in sorted(supermatches):
                for sm_merged in merged:
                    if sm_merged.overlaps(sm_to_merge, min_overlap):
                        """
                        Supermatches overlap
                            (sm_to_merge has been merged into sm_merged)
                        """
                        break
                else:
                    # sm_to_merge does not overlap with any other supermatches
                    merged.append(sm_to_merge)
            # Group by entry
            merged_grouped = {}
            for sm in merged:
                for interpro_acc in sm.entries:
                    try:
                        merged_grouped[interpro_acc] += sm.fragments
                    except KeyError:
                        merged_grouped[interpro_acc] = list(sm.fragments)
            # Evaluate how entries overlap
            for interpro_acc, fragments1 in merged_grouped.items():
                try:
                    entry_counts[interpro_acc] += 1
                except KeyError:
                    entry_counts[interpro_acc] = 1
                for other_acc, fragments2 in merged_grouped.items():
                    if other_acc >= interpro_acc:
                        # Process each unordered pair only once.
                        continue
                    try:
                        obj = entry_intersections[interpro_acc]
                    except KeyError:
                        obj = entry_intersections[interpro_acc] = {}
                    try:
                        overlaps = obj[other_acc]
                    except KeyError:
                        """
                        Use a dict rather than a list (or tuple)
                        because deepupdate() would concatenate the lists
                        created by different workers
                        """
                        overlaps = obj[other_acc] = {
                            "1": 0,
                            "2": 0,
                        }
                    flag = 0
                    for f1 in fragments1:
                        start1 = f1["start"]
                        end1 = f1["end"]
                        length1 = end1 - start1 + 1
                        for f2 in fragments2:
                            start2 = f2["start"]
                            end2 = f2["end"]
                            length2 = end2 - start2 + 1
                            overlap = min(end1, end2) - max(start1, start2) + 1
                            if not flag & 1 and overlap >= length1 * 0.5:
                                # 1st time fragments overlap >= 50% of f1
                                flag |= 1
                                overlaps["1"] += 1
                            if not flag & 2 and overlap >= length2 * 0.5:
                                # 1st time fragments overlap >= 50% of f2
                                flag |= 2
                                overlaps["2"] += 1
                        if flag == 3:
                            """
                            Both cases already happened
                                -> no need to keep iterating
                            """
                            break
            i += 1
            if not i % 100000:
                # Flush Xrefs
                file = dt.mktemp()
                with DumpFile(file, compress=True) as xref_df:
                    for entry_acc in sorted(xrefs):
                        xref_df.dump((entry_acc, xrefs[entry_acc].asdict()))
                xrefs = {}
                xref_files.append(file)
    # Remaining xrefs
    file = dt.mktemp()
    with DumpFile(file, compress=True) as df:
        for entry_acc in sorted(xrefs):
            df.dump((entry_acc, xrefs[entry_acc].asdict()))
    xref_files.append(file)
    # Merge files (each worker will produce one merged file)
    xref_file = dt.mktemp()
    with DumpFile(xref_file, compress=True) as df:
        for entry_acc, xrefs in merge_dumps(xref_files):
            df.dump((entry_acc, xrefs))
    outqueue.put((
        xref_file,
        entries_with_xrefs,
        ida_file,
        entry_counts,
        entry_intersections,
        interpro2enzyme,
        interpro2reactome
    ))
def export_entries(ipr_url: str, goa_url: str, intact_url: str, swpr_url: str,
p_metacyc: str, p_clans: str, p_proteins: str,
p_structures: str, p_uniprot2matches: str,
p_uniprot2proteome: str, p_uniprot2ida: str,
p_entry2xrefs: str, p_entries: str, **kwargs):
min_overlap = kwargs.get("overlap", 0.2)
processes = kwargs.get("processes", 1)
min_similarity = kwargs.get("similarity", 0.75)
tmpdir = kwargs.get("tmpdir")
con = cx_Oracle.connect(ipr_url)
cur = | |
view_no(self):
return self._data.view_no
    @property
    def sent_preprepares(self):
        # PRE-PREPAREs sent by this replica while acting as primary.
        return self._data.sent_preprepares
    @property
    def prepares(self):
        return self._data.prepares
    @property
    def commits(self):
        return self._data.commits
    @property
    def requested_pre_prepares(self):
        return self._data.requested_pre_prepares
    @property
    def last_ordered_3pc(self):
        # (view_no, pp_seq_no) of the last ordered batch.
        return self._data.last_ordered_3pc
    @last_ordered_3pc.setter
    def last_ordered_3pc(self, lo_tuple):
        self._data.last_ordered_3pc = lo_tuple
        pp_seq_no = lo_tuple[1]
        # Keep the last PRE-PREPARE sequence number monotonically increasing.
        if pp_seq_no > self.lastPrePrepareSeqNo:
            self.lastPrePrepareSeqNo = pp_seq_no
        self._logger.info('{} set last ordered as {}'.format(self, lo_tuple))
    @property
    def last_preprepare(self):
        # The PRE-PREPARE with the greatest 3PC key, considering both
        # those we sent (as primary) and those we received.
        last_3pc = (0, 0)
        lastPp = None
        if self.sent_preprepares:
            (v, s), pp = self.sent_preprepares.peekitem(-1)
            last_3pc = (v, s)
            lastPp = pp
        if self.prePrepares:
            (v, s), pp = self.prePrepares.peekitem(-1)
            # Replace only if the received one has a greater 3PC key.
            if compare_3PC_keys(last_3pc, (v, s)) > 0:
                lastPp = pp
        return lastPp
    @property
    def __last_pp_3pc(self):
        # 3PC key of the newest known PRE-PREPARE, but never older than
        # the last ordered 3PC key.
        last_pp = self.last_preprepare
        if not last_pp:
            return self.last_ordered_3pc
        last_3pc = (last_pp.viewNo, last_pp.ppSeqNo)
        if compare_3PC_keys(self.last_ordered_3pc, last_3pc) > 0:
            return last_3pc
        return self.last_ordered_3pc
    @property
    def db_manager(self):
        return self._write_manager.database_manager
    @property
    def is_master(self):
        # True when this is the master instance's replica.
        return self._data.is_master
    @property
    def primary_name(self):
        """
        Name of the primary replica of this replica's instance

        :return: Returns name if primary is known, None otherwise
        """
        return self._data.primary_name
    @property
    def name(self):
        return self._data.name
    @name.setter
    def name(self, n):
        self._data._name = n
    @property
    def f(self):
        # Maximum number of faulty nodes tolerated by the pool.
        return getMaxFailures(self._data.total_nodes)
    def gc(self, till3PCKey):
        """Garbage-collect 3PC state for batches up to and including till3PCKey.

        Clears PRE-PREPARE/PREPARE/COMMIT bookkeeping, frees the associated
        requests, and lets the BLS component clean up as well.
        """
        self._logger.info("{} cleaning up till {}".format(self, till3PCKey))
        tpcKeys = set()
        reqKeys = set()
        # Collect 3PC keys (and their request keys) at or below the cutoff.
        for key3PC, pp in itertools.chain(
            self.sent_preprepares.items(),
            self.prePrepares.items()
        ):
            if compare_3PC_keys(till3PCKey, key3PC) <= 0:
                tpcKeys.add(key3PC)
                for reqKey in pp.reqIdr:
                    reqKeys.add(reqKey)
        for key3PC, pp_dict in self.pre_prepare_tss.items():
            if compare_3PC_keys(till3PCKey, key3PC) <= 0:
                tpcKeys.add(key3PC)
                # TODO INDY-1983: was found that it adds additional
                # requests to clean, need to explore why
                # for (pp, _) in pp_dict:
                #     for reqKey in pp.reqIdr:
                #         reqKeys.add(reqKey)
        self._logger.trace("{} found {} 3-phase keys to clean".
                           format(self, len(tpcKeys)))
        self._logger.trace("{} found {} request keys to clean".
                           format(self, len(reqKeys)))
        # Keep only old-view PRE-PREPAREs newer than the cutoff.
        self.old_view_preprepares = {k: v for k, v in self.old_view_preprepares.items()
                                     if compare_3PC_keys(till3PCKey, (k[0], k[1])) > 0}
        to_clean_up = (
            self.pre_prepare_tss,
            self.sent_preprepares,
            self.prePrepares,
            self.prepares,
            self.commits,
            self.batches,
            self.pre_prepares_stashed_for_incorrect_time,
        )
        for request_key in tpcKeys:
            pp = self.get_preprepare(*request_key)
            if pp:
                self._clear_batch(self.get_preprepare(*request_key))
            for coll in to_clean_up:
                coll.pop(request_key, None)
        # Release the requests themselves.
        for request_key in reqKeys:
            self._requests.ordered_by_replica(request_key)
            self._requests.free(request_key)
            for ledger_id, keys in self.requestQueues.items():
                if request_key in keys:
                    self.discard_req_key(ledger_id, request_key)
            self._logger.trace('{} freed request {} from previous checkpoints'
                               .format(self, request_key))
        # ToDo: do we need ordered messages there?
        self.ordered.clear_below_view(self.view_no - 1)
        # BLS multi-sig:
        self.l_bls_bft_replica.gc(till3PCKey)
    def discard_req_key(self, ledger_id, req_key):
        # Remove a request key from the given ledger's queue (no-op if absent).
        self.requestQueues[ledger_id].discard(req_key)
def _clear_prev_view_pre_prepares(self):
to_remove = []
for idx, (pp, _, _) in enumerate(self.prePreparesPendingFinReqs):
if pp.viewNo < self.view_no:
to_remove.insert(0, idx)
for idx in to_remove:
self.prePreparesPendingFinReqs.pop(idx)
for (v, p) in list(self.prePreparesPendingPrevPP.keys()):
if v < self.view_no:
self.prePreparesPendingPrevPP.pop((v, p))
    def report_suspicious_node(self, ex: SuspiciousNode):
        # Publish the suspicion on the internal bus for the node to handle.
        self._bus.send(RaisedSuspicion(inst_id=self._data.inst_id,
                                       ex=ex))
    def _validate(self, msg):
        # Delegate message validation to the configured validator.
        return self._validator.validate(msg)
"""Method from legacy code"""
def l_compact_primary_names(self):
min_allowed_view_no = self.view_no - 1
views_to_remove = []
for view_no in self.primary_names:
if view_no >= min_allowed_view_no:
break
views_to_remove.append(view_no)
for view_no in views_to_remove:
self.primary_names.pop(view_no)
    def _can_process_pre_prepare(self, pre_prepare: PrePrepare, sender: str):
        """
        Decide whether this replica is eligible to process a PRE-PREPARE.

        Returns None when the PRE-PREPARE is acceptable (or already
        ordered), otherwise one of the PP_CHECK_*/PP_APPLY_* reason codes.
        The checks below are ordered; do not reorder them casually.

        :param pre_prepare: a PRE-PREPARE msg to process
        :param sender: the name of the node that sent the PRE-PREPARE msg
        """
        # Already ordered — nothing to validate.
        if self._validator.has_already_ordered(pre_prepare.viewNo, pre_prepare.ppSeqNo):
            return None
        # The digest must cover the request ids, original view and time.
        digest = self.generate_pp_digest(pre_prepare.reqIdr,
                                         get_original_viewno(pre_prepare),
                                         pre_prepare.ppTime)
        if digest != pre_prepare.digest:
            return PP_APPLY_WRONG_DIGEST
        # PRE-PREPARE should not be sent from non primary
        if not self._is_msg_from_primary(pre_prepare, sender):
            return PP_CHECK_NOT_FROM_PRIMARY
        # Already has a PRE-PREPARE with same 3 phase key
        if (pre_prepare.viewNo, pre_prepare.ppSeqNo) in self.prePrepares:
            return PP_CHECK_DUPLICATE
        if not self._is_pre_prepare_time_acceptable(pre_prepare, sender):
            return PP_CHECK_WRONG_TIME
        if compare_3PC_keys((pre_prepare.viewNo, pre_prepare.ppSeqNo),
                            self.__last_pp_3pc) > 0:
            return PP_CHECK_OLD  # ignore old pre-prepare
        if self._non_finalised_reqs(pre_prepare.reqIdr):
            return PP_CHECK_REQUEST_NOT_FINALIZED
        if not self._is_next_pre_prepare(pre_prepare.viewNo,
                                         pre_prepare.ppSeqNo):
            return PP_CHECK_NOT_NEXT
        if f.POOL_STATE_ROOT_HASH.nm in pre_prepare and \
                pre_prepare.poolStateRootHash != self.get_state_root_hash(POOL_LEDGER_ID):
            return PP_CHECK_INCORRECT_POOL_STATE_ROOT
        # BLS multi-sig:
        status = self.l_bls_bft_replica.validate_pre_prepare(pre_prepare,
                                                             sender)
        if status is not None:
            return status
        return None
    def _schedule(self, func, delay):
        # Run `func` after `delay` using the replica's timer.
        self._timer.schedule(delay, func)
    def _process_valid_preprepare(self, pre_prepare: PrePrepare, sender: str):
        """Apply an eligible PRE-PREPARE and record it.

        Returns None on success, otherwise the PP_* reason code returned
        by the apply-and-validate step.
        """
        why_not_applied = None
        # apply and validate applied PrePrepare if it's not ordered yet
        if not self._validator.has_already_ordered(pre_prepare.viewNo, pre_prepare.ppSeqNo):
            why_not_applied = self._apply_and_validate_applied_pre_prepare(pre_prepare, sender)
        if why_not_applied is not None:
            return why_not_applied
        # add to PrePrepares (sent vs received, depending on our role)
        if self._data.is_primary:
            self._add_to_sent_pre_prepares(pre_prepare)
        else:
            self._add_to_pre_prepares(pre_prepare)
        return None
def _apply_and_validate_applied_pre_prepare(self, pre_prepare: PrePrepare, sender: str):
self.first_batch_after_catchup = False
old_state_root = self.get_state_root_hash(pre_prepare.ledgerId, to_str=False)
old_txn_root = self.get_txn_root_hash(pre_prepare.ledgerId)
if self.is_master:
self._logger.debug('{} state root before processing {} is {}, {}'.format(
self,
pre_prepare,
old_state_root,
old_txn_root))
# 1. APPLY
reqs, invalid_indices, rejects, suspicious = self._apply_pre_prepare(pre_prepare)
# 2. CHECK IF MORE CHUNKS NEED TO BE APPLIED FURTHER BEFORE VALIDATION
if pre_prepare.sub_seq_no != 0:
return PP_SUB_SEQ_NO_WRONG
if not pre_prepare.final:
return PP_NOT_FINAL
# 3. VALIDATE APPLIED
invalid_from_pp = invalid_index_serializer.deserialize(pre_prepare.discarded)
if suspicious:
why_not_applied = PP_REQUEST_ALREADY_ORDERED
else:
why_not_applied = self._validate_applied_pre_prepare(pre_prepare,
invalid_indices, invalid_from_pp)
# 4. IF NOT VALID AFTER APPLYING - REVERT
if why_not_applied is not None:
if self.is_master:
self._revert(pre_prepare.ledgerId,
old_state_root,
len(pre_prepare.reqIdr) - len(invalid_indices))
return why_not_applied
# 5. TRACK APPLIED
if rejects:
for reject in rejects:
self._network.send(reject)
if self.is_master:
# BLS multi-sig:
self.l_bls_bft_replica.process_pre_prepare(pre_prepare, sender)
self._logger.trace("{} saved shared multi signature for "
"root".format(self, old_state_root))
if not self.is_master:
self.db_manager.get_store(LAST_SENT_PP_STORE_LABEL).store_last_sent_pp_seq_no(
self._data.inst_id, pre_prepare.ppSeqNo)
self._track_batches(pre_prepare, old_state_root)
key = (pre_prepare.viewNo, pre_prepare.ppSeqNo)
self._logger.debug("{} processed incoming PRE-PREPARE{}".format(self, key),
extra={"tags": ["processing"]})
return None
    def _enqueue_pre_prepare(self, pre_prepare: PrePrepare, sender: str,
                             nonFinReqs: Set = None):
        """
        Stash a PRE-PREPARE that cannot be processed yet.

        If `nonFinReqs` is given, the PRE-PREPARE waits for those requests to
        become finalised; otherwise it waits for the preceding PRE-PREPARE(s)
        keyed by (viewNo, ppSeqNo).
        """
        if nonFinReqs:
            self._logger.info("{} - Queueing pre-prepares due to unavailability of finalised "
                              "requests. PrePrepare {} from {}".format(self, pre_prepare, sender))
            self.prePreparesPendingFinReqs.append((pre_prepare, sender, nonFinReqs))
        else:
            # NOTE(review): possible exploit — a malicious party could send an
            # invalid pre-prepare and over-write the correct one here?
            self._logger.info("Queueing pre-prepares due to unavailability of previous pre-prepares. {} from {}".
                              format(pre_prepare, sender))
            self.prePreparesPendingPrevPP[pre_prepare.viewNo, pre_prepare.ppSeqNo] = (pre_prepare, sender)
def _request_propagates_if_needed(self, bad_reqs: list, pre_prepare: PrePrepare):
if any(pre_prepare is pended[0] for pended in self.prePreparesPendingFinReqs):
self._bus.send(RequestPropagates(bad_reqs))
def _request_missing_three_phase_messages(self, view_no: int, seq_frm: int, seq_to: int) -> None:
for pp_seq_no in range(seq_frm, seq_to + 1):
key = (view_no, pp_seq_no)
self._request_pre_prepare(key)
self._request_prepare(key)
self._request_commit(key)
def _request_three_phase_msg(self, three_pc_key: Tuple[int, int],
msg_type: str,
recipients: Optional[List[str]] = None,
stash_data: Optional[Tuple[str, str, str]] = None):
self._bus.send(MissingMessage(msg_type,
three_pc_key,
self._data.inst_id,
recipients,
stash_data))
def _request_pre_prepare(self, three_pc_key: Tuple[int, int],
stash_data: Optional[Tuple[str, str, str]] = None):
"""
Request preprepare
"""
if not self._config.PRE_PREPARE_REQUEST_ENABLED:
return
recipients = [getNodeName(self.primary_name)]
self._request_three_phase_msg(three_pc_key,
PREPREPARE,
recipients,
stash_data)
    def _request_prepare(self, three_pc_key: Tuple[int, int],
                         recipients: List[str] = None,
                         stash_data: Optional[Tuple[str, str, str]] = None):
        """
        Request PREPAREs for `three_pc_key` from connected nodes, excluding
        the primary (it sends PRE-PREPARE, not PREPARE).
        """
        if not self._config.PREPARE_REQUEST_ENABLED:
            return
        if recipients is None:
            recipients = self._network.connecteds.copy()
        primary_node_name = getNodeName(self.primary_name)
        if primary_node_name in recipients:
            recipients.remove(primary_node_name)
        return self._request_three_phase_msg(three_pc_key, PREPARE, recipients, stash_data)
def _request_commit(self, three_pc_key: Tuple[int, int],
recipients: List[str] = None):
"""
Request commit
"""
if not self._config.COMMIT_REQUEST_ENABLED:
return
if recipients is None:
recipients = self._network.connecteds.copy()
self._request_three_phase_msg(three_pc_key, COMMIT, recipients)
"""Method from legacy code"""
    def l_setup_last_ordered_for_non_master(self):
        """
        Since last ordered view_no and pp_seq_no are only communicated for
        master instance, backup instances use this method for restoring
        `last_ordered_3pc` after catch-up: it becomes one less than the
        lowest prepared certificate observed in the current view.
        :return:
        """
        if not self.is_master and self.first_batch_after_catchup and \
                not self._data.is_primary:
            # If not master instance choose last ordered seq no to be 1 less
            # the lowest prepared certificate in this view
            lowest_prepared = self.l_get_lowest_probable_prepared_certificate_in_view(
                self.view_no)
            if lowest_prepared is not None:
                # after catch-up, last_ordered_3pc[1] is 0; move it to
                # lowest_prepared - 1 so ordering resumes at the right point
                self._logger.info('{} Setting last ordered for non-master as {}'.
                                  format(self, self.last_ordered_3pc))
                self.last_ordered_3pc = (self.view_no, lowest_prepared - 1)
                self._bus.send(BackupSetupLastOrdered(inst_id=self._data.inst_id))
            # one-shot flag: only the first batch after catch-up triggers this
            self.first_batch_after_catchup = False
def get_state_root_hash(self, ledger_id: str, to_str=True, committed=False):
return self.db_manager.get_state_root_hash(ledger_id, to_str, committed) \
if self.is_master \
else None
def get_txn_root_hash(self, ledger_id: str, to_str=True):
return self.db_manager.get_txn_root_hash(ledger_id, to_str) \
if self.is_master \
else None
def _is_msg_from_primary(self, msg, sender: str) -> bool:
"""
Return whether this message was from primary replica
:param msg:
:param sender:
:return:
"""
if self._is_msg_for_current_view(msg):
return self.primary_name == sender
try:
return self.primary_names[msg.viewNo] == sender
except KeyError:
return False
def _is_msg_for_current_view(self, msg):
"""
Return whether this request's view number is equal to the current view
number of this replica.
"""
viewNo = getattr(msg, "viewNo", None)
return viewNo == self.view_no
def _is_pre_prepare_time_correct(self, pp: PrePrepare, sender: str) -> bool:
"""
Check if this PRE-PREPARE is not older than (not checking for greater
than since batches maybe sent in less than 1 second) last PRE-PREPARE
and in a sufficient range of local clock's UTC time.
:param pp:
:return:
"""
tpcKey = (pp.viewNo, pp.ppSeqNo)
if (self.last_accepted_pre_prepare_time and
pp.ppTime < self.last_accepted_pre_prepare_time):
return False
elif ((tpcKey not in self.pre_prepare_tss) or
((pp.auditTxnRootHash, sender) not in self.pre_prepare_tss[tpcKey])):
return False
else:
return (
abs(pp.ppTime - self.pre_prepare_tss[tpcKey][pp.auditTxnRootHash, | |
import json
import logging
import os
from textwrap import dedent
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount
from teuthology.exceptions import CommandFailedError
from teuthology.misc import sudo_write_file
log = logging.getLogger(__name__)
class TestVolumeClient(CephFSTestCase):
    """Exercise ceph_volume_client.CephFSVolumeClient by running python snippets on a remote client."""
    # One for looking at the global filesystem, one for being
    # the VolumeClient, two for mounting the created shares
    CLIENTS_REQUIRED = 4
    # Remote python interpreter; may be overridden via teuthology config in setUp()
    py_version = 'python'
def setUp(self):
CephFSTestCase.setUp(self)
self.py_version = self.ctx.config.get('overrides', {}).get('python', 'python')
log.info("using python version: {python_version}".format(
python_version=self.py_version
))
    def _volume_client_python(self, client, script, vol_prefix=None, ns_prefix=None):
        """
        Run `script` on `client` wrapped in a CephFSVolumeClient
        connect()/disconnect() pair; return the remote command's output.
        """
        # Can't dedent this *and* the script we pass in, because they might have different
        # levels of indentation to begin with, so leave this string zero-indented
        if vol_prefix:
            vol_prefix = "\"" + vol_prefix + "\""
        if ns_prefix:
            ns_prefix = "\"" + ns_prefix + "\""
        return client.run_python("""
from __future__ import print_function
from ceph_volume_client import CephFSVolumeClient, VolumePath
import logging
log = logging.getLogger("ceph_volume_client")
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
vc = CephFSVolumeClient("manila", "{conf_path}", "ceph", {vol_prefix}, {ns_prefix})
vc.connect()
{payload}
vc.disconnect()
""".format(payload=script, conf_path=client.config_path,
           vol_prefix=vol_prefix, ns_prefix=ns_prefix),
        self.py_version)
def _sudo_write_file(self, remote, path, data):
"""
Write data to a remote file as super user
:param remote: Remote site.
:param path: Path on the remote being written to.
:param data: Data to be written.
Both perms and owner are passed directly to chmod.
"""
remote.run(
args=[
'sudo',
'python',
'-c',
'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
path,
],
stdin=data,
)
    def _configure_vc_auth(self, mount, id_name):
        """
        Set up auth credentials for the VolumeClient user and point the
        mount's client config at the generated keyring.
        """
        # Broad caps: the volume client manages auth/layouts on behalf of guests.
        out = self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-or-create", "client.{name}".format(name=id_name),
            "mds", "allow *",
            "osd", "allow rw",
            "mon", "allow *"
        )
        mount.client_id = id_name
        self._sudo_write_file(mount.client_remote, mount.get_keyring_path(), out)
        self.set_conf("client.{name}".format(name=id_name), "keyring", mount.get_keyring_path())
    def _configure_guest_auth(self, volumeclient_mount, guest_mount,
                              guest_entity, mount_path,
                              namespace_prefix=None, readonly=False,
                              tenant_id=None):
        """
        Set up auth credentials for the guest client to mount a volume.

        :param volumeclient_mount: mount used as the handle for driving
                                   volumeclient.
        :param guest_mount: mount used by the guest client.
        :param guest_entity: auth ID used by the guest client.
        :param mount_path: path of the volume.
        :param namespace_prefix: name prefix of the RADOS namespace, which
                                 is used for the volume's layout.
        :param readonly: defaults to False. If set to 'True' only read-only
                         mount access is granted to the guest.
        :param tenant_id: (OpenStack) tenant ID of the guest client.
        """
        # Recover group/volume ids and the volume prefix from mount_path,
        # which has the shape "/<prefix>/<group_id>/<volume_id>".
        head, volume_id = os.path.split(mount_path)
        head, group_id = os.path.split(head)
        head, volume_prefix = os.path.split(head)
        volume_prefix = "/" + volume_prefix

        # Authorize the guest client's auth ID to mount the volume.
        key = self._volume_client_python(volumeclient_mount, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly},
                                       tenant_id="{tenant_id}")
            print(auth_result['auth_key'])
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity,
            readonly=readonly,
            tenant_id=tenant_id)), volume_prefix, namespace_prefix
        )

        # CephFSVolumeClient's authorize() does not return the secret
        # key to a caller who isn't multi-tenant aware. Explicitly
        # query the key for such a client.
        if not tenant_id:
            key = self.fs.mon_manager.raw_cluster_cmd(
            "auth", "get-key", "client.{name}".format(name=guest_entity),
            )

        # The guest auth ID should exist.
        existing_ids = [a['entity'] for a in self.auth_list()]
        self.assertIn("client.{0}".format(guest_entity), existing_ids)

        # Create keyring file for the guest client.
        keyring_txt = dedent("""
        [client.{guest_entity}]
            key = {key}

        """.format(
            guest_entity=guest_entity,
            key=key
        ))
        guest_mount.client_id = guest_entity
        self._sudo_write_file(guest_mount.client_remote,
                              guest_mount.get_keyring_path(),
                              keyring_txt)

        # Add a guest client section to the ceph config file.
        self.set_conf("client.{0}".format(guest_entity), "client quota", "True")
        self.set_conf("client.{0}".format(guest_entity), "debug client", "20")
        self.set_conf("client.{0}".format(guest_entity), "debug objecter", "20")
        self.set_conf("client.{0}".format(guest_entity),
                      "keyring", guest_mount.get_keyring_path())
def test_default_prefix(self):
group_id = "grpid"
volume_id = "volid"
DEFAULT_VOL_PREFIX = "volumes"
DEFAULT_NS_PREFIX = "fsvolumens_"
self.mount_b.umount_wait()
self._configure_vc_auth(self.mount_b, "manila")
#create a volume with default prefix
self._volume_client_python(self.mount_b, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
vc.create_volume(vp, 10, data_isolated=True)
""".format(
group_id=group_id,
volume_id=volume_id,
)))
# The dir should be created
self.mount_a.stat(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id))
#namespace should be set
ns_in_attr = self.mount_a.getfattr(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id), "ceph.dir.layout.pool_namespace")
namespace = "{0}{1}".format(DEFAULT_NS_PREFIX, volume_id)
self.assertEqual(namespace, ns_in_attr)
    def test_lifecycle(self):
        """
        General smoke test for create, extend, destroy: create an isolated
        volume, mount it as a guest, check quota/df/namespace behaviour,
        then deauthorize, evict and purge.
        """
        # I'm going to use mount_c later as a guest for mounting the created
        # shares
        self.mounts[2].umount_wait()

        # I'm going to leave mount_b unmounted and just use it as a handle for
        # driving volumeclient. It's a little hacky but we don't have a more
        # general concept for librados/libcephfs clients as opposed to full
        # blown mounting clients.
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"
        volume_prefix = "/myprefix"
        namespace_prefix = "mynsprefix_"

        # Create a 100MB volume
        volume_size = 100
        mount_path = self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            create_result = vc.create_volume(vp, 1024*1024*{volume_size})
            print(create_result['mount_path'])
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            volume_size=volume_size
        )), volume_prefix, namespace_prefix)

        # The dir should be created
        self.mount_a.stat(os.path.join("myprefix", group_id, volume_id))

        # Authorize and configure credentials for the guest to mount the
        # the volume.
        self._configure_guest_auth(self.mount_b, self.mounts[2], guest_entity,
                                   mount_path, namespace_prefix)
        self.mounts[2].mount(mount_path=mount_path)

        # The kernel client doesn't have the quota-based df behaviour,
        # or quotas at all, so only exercise the client behaviour when
        # running fuse.
        if isinstance(self.mounts[2], FuseMount):
            # df should see volume size, same as the quota set on volume's dir
            self.assertEqual(self.mounts[2].df()['total'],
                             volume_size * 1024 * 1024)
            self.assertEqual(
                self.mount_a.getfattr(
                    os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                    "ceph.quota.max_bytes"),
                "%s" % (volume_size * 1024 * 1024))

            # df granularity is 4MB block so have to write at least that much
            data_bin_mb = 4
            self.mounts[2].write_n_mb("data.bin", data_bin_mb)

            # Write something outside volume to check this space usage is
            # not reported in the volume's DF.
            other_bin_mb = 8
            self.mount_a.write_n_mb("other.bin", other_bin_mb)

            # global: df should see all the writes (data + other). This is a >
            # rather than a == because the global spaced used includes all pools
            def check_df():
                used = self.mount_a.df()['used']
                return used >= (other_bin_mb * 1024 * 1024)

            self.wait_until_true(check_df, timeout=30)

            # Hack: do a metadata IO to kick rstats
            self.mounts[2].run_shell(["touch", "foo"])

            # volume: df should see the data_bin_mb consumed from quota, same
            # as the rbytes for the volume's dir
            self.wait_until_equal(
                lambda: self.mounts[2].df()['used'],
                data_bin_mb * 1024 * 1024, timeout=60)

            self.wait_until_equal(
                lambda: self.mount_a.getfattr(
                    os.path.join(volume_prefix.strip("/"), group_id, volume_id),
                    "ceph.dir.rbytes"),
                "%s" % (data_bin_mb * 1024 * 1024), timeout=60)

        # sync so that file data are persist to rados
        self.mounts[2].run_shell(["sync"])

        # Our data should stay in particular rados namespace
        pool_name = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool")
        namespace = "{0}{1}".format(namespace_prefix, volume_id)
        ns_in_attr = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool_namespace")
        self.assertEqual(namespace, ns_in_attr)

        objects_in_ns = set(self.fs.rados(["ls"], pool=pool_name, namespace=namespace).split("\n"))
        self.assertNotEqual(objects_in_ns, set())

        # De-authorize the guest
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )), volume_prefix, namespace_prefix)

        # Once deauthorized, the client should be unable to do any more metadata ops
        # The way that the client currently behaves here is to block (it acts like
        # it has lost network, because there is nothing to tell it that is messages
        # are being dropped because it's identity is gone)
        background = self.mounts[2].write_n_mb("rogue.bin", 1, wait=False)
        try:
            background.wait()
        except CommandFailedError:
            # command failed with EBLACKLISTED?
            if "transport endpoint shutdown" in background.stderr.getvalue():
                pass
            else:
                raise

        # After deauthorisation, the client ID should be gone (this was the only
        # volume it was authorised for)
        self.assertNotIn("client.{0}".format(guest_entity), [e['entity'] for e in self.auth_list()])

        # Clean up the dead mount (ceph-fuse's behaviour here is a bit undefined)
        self.mounts[2].umount_wait()

        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.delete_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
        )), volume_prefix, namespace_prefix)
    def test_idempotency(self):
        """
        That the volumeclient interface works when calling everything twice:
        each create/authorize/deauthorize/evict/delete/purge call is repeated
        back-to-back and the second call must be a harmless no-op, for plain,
        data-isolated and non-namespace-isolated volumes alike.
        """
        self.mount_b.umount_wait()
        self._configure_vc_auth(self.mount_b, "manila")

        guest_entity = "guest"
        group_id = "grpid"
        volume_id = "volid"
        self._volume_client_python(self.mount_b, dedent("""
            vp = VolumePath("{group_id}", "{volume_id}")
            vc.create_volume(vp, 10)
            vc.create_volume(vp, 10)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.delete_volume(vp)
            vc.delete_volume(vp)
            vc.purge_volume(vp)
            vc.purge_volume(vp)
            vc.create_volume(vp, 10, data_isolated=True)
            vc.create_volume(vp, 10, data_isolated=True)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
            vc.evict("{guest_entity}")
            vc.delete_volume(vp, data_isolated=True)
            vc.delete_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)
            vc.purge_volume(vp, data_isolated=True)
            vc.create_volume(vp, 10, namespace_isolated=False)
            vc.create_volume(vp, 10, namespace_isolated=False)
            vc.authorize(vp, "{guest_entity}")
            vc.authorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.deauthorize(vp, "{guest_entity}")
            vc.evict("{guest_entity}")
            vc.evict("{guest_entity}")
            vc.delete_volume(vp)
            vc.delete_volume(vp)
            vc.purge_volume(vp)
            vc.purge_volume(vp)
        """.format(
            group_id=group_id,
            volume_id=volume_id,
            guest_entity=guest_entity
        )))
def test_data_isolated(self):
"""
That data isolated shares get their own pool
:return:
"""
# Because the teuthology config template sets mon_max_pg_per_osd to
# 10000 (i.e. it just tries to ignore health warnings), reset it to something
# sane before using volume_client, to avoid creating pools with absurdly large
# numbers of PGs.
self.set_conf("global", "mon max pg per osd", "300")
for mon_daemon_state in self.ctx.daemons.iter_daemons_of_role('mon'):
mon_daemon_state.restart()
self.mount_b.umount_wait()
self._configure_vc_auth(self.mount_b, "manila")
# Calculate how many PGs we'll expect the new volume pool to have
osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty'))
max_per_osd = int(self.fs.get_config('mon_max_pg_per_osd'))
osd_count = len(osd_map['osds'])
max_overall = osd_count * max_per_osd
existing_pg_count | |
# parseUserTimeline.py
import argparse, collections, fnmatch, json, math, mysql.connector as sql, os, requests, sys, time
from ConfigParser import SafeConfigParser
from datetime import datetime
from mysql.connector import errorcode
from requests import HTTPError
from requests import ConnectionError
def printUTF8(info) :
    """Print `info` to stdout with any non-ASCII characters replaced by '?'."""
    ascii_safe = info.encode('ascii', 'replace').decode()
    print(ascii_safe)
def convert(input):
    """
    Recursively encode every text string in a JSON-decoded structure as a
    UTF-8 byte string; dicts and lists are rebuilt, other values pass through.

    BUGFIX: the original used the Python-2-only names `iteritems` and
    `unicode`, which raise NameError/AttributeError under Python 3.  The
    behaviour on Python 2 is unchanged.
    """
    try:
        text_type = unicode   # Python 2: json yields unicode strings
    except NameError:
        text_type = str       # Python 3
    if isinstance(input, dict):
        return {convert(key): convert(value) for key, value in input.items()}
    elif isinstance(input, list):
        return [convert(element) for element in input]
    elif isinstance(input, text_type):
        return input.encode('utf-8')
    else:
        return input
# Connect to MySQL using config entries
def connect() :
    """Open and return a MySQL connection built from the [MySQL] section of config/settings.cfg."""
    config = SafeConfigParser()
    script_dir = os.path.dirname(__file__)
    config_file = os.path.join(script_dir, 'config/settings.cfg')
    config.read(config_file)
    sections = config.sections(); sections.remove("MySQL")
    db_params = {
        'user'      : config.get("MySQL","user"),
        # BUGFIX: this entry previously contained a corrupted "<PASSWORD>(...)"
        # token (a syntax error); restore the intended config lookup.
        'password'  : config.get("MySQL","password"),
        'host'      : config.get("MySQL","host"),
        'port'      : int(config.get("MySQL","port")),
        'database'  : config.get("MySQL","database"),
        'charset'   : 'utf8',
        'collation' : 'utf8_general_ci',
        'buffered'  : True
    }
    return sql.connect(**db_params)
def getUserId(conn, tweet, twitter_id, user_type):
    """
    Return the users_user.id for `twitter_id`, inserting the user via
    addUser() when missing or previously stored without a real id.
    Returns None if the lookup itself fails.
    """
    # BUGFIX: user_id was unbound (NameError on the final return) whenever
    # the SELECT raised sql.Error; initialize it so a failed lookup yields
    # None instead of crashing.
    user_id = None
    cursor = conn.cursor()
    try :
        # NOTE(review): twitter_id is interpolated directly into the SQL;
        # it comes from the Twitter API as a numeric id, but a parameterized
        # query would still be safer.
        cursor.execute("SELECT id FROM users_user WHERE twitter_id = %s" % twitter_id)
        conn.commit()
        if cursor.rowcount == 1:
            row = cursor.fetchone()
            if row[0] == "None":
                user_id = addUser(conn, tweet, user_type, twitter_id)
            else:
                user_id = row[0]
        else:
            user_id = addUser(conn, tweet, user_type, twitter_id)
    except sql.Error as err :
        print(">>>> Warning: Could not check whether user exists: %s" % str(err))
        print("     Query: %s" % cursor.statement)
    cursor.close()
    return user_id
# add a user to the db whether s/he's a sender, was mentioned, or was replied to
def addUser(conn, tweet, user_type, user_id) :
    """
    Insert a users_user row for a sender / mention / reply target unless one
    with this twitter_id already exists.  Returns the new row's id on insert,
    otherwise the `user_id` that was passed in.
    """
    cursor = conn.cursor()
    # check to see if the user already exists
    try :
        cursor.execute("SELECT id FROM users_user WHERE twitter_id = %s" % user_id)
        conn.commit()
    except sql.Error as err :
        print(">>>> Warning: Could not add User: %s" % str(err))
        print("     Query: %s" % cursor.statement)
    # if the user doesn't already exist
    if cursor.rowcount == 0:
        put_user_query = "INSERT INTO users_user (twitter_id, twitter_name, fullname, followers, following, " \
                         "favorites, tweets, timezone) " \
                         "VALUES(%s, %s, %s, %s, %s, %s, %s, %s )"
        if user_type == "sender":
            # Full profile is available from the tweet's embedded user object.
            values = [
                tweet["user"]["id_str"],
                tweet["user"]["screen_name"],
                tweet["user"]["name"],
                tweet["user"]["followers_count"],
                tweet["user"]["friends_count"],
                tweet["user"]["favourites_count"],
                tweet["user"]["statuses_count"],
                tweet["user"]["time_zone"],
            ]
        elif user_type == "mention":
            # Only the id is known; profile fields stay blank until
            # updateUser() sees a tweet from this user.
            values = [
                user_id,
                "", # name
                "", # fullname
                "", # followers
                "", # friends
                "", # favourites
                "", # statuses
                "", # time_zone
            ]
        elif user_type == "reply":
            values = [
                tweet["in_reply_to_user_id"],
                "", # name
                "", # fullname
                "", # followers
                "", # friends
                "", # favourites
                "", # statuses
                "", # time_zone
            ]
        # NOTE(review): if user_type is none of the three values above,
        # `values` is unbound and the execute below raises NameError.
        try :
            cursor.execute(put_user_query, values)
            conn.commit()
            user_id = cursor.lastrowid
        except sql.Error as err :
            print(">>>> Warning: Could not add User: %s" % str(err))
            print("     Query: %s" % cursor.statement)
            #continue
            #sys.exit()
    cursor.close()
    return user_id
def updateUser(conn, tweet, user_id):
    """
    Back-fill profile columns for a stub users_user row (one created from a
    mention/reply with tweets = 0) using the full profile embedded in `tweet`.
    """
    cursor = conn.cursor()
    try :
        # Only stub rows (tweets = 0) are candidates for back-filling.
        cursor.execute("SELECT id FROM users_user WHERE twitter_id = %s AND tweets = 0" % user_id)
        conn.commit()
    except sql.Error as err :
        print(">>>> Warning: Could not add User: %s" % str(err))
        print("     Query: %s" % cursor.statement)
    if cursor.rowcount == 1:
        row = cursor.fetchone()
        user_id = row[0]
        update_user_query = "UPDATE users_user SET " \
                            "twitter_name = %s, " \
                            "fullname = %s, " \
                            "followers = %s, " \
                            "following = %s, " \
                            "favorites = %s, " \
                            "tweets = %s, " \
                            "timezone = %s " \
                            "WHERE id = %s"
        values = [
            tweet["user"]["screen_name"],
            tweet["user"]["name"],
            tweet["user"]["followers_count"],
            tweet["user"]["friends_count"],
            tweet["user"]["favourites_count"],
            tweet["user"]["statuses_count"],
            tweet["user"]["time_zone"],
            user_id,
        ]
        try :
            cursor.execute(update_user_query, values)
            conn.commit()
        except sql.Error as err :
            print(">>>> Warning: Could not update user: %s" % str(err))
            print("     Query: %s" % cursor.statement)
        finally :
            # NOTE(review): the cursor is only closed on this path; when no
            # stub row matches, it is left open — confirm intended.
            cursor.close()
# Add a tweet to the DB
def addTweet(conn, tweet) :
    """
    Insert `tweet` into tweets_tweet (including geo columns when the tweet
    carries a Point geo object).  Returns True on success, False on failure.
    """
    cursor = conn.cursor()
    # The query is assembled from a column-list prefix and a placeholder
    # suffix so the optional geo columns can be appended to both.
    prefix = "INSERT INTO tweets_tweet (tweet_id, created_at, text, source, iso_language"
    suffix = ") VALUES (%s, %s, %s, %s, %s"
    values = [
        tweet["id_str"],
        # Twitter's created_at format -> MySQL DATETIME string
        datetime.strptime(tweet["created_at"], '%a %b %d %H:%M:%S +0000 %Y').strftime('%Y-%m-%d %H:%M:%S'),
        tweet["text"],
        tweet["source"],
        tweet["lang"]
    ]
    # Optionally include the geo data
    if tweet['geo'] is not None and tweet['geo']['type'] == "Point" :
        prefix = prefix + ", location_geo, location_geo_0, location_geo_1"
        suffix = suffix + ", Point(%s,%s), %s, %s"
        values.extend([
            tweet["geo"]["coordinates"][0],
            tweet["geo"]["coordinates"][1],
            tweet["geo"]["coordinates"][0],
            tweet["geo"]["coordinates"][1]
        ])
    suffix = suffix + ")"
    query = (prefix + suffix)
    try :
        cursor.execute(query, values)
        conn.commit()
        return True
    except sql.Error as err :
        print(">>>> Warning: Could not add Tweet: %s" % str(err))
        print("     Query: %s" % cursor.statement)
        return False
    finally :
        cursor.close()
# Add hashtag entities to the DB
def addHashtags(conn, tweet) :
    """Insert every hashtag entity of `tweet` into hashtags_hashtag; DB errors are logged, not raised."""
    cursor = conn.cursor()
    query = "INSERT INTO hashtags_hashtag (tweet_id_id, text, index_start, index_end) " \
            "VALUES(%s, %s, %s, %s)"
    for hashtag in tweet['entities']['hashtags'] :
        row = [
            tweet["id_str"],
            hashtag["text"],
            hashtag["indices"][0],
            hashtag["indices"][1]
        ]
        try :
            cursor.execute(query, row)
            conn.commit()
        except sql.Error as err :
            print(">>>> Warning: Could not add hashtag: %s" % str(err))
            print("     Query: %s" % cursor.statement)
    cursor.close()
# Add user mention entities to the DB
def addUserMentions(conn, tweet) :
    """
    Insert each @-mention of `tweet` into mentions_mention and link it to the
    mentioned user (creating a stub user if needed) via mentions_mention_id_str.
    """
    cursor = conn.cursor()
    mentions_query = "INSERT INTO mentions_mention (tweet_id_id, screen_name, name, index_start, index_end) " \
                     "VALUES(%s, %s, %s, %s, %s)"
    user_mentions_query = "INSERT INTO mentions_mention_id_str (mention_id, user_id) " \
                          "VALUES(%s, %s)"
    for mention in tweet['entities']['user_mentions'] :
        # add the mention to the mentions table
        mention_values = [
            tweet["id_str"],
            mention["screen_name"],
            mention["name"],
            mention["indices"][0],
            mention["indices"][1]
        ]
        try :
            cursor.execute(mentions_query, mention_values)
            conn.commit()
            mention_id = cursor.lastrowid
        except sql.Error as err :
            print(">>>> Warning: Could not add Mention: %s" % str(err))
            print("     Query: %s" % cursor.statement)
        user_id = getUserId(conn, tweet, mention["id_str"], "mention")
        # add the relationship mention-user to the appropriate table
        # NOTE(review): if the INSERT above failed, mention_id is unbound on
        # the first iteration; the bare except below catches that NameError
        # and skips linking this mention.
        try :
            user_mention_values = [
                mention_id,
                user_id
            ]
        except :
            print "error in mention_id"
            continue
        try :
            cursor.execute(user_mentions_query, user_mention_values)
            conn.commit()
        except sql.Error as err :
            print(">>>> Warning: Could not add mention: %s" % str(err))
            print("     Query: %s" % cursor.statement)
    cursor.close()
# Add all URL entities to the DB
def addLinks(conn, tweet) :
    """Insert every URL entity of `tweet` into links_link; DB errors are logged, not raised."""
    cursor = conn.cursor()
    query = "INSERT INTO links_link (tweet_id_id, url, expanded_url, display_url, index_start, index_end) " \
            "VALUES(%s, %s, %s, %s, %s, %s)"
    for url in tweet['entities']['urls'] :
        # Older API payloads may lack the expanded/display variants.
        expanded = url["expanded_url"] if "expanded_url" in url else ""
        display = url["display_url"] if "display_url" in url else ""
        row = [
            tweet["id_str"],
            url["url"],
            expanded,
            display,
            url["indices"][0],
            url["indices"][1]
        ]
        try :
            cursor.execute(query, row)
            conn.commit()
        except sql.Error as err :
            print(">>>> Warning: Could not add link: %s" % str(err))
            print("     Query: %s" % cursor.statement)
    cursor.close()
# Add User_Tweet relationship to the many-to-many table
def addUserTweets(conn, tweet):
    """
    Record the sender row (and, for replies, the target row) for `tweet`
    in the users_userstweets many-to-many table.
    """
    cursor = conn.cursor()
    try :
        user_id = getUserId(conn, tweet, tweet["user"]["id"], "sender")
    except :
        print "Can't getUserId for %s" % tweet["user"]["id"]
        sys.exit()
    # insert source user
    source_query = "INSERT INTO users_userstweets (user_id, tweet_id, source, target) " \
                   "VALUES(%s, %s, %s, %s)"
    values = [user_id, tweet["id_str"], 1, 0]
    try:
        cursor.execute(source_query, values)
        conn.commit()
    except sql.Error as err :
        print(">>>> Warning: Could not add source user info: %s" % str(err))
        print("     Query: %s" % cursor.statement)
    # insert target user (if there is one)
    if tweet["in_reply_to_user_id"] :
        # check to see if the user exists (creates a stub row when missing)
        user_id = getUserId(conn, tweet, tweet["in_reply_to_user_id"], "reply")
        target_query = "INSERT INTO users_userstweets (user_id, tweet_id, source, target) " \
                       "VALUES(%s, %s, %s, %s)"
        # NOTE(review): this row stores the raw twitter id rather than the
        # user_id looked up above — confirm against the intended schema.
        values = [tweet["in_reply_to_user_id"], tweet["id_str"], 0, 1]
        try:
            cursor.execute(target_query, values)
            conn.commit()
        except sql.Error as err :
            print(">>>> Warning: Could not add target user info: %s" % str(err))
            print("     Query: %s" % cursor.statement)
    cursor.close()
# Main function
if __name__ == '__main__' :
config = SafeConfigParser()
script_dir = os.path.dirname(__file__)
config_file = os.path.join(script_dir, 'config/settings.cfg')
config.read(config_file)
# Display startup info
print("vvvvv Start:", datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
print("Connecting to database...")
try :
run_total_count = 0
conn = connect()
print("Connected")
# Get the Tweets
dir = config.get('files','outfolder')
for file in os.listdir(dir):
f = open(dir+'/'+file, 'r')
print "Working on %s " % f
#print "before"
tweets = collections.deque()
#print "after"
try:
#tweets = [json.loads(line) for line in f.readlines()]
for line in f:
tweets = convert(json.loads(line))
total_results = 0
count = 1
total = len(tweets)
for tweet in tweets :
# total_results = total_results + 1
# print "now on tweet %s" % tweet["text"]
# Insert the tweet in the DB
success = addTweet(conn, tweet)
# addTweet(conn, tweet)
# Insert the tweet entities in the DB
if success == False:
print("Failed to | |
# repository: Jinsongl/UQRA
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 <NAME> <<EMAIL>>
#
# Distributed under terms of the GNU-License license.
"""
"""
from uqra.experiment._experimentbase import ExperimentBase
import uqra.utilities.helpers as helpers
import numpy as np, scipy as sp
import copy
import itertools
from tqdm import tqdm
import time, math
import multiprocessing as mp
import warnings
class OptimalDesign(ExperimentBase):
""" Quasi-Optimal Experimental Design and Optimal Design"""
def __init__(self, X):
"""
Optimal/Quasi Optimal Experimental design:
Arguments:
X: {array-like, sparse matrix} of shape (n_samples, n_features)
optimal_samples: list of indices for optimal samples
"""
super().__init__()
## return the candidate design matrix after removing selected rows
self.X = np.array(X, copy=False, ndmin=2, dtype=np.float64)
self.optimal_samples = []
self.candidate_samples = list(np.arange(X.shape[0]))
def __str__(self):
return('UQRA.Experiment.OptimalDesign')
    def samples(self, optimality, n, initialization='AFP', algorithm='GREEDY', **kwargs):
        """
        Perform Optimal design with specified optimality, return n samples
        Return n new samples from X : Design matrix X(u) of shape(n_samples, n_features)

        Arguments:
            optimality: str, alphabetic optimal design ('D' or 'S')
            n: int, number of new samples TO BE ADDED
            initialization: method to initialize optimal sample sets
                1. 'TSM': truncated square matrices
                2. 'AFP': Approximated Fekete Point (RRQR)
                3. a list of indices representing selected samples from candidate
            algorithm: algorithm employed to perform each optimality
        Optional:
            is_orth_col: boolean, True if columns of design matrix is orthogonal to each other asymptotically
        Return:
            list of row indices selected
        """
        n = helpers.check_int(n)
        optimality = str(optimality).upper()
        # self.filename = '_'.join(['DoE', optimality])
        print(' > UQRA {:s}-Optimality Design: n={:d} ...'.format(optimality, n))
        if isinstance(initialization, str):
            ## selected optimal samples must be empty
            assert len(self.optimal_samples) == 0
            # Initialization can pick at most rank-many rows, and never more than n.
            n_initialization = min(self.X.shape[0], self.X.shape[1], n)
            if initialization.upper() in ['QR', 'RRQR', 'AFP', 'FEKETE']:
                optimal_samples = self._initial_samples_rrqr(n_initialization)
                print(' -> 1: Initialization ({:s}), n={:d} ...'.format(initialization, len(optimal_samples)))
            elif initialization.upper() in ['TSM', 'TRUNCATED', 'SQUARE']:
                optimal_samples = self._initial_samples_greedy_tsm(n_initialization, optimality)
                print(' -> 1: Initialization ({:s}), n={:d} ...'.format(initialization, len(optimal_samples)))
            else:
                print(' -> UQRA {:s}-Optimality Design: Initialization {:s} NOT implemented'.format(initialization))
                raise NotImplementedError
            # Remaining samples still to be selected greedily below.
            n = n - len(optimal_samples)
        elif isinstance(initialization, (list, tuple, np.ndarray, np.generic)):
            ## Initialize with preselected samples
            X_rank = min(self.X.shape) ## rank of design matrix
            optimal_samples = list(np.array(initialization).flatten())
            print(' -> 1: Initialization with selected samples: n={:d} ...'.format(len(optimal_samples)))
            ## if preselected samples is less than X_rank, truncated square matrix is used
            if len(optimal_samples) < X_rank:
                optimal_samples0 = self._initial_samples_greedy_tsm(min(X_rank-len(optimal_samples),n), optimality,
                        optimal_samples=optimal_samples)
                ### optimal_samples0 includes preselected samples in optimal_samples
                n = n - len(optimal_samples0) + len(optimal_samples)
                optimal_samples = optimal_samples0
        else:
            print(' > {} not implemented for UQRA.OptiamlDesign'.format(initialization))
            raise NotImplementedError
        # Commit the initialization: move selected rows out of the candidate pool.
        self.optimal_samples = optimal_samples
        self.candidate_samples = self._list_diff(self.candidate_samples, optimal_samples)
        assert self._check_complement(self.optimal_samples, self.candidate_samples)
        if n>0:
            print(' -> 2: Continue Optimality Design, n={:d} ...'.format(n))
            if optimality.upper() == 'D':
                optimal_samples = self.get_D_Optimality_samples(n, algorithm=algorithm)
            elif optimality.upper() == 'S':
                optimal_samples = self.get_S_Optimality_samples(n, algorithm=algorithm)
            else:
                raise ValueError('optimality {} not defined'.format(optimality))
        else:
            optimal_samples = []
        self.optimal_samples   = self._list_union(self.optimal_samples , optimal_samples)
        self.candidate_samples = self._list_diff (self.candidate_samples, optimal_samples)
        assert self._check_complement(self.optimal_samples, self.candidate_samples)
        return self.optimal_samples
def get_D_Optimality_samples(self, n, algorithm):
"""
Optimal design with D-optimality
Arguments:
n: int, number of samples to be returned
Algorithm: str, algorithms used to generated D-optimal samples
Available in reference:
1 General Exchange Procedure
2 DETMAX Algorithm
3 Fedorov Algorithm
4 Modified Fedorov Algorithm
5 k-Exchange Algorithm
6 kl-Exchange Algorithm
7 Modified kl-Exchange Algorithm
Available in MATLAB:
cordexch
rowexch
Algorithms reference:
<NAME>. (2008). Design of experiments: the D-optimal approach and its implementation as a computer initialization. Bachelor's Thesis in Information and Communication Technology.
"""
if algorithm.upper() == 'GREEDY':
candidate_samples = copy.deepcopy(self.candidate_samples)
optimal_samples = copy.deepcopy(self.optimal_samples)
for _ in tqdm(range(n), ascii=True, desc=" [Greedy D]",ncols=80):
## find the next optimal index from Q which is not currently selected
candidate_samples = self._list_diff(candidate_samples, optimal_samples)
assert self._check_complement(optimal_samples, candidate_samples)
X_cand = self.X[candidate_samples ,:]
X_sltd = self.X[optimal_samples,:]
## calculate (log)S values for each row in X_cand together with X_sltd
optimality_values = self._greedy_update_D_Optimality_full(X_sltd, X_cand)
if len(optimality_values) != len(candidate_samples):
raise ValueError('Expecting {:d} S values, but {:d} given'.format(len(candidate_samples), len(optimality_values)))
i = candidate_samples[np.argmax(optimality_values)] ## return the indices with largest s-value in original matrix Q
## check if this index is already selected
if i in optimal_samples:
print('Row {:d} already selected'.format(i))
raise ValueError('Duplicate sample {:d} already exists'.format(i))
optimal_samples.append(i)
else:
print('NotImplementedError: UQRA.OptimalDesign.get_D_Optimality_samples: algorithm {:s} not implemented...'.format(algorithm))
raise NotImplementedError
optimal_samples = self._list_diff(optimal_samples, self.optimal_samples)
return optimal_samples
def get_S_Optimality_samples(self, n, algorithm):
"""
Optimal design with S-optimality
Arguments:
n: int, number of samples to be returned
Algorithm: str, algorithms used to initialize S-optimal samples
Algorithms reference:
<NAME>., <NAME>., <NAME>., & <NAME>. (2010). Computing multivariate Fekete and Leja points by numerical linear algebra. SIAM Journal on Numerical Analysis, 48(5), 1984-1999.
"""
if algorithm.upper() == 'GREEDY':
candidate_samples = copy.deepcopy(self.candidate_samples)
optimal_samples = copy.deepcopy(self.optimal_samples)
for _ in tqdm(range(n), ascii=True, desc=" [Greedy S]",ncols=80):
## find the next optimal index from Q which is not currently selected
candidate_samples = self._list_diff(candidate_samples, optimal_samples)
assert self._check_complement(optimal_samples, candidate_samples)
X_cand = self.X[candidate_samples ,:]
X_sltd = self.X[optimal_samples,:]
## calculate (log)S values for each row in X_cand together with X_sltd
optimality_values = self._greedy_update_S_Optimality_full(X_sltd, X_cand)
if len(optimality_values) != len(candidate_samples):
raise ValueError('Expecting {:d} S values, but {:d} given'.format(len(candidate_samples), len(optimality_values)))
i = candidate_samples[np.argmax(optimality_values)] ## return the indices with largest s-value in original matrix Q
## check if this index is already selected
if i in optimal_samples:
print('Row {:d} already selected'.format(i))
raise ValueError('Duplicate sample {:d} already exists'.format(i))
optimal_samples.append(i)
else:
print('NotImplementedError: UQRA.OptimalDesign.get_D_Optimality_samples: algorithm {:s} not implemented...'.format(algorithm))
raise NotImplementedError
optimal_samples = self._list_diff(optimal_samples, self.optimal_samples)
return optimal_samples
def _initial_samples_rrqr(self, n):
"""
Return rows corresponding to largest absolute singular values in design matrix X based on RRQR
Arguments:
n: set of selected indices
"""
n = helpers.check_int(n)
X = self.X[self.candidate_samples, :]
if n > min(X.shape):
raise ValueError('Can only return at most rank(X) samples')
# print(' - [Initialization (RRQR)]'.ljust(80, '#'))
_,_,Pivot = sp.linalg.qr(X.T, pivoting=True)
optimal_samples = [self.candidate_samples[i] for i in Pivot[:n]]
return optimal_samples
def _initial_samples_greedy_tsm(self, n, optimality, optimal_samples=None):
"""
return initial samples selected based with truncated square matrices
"""
n = helpers.check_int(n)
if n > min(len(self.candidate_samples), self.X.shape[1]):
raise ValueError('Can only return at most rank(X) samples')
## if no samples are selected currectly, then first sample is drawn randomly
## the rest n-1 samples are draw with greedy algorithm successfully
if optimal_samples is None:
optimal_samples = [np.random.randint(0, self.X.shape[0], size=1).item(),]
n = n-1
candidate_samples = copy.deepcopy(self.candidate_samples)
optimal_samples = copy.deepcopy(optimal_samples)
for _ in tqdm(range(n), ascii=True, desc=" - [Initialization (TSM)-{:s}]".format(optimality),ncols=80):
# for _ in range(n):
## find the next optimal index from Q which is not currently selected
candidate_samples = self._list_diff(candidate_samples, optimal_samples)
assert self._check_complement(optimal_samples, candidate_samples)
X_cand = self.X[candidate_samples ,:]
X_sltd = self.X[optimal_samples,:]
## calculate (log)S values for each row in X_cand together with X_sltd
if optimality == 'S':
optimality_values = self._greedy_update_S_Optimality_truncate(X_sltd, X_cand)
# np.savetxt('optimality_values_S{:d}.csv'.format(optimality_values.size), optimality_values, delimiter=",")
elif optimality == 'D':
optimality_values = self._greedy_update_D_Optimality_truncate(X_sltd, X_cand)
# np.savetxt('optimality_values_D{:d}.csv'.format(optimality_values.size), optimality_values, delimiter=",")
else:
print(' > UQRA {:s}-Optimal Design for TSM{:s} NOT implemented'.format(optimality))
raise NotImplementedError
if len(optimality_values) != len(candidate_samples):
raise ValueError('Expecting {:d} S values, but {:d} given'.format(len(candidate_samples), len(optimality_values)))
i = candidate_samples[np.argmax(optimality_values)] ## return the indices with largest s-value in original matrix Q
# print(optimality_values.T, i)
## check if this index is already selected
if i in optimal_samples:
print('Row {:d} already selected'.format(i))
raise ValueError('Duplicate sample {:d} already exists'.format(i))
optimal_samples.append(i)
return optimal_samples
def _greedy_update_S_Optimality_full(self, A, B):
"""
Calculate S-value with matrix determinant update formula for each row element in B
Only for overdetermined system, i.e. A.T * A is not singular, i.e. n0 > p
Arguments:
A: ndarray of shape(n0, p), selected
B: ndarray of shape(n1, p), candidate
Return:
log(S-values): S([A; r.T]), r is row in B
"""
A = np.array(A, copy=False, ndmin=2)
B = np.array(B, copy=False, ndmin=2)
if A.shape[0] < A.shape[1]:
raise ValueError('S-value updating formula only works for overdetermined system, however given {}'.format(A.shape))
if A.shape[1] != B.shape[1]:
raise ValueError('matrix A, B must have same number of columns')
n0, p = A.shape
n1, p = B.shape
try:
AAinv = np.linalg.inv(A.T.dot(A)) ## shape (p, p)
except np.linalg.LinAlgError:
u,s,v = np.linalg.svd(A.T.dot(A))
print('singular value of A.T *A: {}'.format(s))
d1 = np.log(1.0 + (B.dot(AAinv) * B).sum(-1)) ## (n1,)
A_col_norm = np.linalg.norm(A, axis=0)
d2 = np.sum(np.log(A_col_norm**2 + B**2), axis=1) ## (n1)
res = d1 - d2
return np.squeeze(res)
| |
<reponame>spielkind/python-otcextensions<filename>otcextensions/sdk/auto_scaling/v1/_proxy.py<gh_stars>0
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import proxy
from otcextensions.sdk.auto_scaling.v1 import activity as _activity
from otcextensions.sdk.auto_scaling.v1 import config as _config
from otcextensions.sdk.auto_scaling.v1 import group as _group
from otcextensions.sdk.auto_scaling.v1 import instance as _instance
from otcextensions.sdk.auto_scaling.v1 import policy as _policy
from otcextensions.sdk.auto_scaling.v1 import quota as _quota
class Proxy(proxy.Proxy):
skip_discovery = True
# ======== Groups ========
def groups(self, **query):
"""Retrieve a generator of groups
:param dict query: Optional query parameters to be sent to limit the
resources being returned.
* ``name``: group name
* ``status``: group status, ``INSERVICE``, ``PAUSED``, ``ERROR``
* ``scaling_configuration_id``: scaling configuration id
* ``marker``: pagination marker, known as ``start_number``
* ``limit``: pagination limit
:returns: A generator of group
(:class:`~otcextensions.sdk.auto_scaling.v1.group.Group`) instances
"""
return self._list(_group.Group, **query)
def create_group(self, **attrs):
"""Create a new group from attributes
:param dict attrs: Keyword arguments which will be used to create
a :class:`~otcextensions.sdk.auto_scaling.v1.group.Group`,
comprised of the properties on the Group class.
:returns: The results of group creation
:rtype: :class:`~otcextensions.sdk.auto_scaling.v1.group.Group`
"""
return self._create(
_group.Group, prepend_key=False, **attrs
)
def update_group(self, group, **attrs):
"""update group with attributes
:param group: The value can be the ID of a group
or a :class:`~otcextensions.sdk.auto_scaling.v1.group.Group`
instance.
:param dict attrs: Keyword arguments which will be used to create
a :class:`~otcextensions.sdk.auto_scaling.v1.group.Group`,
comprised of the properties on the Group class.
:returns: The results of group creation
:rtype: :class:`~otcextensions.sdk.auto_scaling.v1.group.Group`
"""
return self._update(
_group.Group, group, prepend_key=False, **attrs)
def get_group(self, group):
"""Get a group
:param group: The value can be the ID of a group
or a :class:`~otcextensions.sdk.auto_scaling.v1.group.Group`
instance.
:returns: Group instance
:rtype: :class:`~otcextensions.sdk.auto_scaling.v1.group.Group`
"""
return self._get(
_group.Group, group,
)
def delete_group(self, group, ignore_missing=True):
"""Delete a group
:param group: The value can be the ID of a group
or a :class:`~otcextensions.sdk.auto_scaling.v1.group.Group`
instance.
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised when
the group does not exist.
When set to ``True``, no exception will be set when attempting to
delete a nonexistent group.
"""
return self._delete(
_group.Group, group, ignore_missing=ignore_missing,
)
def find_group(self, name_or_id, ignore_missing=True):
"""Find a single group
:param name_or_id: The name or ID of a group
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised
when the group does not exist.
When set to ``True``, no exception will be set when attempting
to delete a nonexistent group.
:returns: ``None``
"""
return self._find(
_group.Group, name_or_id,
ignore_missing=ignore_missing,
)
def resume_group(self, group):
"""resume group
:param group: The value can be the ID of a group
or a :class:`~otcextensions.sdk.auto_scaling.v1.group.Group`
instance.
"""
group = self._get_resource(
_group.Group, group)
return group.resume(self)
def pause_group(self, group):
"""pause group
:param group: The value can be the ID of a group
or a :class:`~otcextensions.sdk.auto_scaling.v1.group.Group`
instance.
"""
group = self._get_resource(
_group.Group, group
)
return group.pause(self)
# ======== Configurations ========
def configs(self, **query):
"""Retrieve a generator of configs
:param dict query: Optional query parameters to be sent to limit the
resources being returned.
* ``name``: configuration name
* ``image_id``: image id
* ``marker``: pagination marker
* ``limit``: pagination limit
:returns: A generator of config
(:class:`~otcextensions.sdk.auto_scaling.v1.config.Config`)
instances
"""
return self._list(_config.Config, **query)
def create_config(self, name, **attrs):
"""Create a new config from config name and instance-config attributes
:param name: auto scaling config name
:param dict attrs: Keyword arguments which will be used to create
a :class:`~otcextensions.sdk.auto_scaling.v1.config.InstanceConfig`
, comprised of the properties on the InstanceConfig class.
:returns: The results of config creation
:rtype: :class:`~otcextensions.sdk.auto_scaling.v1.config.Config`
"""
return self._create(
_config.Config,
prepend_key=False,
name=name,
**attrs
)
def get_config(self, config):
"""Get a config
:param config: The value can be the ID of a config
or a :class:`~otcextensions.sdk.auto_scaling.v1.config.Config`
instance.
:returns: Config instance
:rtype: :class:`~otcextensions.sdk.auto_scaling.v1.config.Config`
"""
return self._get(_config.Config, config)
# Name is not unique, so find might return multiple results
def find_config(self, name_or_id, ignore_missing=True):
"""Get a config
:param name_or_id: The name or ID of a config
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised
when the config does not exist.
When set to ``True``, no exception will be set when attempting
to delete a nonexistent config.
:returns: Config instance
:rtype: :class:`~otcextensions.sdk.auto_scaling.v1.config.Config`
"""
return self._find(
_config.Config, name_or_id,
ignore_missing=ignore_missing,
)
def delete_config(self, config, ignore_missing=True):
"""Delete a config
:param config: The value can be the ID of a config
or a :class:`~otcextensions.sdk.auto_scaling.v1.config.Config`
instance.
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised when
the config does not exist.
When set to ``True``, no exception will be set when attempting to
delete a nonexistent config.
:returns: Config been deleted
:rtype: :class:`~otcextensions.sdk.auto_scaling.v1.config.Config`
"""
return self._delete(
_config.Config, config,
ignore_missing=ignore_missing,
)
def batch_delete_configs(self, configs):
"""batch delete configs
:param configs: The list item value can be the ID of a config
or a :class:`~otcextensions.sdk.auto_scaling.v1.config.Config`
instance.
"""
config = _config.Config()
return config.batch_delete(self, configs)
# ======== Policy ========
def policies(self, group, **query):
"""Retrieve a generator of policies
:param group: The value can be the ID of a group
or a :class:`~otcextensions.sdk.auto_scaling.v1.group.Group`
instance.
:param dict query: Optional query parameters to be sent to limit the
resources being returned.
* ``name``: policy name
* ``type``: policy type
* ``scaling_group_id``: scaling group id the policy applied to
* ``marker``: pagination marker
* ``limit``: pagination limit
:returns: A generator of policy
(:class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`)
instances
"""
group = self._get_resource(_group.Group, group)
return self._list(
_policy.Policy,
base_path='/scaling_policy/{id}/list'.format(id=group.id), **query)
def create_policy(self, **attrs):
"""Create a new policy from attributes
:param dict attrs: Keyword arguments which will be used to create
a :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`,
comprised of the properties on the Policy class.
:returns: The results of policy creation
:rtype: :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`
"""
return self._create(_policy.Policy, prepend_key=False, **attrs)
def update_policy(self, policy, **attrs):
"""update policy with attributes
:param policy: The value can be the ID of a policy
or a :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`
instance.
:param dict attrs: Keyword arguments which will be used to create
a :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`,
comprised of the properties on the Policy class.
:returns: The results of policy creation
:rtype: :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`
"""
return self._update(_policy.Policy, policy, prepend_key=False, **attrs)
def get_policy(self, policy):
"""Get a policy
:param policy: The value can be the ID of a policy
or a :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`
instance.
:returns: Policy instance
:rtype: :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`
"""
return self._get(_policy.Policy, policy)
def delete_policy(self, policy, ignore_missing=True):
"""Delete a policy
:param policy: The value can be the ID of a policy
or a :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`
instance.
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised when
the policy does not exist.
When set to ``True``, no exception will be set when attempting to
delete a nonexistent policy.
:returns: Policy been deleted
:rtype: :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`
"""
return self._delete(_policy.Policy, policy,
ignore_missing=ignore_missing)
def find_policy(self, name_or_id, group, ignore_missing=True):
"""Find a single policy
:param name_or_id: The name or ID of a policy
:param group: ID of a group
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be raised
when the policy does not exist.
When set to ``True``, no exception will be set when attempting
to delete a nonexistent policy.
:returns: ``None``
"""
group = self._get_resource(_group.Group, group)
return self._find(_policy.Policy, name_or_id,
ignore_missing=ignore_missing,
group_id=group.id)
def execute_policy(self, policy):
"""execute policy
:param policy: The value can be the ID of a policy
or a :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`
instance.
"""
policy = self._get_resource(_policy.Policy, policy)
policy.execute(self)
def resume_policy(self, policy):
"""resume policy
:param policy: The value can be the ID of a policy
or a :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`
instance.
"""
policy = self._get_resource(_policy.Policy, policy)
policy.resume(self)
def pause_policy(self, policy):
"""pause policy
:param policy: The value can be the ID of a policy
or a :class:`~otcextensions.sdk.auto_scaling.v1.policy.Policy`
instance.
"""
policy = self._get_resource(_policy.Policy, policy)
policy.pause(self)
# ======== Instances ========
def instances(self, group, **query):
"""Retrieve a generator of instances
:param group: The value can be the ID of a group
or a :class:`~otcextensions.sdk.auto_scaling.v1.group.Group`
instance.
:param dict query: Optional query parameters to be sent to limit the
resources being returned.
* | |
import csv
from copy import deepcopy
from progressbar import ProgressBar
from retrydialog import RetryDialog
from database import *
from apimodules import *
from apithread import ApiThreadPool
from collections import deque
import StringIO
import codecs
import os
import platform
import subprocess
from export import ExportFileDialog
class Actions(object):
    def __init__(self, mainWindow):
        """Create all QActions of the application and wire them to their
        handler slots, grouped by toolbar area (basic / database / data /
        detail / tree)."""
        self.mainWindow = mainWindow
        #Basic actions: open or create a database file
        self.basicActions = QActionGroup(self.mainWindow)
        self.actionOpen = self.basicActions.addAction(QIcon(":/icons/save.png"), "Open Database")
        self.actionOpen.triggered.connect(self.openDB)
        self.actionNew = self.basicActions.addAction(QIcon(":/icons/new.png"), "New Database")
        self.actionNew.triggered.connect(self.makeDB)
        #Database actions: export / add / delete nodes
        self.databaseActions = QActionGroup(self.mainWindow)
        self.actionExport = self.databaseActions.addAction(QIcon(":/icons/export.png"), "Export Data")
        self.actionExport.setToolTip("Export selected node(s) and their children to a .csv file. \n If no or all node(s) are selected inside the data-view, a complete export of all data in the DB is performed")
        self.actionExport.triggered.connect(self.exportNodes)
        self.actionAdd = self.databaseActions.addAction(QIcon(":/icons/add.png"), "Add Nodes")
        self.actionAdd.setToolTip("Add new node(s) as a starting point for further data collection")
        self.actionAdd.triggered.connect(self.addNodes)
        self.actionDelete = self.databaseActions.addAction(QIcon(":/icons/delete.png"), "Delete Nodes")
        self.actionDelete.setToolTip("Delete nodes(s) and their children")
        self.actionDelete.triggered.connect(self.deleteNodes)
        #Data actions: fetching data and managing custom columns
        self.dataActions = QActionGroup(self.mainWindow)
        self.actionQuery = self.dataActions.addAction(QIcon(":/icons/fetch.png"), "Query")
        self.actionQuery.triggered.connect(self.querySelectedNodes)
        self.actionTimer = self.dataActions.addAction(QIcon(":/icons/fetch.png"), "Time")
        self.actionTimer.setToolTip("Time your data collection with a timer. Fetches the data for the selected node(s) in user-defined intervalls")
        self.actionTimer.triggered.connect(self.setupTimer)
        self.actionHelp = self.dataActions.addAction(QIcon(":/icons/help.png"), "Help")
        self.actionHelp.triggered.connect(self.help)
        self.actionLoadPreset = self.dataActions.addAction(QIcon(":/icons/presets.png"), "Presets")
        self.actionLoadPreset.triggered.connect(self.loadPreset)
        self.actionShowColumns = self.dataActions.addAction("Show Columns")
        self.actionShowColumns.triggered.connect(self.showColumns)
        self.actionClearColumns = self.dataActions.addAction("Clear Columns")
        self.actionClearColumns.triggered.connect(self.clearColumns)
        #Detail actions: operate on the JSON detail view
        self.detailActions = QActionGroup(self.mainWindow)
        self.actionAddColumn = self.detailActions.addAction(QIcon(":/icons/addcolumn.png"),"Add Column")
        self.actionAddColumn.setToolTip("Add the current JSON-key as a column in the data view")
        self.actionAddColumn.triggered.connect(self.addColumn)
        # NOTE(review): attribute name has a typo ("Allolumns"); kept because
        # other code may reference it -- confirm before renaming.
        self.actionAddAllolumns = self.detailActions.addAction(QIcon(":/icons/addcolumn.png"),"Add All Columns")
        self.actionAddAllolumns.setToolTip("Analyzes all selected nodes in the data view and adds all found keys as columns")
        self.actionAddAllolumns.triggered.connect(self.addAllColumns)
        self.actionUnpack = self.detailActions.addAction(QIcon(":/icons/unpack.png"),"Unpack List")
        self.actionUnpack.setToolTip("Unpacks a list in the JSON-data and creates a new node containing the list content")
        self.actionUnpack.triggered.connect(self.unpackList)
        self.actionJsonCopy = self.detailActions.addAction(QIcon(":/icons/toclip.png"),"Copy JSON to Clipboard")
        self.actionJsonCopy.setToolTip("Copy the selected JSON-data to the clipboard")
        self.actionJsonCopy.triggered.connect(self.jsonCopy)
        #Tree actions: expand/collapse/copy nodes in the main tree
        self.treeActions = QActionGroup(self.mainWindow)
        self.actionExpandAll = self.treeActions.addAction(QIcon(":/icons/expand.png"), "Expand nodes")
        self.actionExpandAll.triggered.connect(self.expandAll)
        self.actionCollapseAll = self.treeActions.addAction(QIcon(":/icons/collapse.png"), "Collapse nodes")
        self.actionCollapseAll.triggered.connect(self.collapseAll)
        #self.actionSelectNodes=self.treeActions.addAction(QIcon(":/icons/collapse.png"),"Select nodes")
        #self.actionSelectNodes.triggered.connect(self.selectNodes)
        self.actionClipboard = self.treeActions.addAction(QIcon(":/icons/toclip.png"), "Copy Node(s) to Clipboard")
        self.actionClipboard.setToolTip("Copy the selected nodes(s) to the clipboard")
        self.actionClipboard.triggered.connect(self.clipboardNodes)
@Slot()
def help(self):
self.mainWindow.helpwindow.show()
@Slot()
def openDB(self):
#open a file dialog with a .db filter
datadir = self.mainWindow.settings.value("lastpath", os.path.expanduser("~"))
fldg = QFileDialog(caption="Open DB File", directory=datadir, filter="DB files (*.db)")
fldg.setFileMode(QFileDialog.ExistingFile)
if fldg.exec_():
self.mainWindow.timerWindow.cancelTimer()
self.mainWindow.database.connect(fldg.selectedFiles()[0])
self.mainWindow.settings.setValue("lastpath", fldg.selectedFiles()[0])
self.mainWindow.updateUI()
self.mainWindow.tree.treemodel.reset()
@Slot()
def openDBFolder(self):
path = self.mainWindow.settings.value("lastpath",None)
if (path is not None) and (os.path.exists(path)):
if platform.system() == "Windows":
os.startfile(path)
elif platform.system() == "Darwin":
subprocess.Popen(["open", path])
else:
subprocess.Popen(["xdg-open", path])
@Slot()
def makeDB(self):
#same as openDB-Slot, but now for creating a new one on the file system
datadir = self.mainWindow.settings.value("lastpath", os.path.expanduser("~"))
fldg = QFileDialog(caption="Save DB File", directory=datadir, filter="DB files (*.db)")
fldg.setAcceptMode(QFileDialog.AcceptSave)
fldg.setDefaultSuffix("db")
if fldg.exec_():
self.mainWindow.timerWindow.cancelTimer()
self.mainWindow.database.createconnect(fldg.selectedFiles()[0])
self.mainWindow.settings.setValue("lastpath", fldg.selectedFiles()[0])
self.mainWindow.updateUI()
self.mainWindow.tree.treemodel.reset()
@Slot()
def deleteNodes(self):
reply = QMessageBox.question(self.mainWindow, 'Delete Nodes', "Are you sure to delete all selected nodes?",
QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if reply != QMessageBox.Yes:
return
progress = ProgressBar("Deleting data...", self.mainWindow)
try:
todo = self.mainWindow.tree.selectedIndexesAndChildren(True)
progress.setMaximum(len(todo))
for index in todo:
progress.step()
self.mainWindow.tree.treemodel.deleteNode(index, delaycommit=True)
if progress.wasCanceled:
break
finally:
# commit the operation on the db-layer afterwards (delaycommit is True)
self.mainWindow.tree.treemodel.commitNewNodes()
progress.close()
@Slot()
def clipboardNodes(self):
progress = ProgressBar("Copy to clipboard", self.mainWindow)
indexes = self.mainWindow.tree.selectionModel().selectedRows()
progress.setMaximum(len(indexes))
output = StringIO.StringIO()
try:
writer = csv.writer(output, delimiter='\t', quotechar='"', quoting=csv.QUOTE_ALL, doublequote=True,
lineterminator='\r\n')
#headers
row = [unicode(val).encode("utf-8") for val in self.mainWindow.tree.treemodel.getRowHeader()]
writer.writerow(row)
#rows
for no in range(len(indexes)):
if progress.wasCanceled:
break
row = [unicode(val).encode("utf-8") for val in self.mainWindow.tree.treemodel.getRowData(indexes[no])]
writer.writerow(row)
progress.step()
clipboard = QApplication.clipboard()
clipboard.setText(output.getvalue())
finally:
output.close()
progress.close()
@Slot()
def exportNodes(self):
fldg = ExportFileDialog(self.mainWindow)
@Slot()
def addNodes(self):
if not self.mainWindow.database.connected:
return False
# makes the user add a new facebook object into the db
dialog = QDialog(self.mainWindow)
dialog.setWindowTitle("Add Nodes")
layout = QVBoxLayout()
label = QLabel("<b>Object IDs (one ID per line):</b>")
layout.addWidget(label)
input = QPlainTextEdit()
input.setMinimumWidth(500)
input.LineWrapMode = QPlainTextEdit.NoWrap
#input.acceptRichText=False
input.setFocus()
layout.addWidget(input)
buttons = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
layout.addWidget(buttons)
dialog.setLayout(layout)
def createNodes():
newnodes = [node.strip() for node in input.toPlainText().splitlines()]
self.mainWindow.tree.treemodel.addNodes(newnodes)
dialog.close()
def close():
dialog.close()
#connect the nested functions above to the dialog-buttons
buttons.accepted.connect(createNodes)
buttons.rejected.connect(close)
dialog.exec_()
@Slot()
def showColumns(self):
self.mainWindow.tree.treemodel.setCustomColumns(self.mainWindow.fieldList.toPlainText().splitlines())
@Slot()
def clearColumns(self):
self.mainWindow.fieldList.clear()
self.mainWindow.tree.treemodel.setCustomColumns([])
@Slot()
def addColumn(self):
key = self.mainWindow.detailTree.selectedKey()
if key != '':
self.mainWindow.fieldList.append(key)
self.mainWindow.tree.treemodel.setCustomColumns(self.mainWindow.fieldList.toPlainText().splitlines())
@Slot()
def addAllColumns(self):
progress = ProgressBar("Analyzing data...", self.mainWindow)
columns = self.mainWindow.fieldList.toPlainText().splitlines()
try:
indexes = self.mainWindow.tree.selectedIndexesAndChildren()
progress.setMaximum(len(indexes))
for no in range(len(indexes)):
progress.step()
item = indexes[no].internalPointer()
columns.extend([key for key in recursiveIterKeys(item.data['response']) if not key in columns])
if progress.wasCanceled:
break
finally:
self.mainWindow.fieldList.setPlainText("\n".join(columns))
self.mainWindow.tree.treemodel.setCustomColumns(columns)
progress.close()
@Slot()
def loadPreset(self):
self.mainWindow.presetWindow.showPresets()
@Slot()
def jsonCopy(self):
self.mainWindow.detailTree.copyToClipboard()
@Slot()
def unpackList(self):
try:
key = self.mainWindow.detailTree.selectedKey()
if key == '':
return False
selected = self.mainWindow.tree.selectionModel().selectedRows()
for item in selected:
if not item.isValid():
continue
treenode = item.internalPointer()
treenode.unpackList(key)
except Exception as e:
self.mainWindow.logmessage(e)
@Slot()
def expandAll(self):
self.mainWindow.tree.expandAll()
@Slot()
def collapseAll(self):
self.mainWindow.tree.collapseAll()
@Slot()
def selectNodes(self):
self.mainWindow.selectNodesWindow.show()
    def queryNodes(self, indexes=None, apimodule=False, options=False):
        """Fetch data for the selected nodes (or the explicitly given
        *indexes*) through a thread pool of API workers.

        :param indexes: optional list of tree model indexes to fetch; when
            None, the current selection (filtered by level and object type
            from the UI) is used
        :param apimodule: API module to query; False means use the module of
            the currently active request tab
        :param options: query options dict; False means read them from the
            module's UI widget; global UI options are merged on top either way
        :returns: False when the action is disabled or nothing is selected,
            otherwise None
        """
        if not self.actionQuery.isEnabled() or not ((self.mainWindow.tree.selectedCount > 0) or (indexes is not None)):
            return (False)
        #Show progress window
        progress = ProgressBar(u"Fetching Data",parent=self.mainWindow)
        try:
            #Get global options (thread count, rate limit, error threshold, logging)
            globaloptions = {}
            globaloptions['threads'] = self.mainWindow.threadsEdit.value()
            globaloptions['speed'] = self.mainWindow.speedEdit.value()
            globaloptions['errors'] = self.mainWindow.errorEdit.value()
            globaloptions['logrequests'] = self.mainWindow.logCheckbox.isChecked()
            objecttypes = self.mainWindow.typesEdit.text().replace(' ','').split(',')
            level = self.mainWindow.levelEdit.value() - 1
            #Get selected nodes (filtered by tree level and object type)
            if indexes is None:
                indexes = self.mainWindow.tree.selectedIndexesAndChildren(False, {'level': level,
                                                                                 'objecttype':objecttypes})
            if (len(indexes) == 0):
                return (False)
            #Update progress window
            self.mainWindow.logmessage(u"Start fetching data for {} node(s).".format(len(indexes)))
            progress.setMaximum(len(indexes))
            self.mainWindow.tree.treemodel.nodecounter = 0
            #Init status messages: per-status response counts plus a streak
            #counter (laststatus/laststatuscount) used for auto-cancel below
            statuscount = {}
            errorcount = 0
            laststatus = None
            laststatuscount = 0
            allowedstatus = ['fetched (200)','downloaded (200)','stream'] #,'error (400)'
            if apimodule == False:
                apimodule = self.mainWindow.RequestTabs.currentWidget()
            if options == False:
                options = apimodule.getOptions()
            options.update(globaloptions)
            try:
                #Spawn Threadpool
                threadpool = ApiThreadPool(apimodule)
                #Fill Input Queue
                indexes = deque(indexes)
                # for index in indexes:
                #     number += 1
                #     if not index.isValid():
                #         continue
                #
                #     treenode = index.internalPointer()
                #     job = {'number': number, 'nodeindex': index, 'data': deepcopy(treenode.data),
                #            'options': deepcopy(options)}
                #     threadpool.addJob(job)
                threadpool.processJobs(options.get("threads",None))
                #Process Input/Output Queue
                while True:
                    try:
                        #Logging (sync logs in threads with main thread)
                        msg = threadpool.getLogMessage()
                        if msg is not None:
                            self.mainWindow.logmessage(msg)
                        #Jobs in: feed one queued index per loop iteration
                        if (len(indexes) > 0):
                            index = indexes.popleft()
                            if index.isValid():
                                treenode = index.internalPointer()
                                job = {'nodeindex': index, 'data': deepcopy(treenode.data),
                                       'options': deepcopy(options)}
                                threadpool.addJob(job)
                            if len(indexes) == 0:
                                threadpool.closeJobs()
                            progress.showInfo('remainingnodes',u"{} node(s) remaining.".format(threadpool.getJobCount() ))
                        #Jobs out
                        job = threadpool.getJob()
                        #-Finished all nodes...
                        if job is None:
                            break
                        #-Waiting... (yield to other threads)
                        elif 'waiting' in job:
                            # NOTE(review): 'time' is not imported at the top of
                            # this file; presumably provided by a wildcard
                            # import (database/apimodules) -- confirm.
                            time.sleep(0)
                        #-Finished one node...
                        elif 'progress' in job:
                            #Update progress
                            progress.step()
                        #-Add data...
                        elif not progress.wasCanceled:
                            if not job['nodeindex'].isValid():
                                continue
                            #add data
                            treenode = job['nodeindex'].internalPointer()
                            treenode.appendNodes(job['data'], job['options'], job['headers'], True)
                            #show status
                            status = job['options'].get('querystatus','empty')
                            count = 1 if not status in statuscount else statuscount[status]+1
                            statuscount[status] = count
                            progress.showInfo(status,u"{} response(s) with status: {}".format(count,status))
                            progress.showInfo('newnodes',u"{} new node(s) created".format(self.mainWindow.tree.treemodel.nodecounter))
                            progress.showInfo('threads',u"{} active thread(s)".format(threadpool.getThreadCount()))
                            progress.showInfo('remainingnodes',u"{} node(s) remaining.".format(threadpool.getJobCount() ))
                            #auto cancel after three consecutive errors:
                            #track the length of the current same-status streak
                            if (status != laststatus):
                                laststatus=status
                                laststatuscount = 1
                            else:
                                laststatuscount += 1
                            #suspend the pool and ask the user to retry when the
                            #streak is an error status that exceeded the limit
                            #(or the API rate limit was hit)
                            if not (laststatus in allowedstatus) and ((laststatuscount > (globaloptions['errors']-1)) or (laststatus == "rate limit (400)")):
                                threadpool.suspendJobs()
                                if laststatus == "rate limit (400)":
                                    msg = "You reached the rate limit of the API. You are strongly advised to calm down and retry later."
                                    timeout = 60 * 10 #10 minutes
                                else:
                                    msg = "Something is wrong. {} consecutive errors occurred. You are strongly advised to check your settings.".format(laststatuscount)
                                    timeout = 60 #1 minute
                                if RetryDialog.doContinue(msg,timeout,self.mainWindow) == QDialog.Accepted:
                                    laststatuscount = 1
                                    threadpool.resumeJobs()
                                else:
                                    self.mainWindow.logmessage(u"Canceled because of {} consecutive errors or rate limit.".format(laststatuscount))
                                    progress.cancel()
                        #Abort
                        if progress.wasCanceled:
                            progress.showInfo('cancel',u"Disconnecting from stream may take up to one minute.")
                            threadpool.stopJobs()
                            #break
                    finally:
                        QApplication.processEvents()
            finally:
                request_summary = [str(val)+" x "+key for key,val in statuscount.iteritems()]
                request_summary = ", ".join(request_summary)
                self.mainWindow.logmessage(u"Fetching completed, {} new node(s) created. Summary of responses: {}.".format(self.mainWindow.tree.treemodel.nodecounter,request_summary))
                self.mainWindow.tree.treemodel.commitNewNodes()
        finally:
            progress.close()
@Slot()
def querySelectedNodes(self):
    """Fetch data for the nodes currently selected in the tree.

    Thin slot wrapper that delegates to queryNodes() with its defaults.
    """
    self.queryNodes()
@Slot()
def setupTimer(self):
    """Collect the current tree selection and module options, then open the timer window."""
    window = self.mainWindow
    # Level comes from the spinbox minus one — presumably the UI is 1-based
    # while the tree model is 0-based (mirrors the original offset).
    selection_filter = {'level': window.levelEdit.value() - 1,
                        'objecttype': ['seed', 'data', 'unpacked']}
    selected = window.tree.selectedIndexesAndChildren(True, selection_filter)
    api_module = window.RequestTabs.currentWidget()
    # Everything the deferred fetch needs is captured up front in one payload.
    timer_data = {'indexes': selected,
                  'nodecount': len(selected),
                  'module': api_module,
                  'options': api_module.getOptions()}
    window.timerWindow.setupTimer(timer_data)
@Slot()
def timerStarted(self, time):
    """Show the scheduled fire time in red on the status label."""
    label = self.mainWindow.timerStatus
    label.setStyleSheet("QLabel {color:red;}")
    label.setText("Timer will be fired at " + time.toString("d MMM yyyy - hh:mm") + " ")
@Slot()
def timerStopped(self):
    """Reset the timer status label to its idle (black) state."""
    label = self.mainWindow.timerStatus
    label.setStyleSheet("QLabel {color:black;}")
    label.setText("Timer stopped ")
@Slot()
def timerCountdown(self, countdown):
    """Show the remaining seconds before the timer fires, in red."""
    label = self.mainWindow.timerStatus
    label.setStyleSheet("QLabel {color:red;}")
    label.setText("Timer will be fired in " + str(countdown) + " seconds ")
@Slot()
def timerFired(self, data):
    """Run the deferred fetch using the payload captured when the timer was set up."""
    status = self.mainWindow.timerStatus
    status.setText("Timer fired ")
    status.setStyleSheet("QLabel {color:red;}")
    # Options are copied so the fetch cannot mutate the stored timer payload.
    self.queryNodes(data.get('indexes', []),
                    data.get('module', None),
                    data.get('options', {}).copy())
@Slot()
def treeNodeSelected(self, current, selected):
#show details
self.mainWindow.detailTree.clear()
| |
not in params) or (params['batch'] is None):
raise ValueError("Missing the required parameter `batch` when calling `put_dispatch_tags_batch`")
collection_formats = {}
resource_path = '/dispatch/dispatches/tags'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'batch' in params:
body_params = params['batch']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['APP_NORMAL', 'OAUTH']
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def reset_destination_dispatch_registration_count(self, destination_id, **kwargs):
    """
    DestinationsDispatchesRegistrationCount
    Reset registration counts for all related dispatches.

    The request is synchronous by default; pass a `callback` callable in
    kwargs to perform it asynchronously, in which case the request thread
    is returned instead of the response data.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str destination_id: Identifier for the destination (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Both the sync and async paths call the worker once with identical
    # arguments and return its result, so a single call covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.reset_destination_dispatch_registration_count_with_http_info(destination_id, **kwargs)
def reset_destination_dispatch_registration_count_with_http_info(self, destination_id, **kwargs):
    """
    DestinationsDispatchesRegistrationCount
    Reset registration counts for all related dispatches.

    The request is synchronous by default; pass a `callback` callable in
    kwargs to perform it asynchronously, in which case the request thread
    is returned.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str destination_id: Identifier for the destination (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    recognized = ['destination_id', 'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = locals()
    # Reject unknown keyword arguments, then fold the known ones into params.
    for key, val in iteritems(params['kwargs']):
        if key not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method reset_destination_dispatch_registration_count" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'destination_id' is set
    if params.get('destination_id') is None:
        raise ValueError("Missing the required parameter `destination_id` when calling `reset_destination_dispatch_registration_count`")
    resource_path = '/dispatch/destinations/{destinationId}/dispatches/registrationCount'.replace('{format}', 'json')
    path_params = {'destinationId': params['destination_id']}
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(['application/json']),
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(['application/json']),
    }
    # Authentication setting: APP_NORMAL / OAUTH, no request body for DELETE.
    return self.api_client.call_api(resource_path, 'DELETE',
                                    path_params,
                                    {},
                                    header_params,
                                    body=None,
                                    post_params=[],
                                    files={},
                                    response_type=None,
                                    auth_settings=['APP_NORMAL', 'OAUTH'],
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats={})
def reset_dispatch_registration_count(self, dispatch_id, **kwargs):
    """
    Reset registration count.
    Reset the registration count for this dispatch.

    The request is synchronous by default; pass a `callback` callable in
    kwargs to perform it asynchronously, in which case the request thread
    is returned instead of the response data.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str dispatch_id: Identifier for the dispatch (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Both the sync and async paths call the worker once with identical
    # arguments and return its result, so a single call covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.reset_dispatch_registration_count_with_http_info(dispatch_id, **kwargs)
def reset_dispatch_registration_count_with_http_info(self, dispatch_id, **kwargs):
    """
    Reset registration count.
    Reset the registration count for this dispatch.

    The request is synchronous by default; pass a `callback` callable in
    kwargs to perform it asynchronously, in which case the request thread
    is returned.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str dispatch_id: Identifier for the dispatch (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    recognized = ['dispatch_id', 'callback', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = locals()
    # Reject unknown keyword arguments, then fold the known ones into params.
    for key, val in iteritems(params['kwargs']):
        if key not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method reset_dispatch_registration_count" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'dispatch_id' is set
    if params.get('dispatch_id') is None:
        raise ValueError("Missing the required parameter `dispatch_id` when calling `reset_dispatch_registration_count`")
    resource_path = '/dispatch/dispatches/{dispatchId}/registrationCount'.replace('{format}', 'json')
    path_params = {'dispatchId': params['dispatch_id']}
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(['application/json']),
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(['application/json']),
    }
    # Authentication setting: APP_NORMAL / OAUTH, no request body for DELETE.
    return self.api_client.call_api(resource_path, 'DELETE',
                                    path_params,
                                    {},
                                    header_params,
                                    body=None,
                                    post_params=[],
                                    files={},
                                    response_type=None,
                                    auth_settings=['APP_NORMAL', 'OAUTH'],
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats={})
def set_destination(self, destination_id, destination, **kwargs):
    """
    Creates or updates the destination identified by the `destinationId` provided in the path. If the destination is being created, a name should be provided in the DestinationSchema, else an error will be thrown. You may also optionally supply the e-mail address of the user to be associated with this destination. This e-mail address should correspond to a SCORM Cloud user account. If you do not supply an e-mail address upon the creation of a destination, the owner of the Realm will be used. This can, of course, also be changed via calling this method to update an existing destination.

    The request is synchronous by default; pass a `callback` callable in
    kwargs to perform it asynchronously, in which case the request thread
    is returned instead of the response data.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str destination_id: Identifier for the destination (required)
    :param DestinationSchema destination: (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Both the sync and async paths call the worker once with identical
    # arguments and return its result, so a single call covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.set_destination_with_http_info(destination_id, destination, **kwargs)
def set_destination_with_http_info(self, destination_id, destination, **kwargs):
    """
    Creates or updates the destination identified by the `destinationId` provided in the path. If the destination is being created, a name should be provided in the DestinationSchema, else an error will be thrown. You may also optionally supply the e-mail address of the user to be associated with this destination. This e-mail address should correspond to a SCORM Cloud user account. If you do not supply an e-mail address upon the creation of a destination, the owner of the Realm will be used. This can, of course, also be changed via calling this method to update an existing destination.

    The request is synchronous by default; pass a `callback` callable in
    kwargs to perform it asynchronously, in which case the request thread
    is returned.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str destination_id: Identifier for the destination (required)
    :param DestinationSchema destination: (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    recognized = ['destination_id', 'destination', 'callback',
                  '_return_http_data_only', '_preload_content',
                  '_request_timeout']
    params = locals()
    # Reject unknown keyword arguments, then fold the known ones into params.
    for key, val in iteritems(params['kwargs']):
        if key not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method set_destination" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'destination_id' is set
    if params.get('destination_id') is None:
        raise ValueError("Missing the required parameter `destination_id` when calling `set_destination`")
    # verify the required parameter 'destination' is set
    if params.get('destination') is None:
        raise ValueError("Missing the required parameter `destination` when calling `set_destination`")
    resource_path = '/dispatch/destinations/{destinationId}'.replace('{format}', 'json')
    path_params = {'destinationId': params['destination_id']}
    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(['application/json']),
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(['application/json']),
    }
    # Authentication setting: APP_NORMAL / OAUTH; the schema travels as the body.
    return self.api_client.call_api(resource_path, 'PUT',
                                    path_params,
                                    {},
                                    header_params,
                                    body=params['destination'],
                                    post_params=[],
                                    files={},
                                    response_type=None,
                                    auth_settings=['APP_NORMAL', 'OAUTH'],
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats={})
def | |
metric of weight normalize are implemented. 'minmax' whether
to log normalize the weights and scale the weights between 1
and 2. 'log' whether to log normalize weight+1. 'log10' whether to
use log base 10 normalize weight+1.
null
If flag is set, then samples in condition1 and condition2 are shuffled
shuffleGenes
If flag is set, we run null tests by shuffling the genes instead of labels
seed
Seed for random sampling
Returns
-------
Y1
Pandas dataframe containing condition 1 expression data
Y2
Pandas dataframe containing condition 2 expression data
X1
Pandas dataframe containing TF expression across condition 1 samples
X2
Pandas dataframe containing TF expression across condition 2 samples
S1
Pandas dataframe containing network 1. Columns are TFs and rows are
regulated genes. Values are the weights of the TF-gene regulation.
S2
Pandas dataframe containing network 2. Columsn are TFs and rows are
regulated genes. Values are the weights of the TF-gene regulation.
conditioning_val
Soft thresholding value used to non TF-gene putative interactions
"""
Y1 = _exp_df(c1)
Y2 = _exp_df(c2)
S1, conditioning_val = _network_df(n1, Y1, conditioning,
weightNormalize=weightNormalize)
S2, _ = _network_df(n2, Y2, conditioning, weightNormalize=weightNormalize)
if n1 != n2:
genes = list(set(np.concatenate([S1.index, S2.index])))
tfs = list(set(np.concatenate([S1.columns, S2.columns])))
S1 = S1.ix[genes, tfs].fillna(conditioning_val)
S2 = S2.ix[genes, tfs].fillna(conditioning_val)
Y1 = Y1.ix[S1.index, :].fillna(0)
Y2 = Y2.ix[S2.index, :].fillna(0)
else:
Y1 = Y1.ix[S1.index, :].fillna(0)
Y2 = Y2.ix[S2.index, :].fillna(0)
X1 = Y1.ix[S1.columns, :]
X2 = Y2.ix[S2.columns, :]
_eval_indices(Y1, Y2, S1, S2)
if null:
Y1, Y2, X1, X2 = _shuffled_inputs(Y1, Y2, X1, X2,
seed=seed, shuffleGenes=shuffleGenes)
return (Y1, Y2, X1, X2, S1, S2, conditioning_val)
def _gcp(x, r):
    """Accumulate the graph-constrained penalty for one TF weight column.

    Used as the fold function of ``tf.foldl``: ``x`` is the running total and
    ``r`` is the weight column for a single TF. The nonzero entries (target
    genes) are gathered, every pairwise difference between them is squashed
    through tanh, and half of the summed absolute differences is added to the
    accumulator (halved because each unordered pair appears twice in the
    difference matrix).

    Parameters
    ----------
    x
        Running penalty total from previous folds
    r
        Weights of the TF matrix

    Returns
    -------
    float
        Updated accumulator including this column's pairwise weight penalty
    """
    nonzero_idx = tf.where(tf.abs(r) > 0)
    weights_row = tf.expand_dims(tf.gather_nd(r, nonzero_idx), 0)
    pairwise = tf.tanh(tf.transpose(weights_row) - weights_row)
    penalty = tf.divide(tf.reduce_sum(tf.abs(pairwise)), 2)
    return x + penalty
def run_model(Y, X, S, step, itr, log_itr, seed,
              l_reg=1e-4, g_reg=1e-4, stopthreshold=0.01, val=0,
              model='epee-gcl'):
    """To run sparse linear model.

    There are two sparse linear model implemented: lasso and
    graph-constrained-lasso. By default, method runs graph-constrained-lasso.

    Parameters
    ----------
    Y
        Pandas dataframe containing expression data. Rows are genes and
        columns are samples. Values are log(RPKM/TPM/FPKM + 1)
    X
        Pandas dataframe containing expression data of TFs. Rows are TFs and
        columns are samples. Values are log(RPKM/TPM/FPKM + 1)
    S
        Pandas dataframe containing network. Rows are genes and columns are
        TFs. Values are weight corresponding to the TF regulating a gene
    step
        learning rate for the optimizer
    itr
        maximum number of optimization iterations
    log_itr
        Iterations to log the loss and percent change
        NOTE(review): currently unused -- the loop checks/logs the loss
        every 100 iterations regardless of this value; confirm whether it
        should drive the ``s % 100`` check below.
    seed
        Setting the tensforflow random seed
    l_reg
        lasso regularization constant
    g_reg
        graph constrained regularization constant
    stopthreshold
        threshold when to stop learning the model if the loss change is 0.1
        between the previous iteration and the current interation
        NOTE(review): currently unused -- early stopping compares the loss
        delta against the hard-coded value 1, not this parameter.
    val
        weight given to the not known TF-gene pairs
    model
        model to use for regulator and perturbed gene inference score.
        One of 'no-penalty', 'epee-l', 'epee-gcl'; any other value leaves
        ``loss`` undefined and triggers a NameError below.

    Returns
    -------
    curr_y
        Inferred Y
    curr_w
        Inferred W
    loss_arr
        Loss per each iteration
    """
    tf.set_random_seed(seed)
    genes, samples = Y.shape
    regulators = X.shape[0]
    # Binary mask of "known" TF-gene interactions: 1 where the network weight
    # exceeds the soft-threshold `val`, 0 elsewhere (used by the GC penalty).
    S_h = np.copy(S)
    S_h = np.float32(S_h > val)
    with tf.Graph().as_default():
        # Weights start small and positive: sampled from Gamma(alpha=1, beta=30).
        w = tf.Variable(
            tf.random_gamma([genes, regulators], alpha=1, beta=30,
                            dtype=tf.float32, seed=seed))
        if model == 'no-penalty':
            # Plain least-squares reconstruction: Y ~ (w * S) X, no regularizer.
            y = tf.matmul(tf.multiply(w, S), X)
            # loss
            loss = tf.reduce_mean(tf.square(Y - y))
        if model == 'epee-l':
            # least squares loss along with L1 regularization
            y = tf.matmul(tf.multiply(w, S), X)
            # loss
            loss = tf.reduce_mean(
                tf.square(Y - y))+tf.multiply(tf.reduce_sum(tf.abs(w)), l_reg)
        if model == 'epee-gcl':
            # Least squares + L1 + graph-constrained penalty, the latter folded
            # per TF column over the masked weights via _gcp.
            y = tf.matmul(tf.multiply(w, S), X)
            wa = tf.transpose(
                tf.multiply(tf.expand_dims(w, 1), tf.expand_dims(S_h, 1)))
            gc = tf.foldl(
                _gcp, wa, initializer=tf.constant(0, dtype=tf.float32),
                parallel_iterations=10, back_prop=False, swap_memory=True)
            loss = tf.reduce_mean(
                tf.square(Y-y)) + tf.multiply(
                    tf.reduce_sum(tf.abs(w)), l_reg) + tf.multiply(gc, g_reg)
        # optimizer
        optimizer = tf.train.AdamOptimizer(learning_rate=step, epsilon=1e-8)
        train = optimizer.minimize(loss)
        # training loop
        init = tf.global_variables_initializer()  # before starting init var
        sess = tf.Session()  # launch the graph
        sess.run(init)  # reset values to wrong
        loss_arr = []
        # outarr = []
        for s in range(itr):
            sess.run(train)
            if s % 100 == 0:
                # Evaluate the loss only every 100 steps (see NOTE on log_itr).
                curr_loss = sess.run(loss)
                if np.isnan(curr_loss):
                    raise RuntimeError('NAN value is computed for the loss.\
                    Make sure that the inputs are RPKM, TPM, FPKM normalized\
                    without any transformation (ie. log). Also can try to \
                    lower the learning rate.')
                loss_arr.append(curr_loss)
                if len(loss_arr) >= 2:
                    delta = loss_arr[-2] - loss_arr[-1]
                    logging.debug((s, curr_loss, delta))
                    # Stop once improvement between checks drops below the
                    # hard-coded 1 (see NOTE on stopthreshold).
                    if delta < 1:
                        logging.debug("TRANING FINISHED")
                        break
                else:
                    logging.debug((s, curr_loss))
        curr_w, curr_y, curr_loss = sess.run([w, y, loss])
    return (curr_y, curr_w, loss_arr)
def get_perturb_scores(Y1, y1, X1, w1, Y2, y2, X2, w2, S1, S2):
    """To get perturb scores.

    Compares the reconstruction error obtained with each condition's own
    inferred weights against the error obtained after swapping the weights
    between conditions; the per-gene ratio (swapped / within) is the perturb
    score. Returns a dataframe sorted by score in descending order.

    Parameters
    ----------
    Y1, Y2
        Pandas dataframes with observed expression per condition
    y1, y2
        Pandas dataframes with inferred expression per condition
    X1, X2
        Pandas dataframes with TF expression per condition
    w1, w2
        Pandas dataframes with inferred TF-gene weights per condition
    S1, S2
        Pandas dataframes with the networks (rows genes, columns TFs)

    Returns
    -------
    Pandas dataframe with columns 'gene' and 'score', sorted descending
    """
    def _sq_err(observed, predicted):
        # Row-wise (per-gene) sum of squared residuals.
        return np.square(observed - predicted).sum(axis=1)

    within = _sq_err(Y1, y1) + _sq_err(Y2, y2)
    swapped = (_sq_err(Y2, np.dot(np.multiply(w1, S1), X2))
               + _sq_err(Y1, np.dot(np.multiply(w2, S2), X1)))
    ratio = swapped / within
    result = pd.DataFrame({'gene': ratio.index, 'score': ratio})
    return result.sort_values(by='score', ascending=False)
def get_summary_scoresdf(df, metric='sum'):
    """To calculate scores from multiple models.

    Each independent model generates perturb and regulatory score. The function
    calculates summary score for each perturb gene and assigns that score to
    the gene. The function returns dataframe with summary scores sorted.

    Parameters
    ----------
    df
        Pandas dataframe containing scores from multiple models. The first
        column is the gene name; the remaining columns hold one score per
        model.
    metric
        Metric used to summarize the score from multiple models.
        'sum', 'mean' and 'median' are valid options. Default = 'sum'

    Returns
    -------
    out_df_sort
        Pandas dataframe with columns 'gene' and 'score', sorted descending
        with a fresh integer index

    Raises
    ------
    ValueError
        If `metric` is not one of 'sum', 'mean' or 'median'.
    """
    # Summarize across all score columns (everything after the gene column).
    scores = df.iloc[:, 1:]
    if metric == 'median':
        df_score = scores.median(axis=1)
    elif metric == 'sum':
        df_score = scores.sum(axis=1)
    elif metric == 'mean':
        df_score = scores.mean(axis=1)
    else:
        # Previously an unknown metric fell through and raised a confusing
        # NameError; fail explicitly with the documented options instead.
        raise ValueError(
            "Invalid metric '{}': expected 'sum', 'mean' or 'median'".format(metric))
    out_df = pd.DataFrame({'gene': df['gene'], 'score': df_score})
    out_df_sort = out_df.sort_values(by='score', ascending=False)
    out_df_sort.reset_index(inplace=True, drop=True)
    return out_df_sort
def get_weights_df(w, genes, tfs):
    """Wrap an inferred weight matrix in a labeled Pandas dataframe.

    Parameters
    ----------
    w
        numpy ndarray containing the inferred TF-gene weights
    genes
        rownames of the w
    tfs
        column names of the w

    Returns
    -------
    df
        Pandas dataframe containing TF-gene inferred weights W
    """
    # Labeling via the constructor is equivalent to assigning
    # .index / .columns after construction.
    return pd.DataFrame(w, index=genes, columns=tfs)
def get_regulator_scores(perturb_genes, W):
"""To provide regulator score given weights and perturb genes.
Function calculates the regulator score given list of perturb genes and
inferred W.
Parameters
----------
perturb_genes
list of perturb genes
W
Pandas dataframe containing TF-gene inferred weights W
Returns
-------
sort_scores_df
Pandas dataframe containing the regulatory scores
"""
# perturb_genes
W_perturb = W.iloc[W.index.isin(perturb_genes), ]
# not perturb_genes
W_notperturb = W.iloc[~W.index.isin(perturb_genes), ]
reg_score_outarr = []
for reg in W.columns:
num_score = np.sum(W_perturb[reg])
not_reg_df = W_perturb.iloc[:, ~W_perturb.columns.isin([reg])]
dem_score1 = not_reg_df.sum().sum()
dem_score2 = np.sum(W_notperturb[reg])
if dem_score1 == 0:
dem_score1 = 1e-6
if num_score == 0:
score = 0
else:
score = (num_score/np.sqrt(dem_score1*dem_score2))
reg_score_outarr.append(score)
score_df | |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class PoolOperations(object):
"""PoolOperations operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API Version. Constant value: "2020-09-01.12.0".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
    """Store the service client, configuration and (de)serializers."""
    # Pipeline client used to build and send requests.
    self._client = client
    # Serializer/deserializer pair for request bodies and responses.
    self._serialize = serializer
    self._deserialize = deserializer
    # Client API Version sent with every request (constant for this SDK build).
    self.api_version = "2020-09-01.12.0"
    self.config = config
def list_usage_metrics(
        self, pool_list_usage_metrics_options=None, custom_headers=None, raw=False, **operation_config):
    """Lists the usage metrics, aggregated by Pool across individual time
    intervals, for the specified Account.

    If you do not specify a $filter clause including a poolId, the response
    includes all Pools that existed in the Account in the time range of the
    returned aggregation intervals. If you do not specify a $filter clause
    including a startTime or endTime these filters default to the start and
    end times of the last aggregation interval currently available; that
    is, only the last aggregation interval is returned.

    :param pool_list_usage_metrics_options: Additional parameters for the
     operation
    :type pool_list_usage_metrics_options:
     ~azure.batch.models.PoolListUsageMetricsOptions
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of PoolUsageMetrics
    :rtype:
     ~azure.batch.models.PoolUsageMetricsPaged[~azure.batch.models.PoolUsageMetrics]
    :raises:
     :class:`BatchErrorException<azure.batch.models.BatchErrorException>`
    """
    # Unpack the optional options bag into locals; each attribute is only
    # sent on the wire when it is not None.
    start_time = None
    if pool_list_usage_metrics_options is not None:
        start_time = pool_list_usage_metrics_options.start_time
    end_time = None
    if pool_list_usage_metrics_options is not None:
        end_time = pool_list_usage_metrics_options.end_time
    filter = None
    if pool_list_usage_metrics_options is not None:
        filter = pool_list_usage_metrics_options.filter
    max_results = None
    if pool_list_usage_metrics_options is not None:
        max_results = pool_list_usage_metrics_options.max_results
    timeout = None
    if pool_list_usage_metrics_options is not None:
        timeout = pool_list_usage_metrics_options.timeout
    client_request_id = None
    if pool_list_usage_metrics_options is not None:
        client_request_id = pool_list_usage_metrics_options.client_request_id
    return_client_request_id = None
    if pool_list_usage_metrics_options is not None:
        return_client_request_id = pool_list_usage_metrics_options.return_client_request_id
    ocp_date = None
    if pool_list_usage_metrics_options is not None:
        ocp_date = pool_list_usage_metrics_options.ocp_date

    def prepare_request(next_link=None):
        # Build the first request from the operation metadata, or follow the
        # server-supplied next_link verbatim (its query string is already
        # encoded, so no query parameters are re-applied on later pages).
        if not next_link:
            # Construct URL
            url = self.list_usage_metrics.metadata['url']
            path_format_arguments = {
                'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True)
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            if start_time is not None:
                query_parameters['starttime'] = self._serialize.query("start_time", start_time, 'iso-8601')
            if end_time is not None:
                query_parameters['endtime'] = self._serialize.query("end_time", end_time, 'iso-8601')
            if filter is not None:
                query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
            if max_results is not None:
                query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1)
            if timeout is not None:
                query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
        else:
            url = next_link
            query_parameters = {}
        # Construct headers (applied to every page).
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            # Auto-generated id; overwritten below when the caller supplied one.
            header_parameters['client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        if client_request_id is not None:
            header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
        if return_client_request_id is not None:
            header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
        if ocp_date is not None:
            header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        return request

    def internal_paging(next_link=None):
        # Called by the paged iterator for each page; raises on any
        # non-200 response.
        request = prepare_request(next_link)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            raise models.BatchErrorException(self._deserialize, response)
        return response

    # Deserialize response
    header_dict = None
    if raw:
        # A dict here makes the paged object surface the raw response headers.
        header_dict = {}
    deserialized = models.PoolUsageMetricsPaged(internal_paging, self._deserialize.dependencies, header_dict)
    return deserialized
list_usage_metrics.metadata = {'url': '/poolusagemetrics'}
def get_all_lifetime_statistics(
        self, pool_get_all_lifetime_statistics_options=None, custom_headers=None, raw=False, **operation_config):
    """Gets lifetime summary statistics for all of the Pools in the specified
    Account.

    Statistics are aggregated across all Pools that have ever existed in
    the Account, from Account creation to the last update time of the
    statistics. The statistics may not be immediately available. The Batch
    service performs periodic roll-up of statistics. The typical delay is
    about 30 minutes.

    :param pool_get_all_lifetime_statistics_options: Additional parameters
     for the operation
    :type pool_get_all_lifetime_statistics_options:
     ~azure.batch.models.PoolGetAllLifetimeStatisticsOptions
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: PoolStatistics or ClientRawResponse if raw=true
    :rtype: ~azure.batch.models.PoolStatistics or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`BatchErrorException<azure.batch.models.BatchErrorException>`
    """
    # Unpack each per-request option from the (optional) options bag.
    timeout = None
    if pool_get_all_lifetime_statistics_options is not None:
        timeout = pool_get_all_lifetime_statistics_options.timeout
    client_request_id = None
    if pool_get_all_lifetime_statistics_options is not None:
        client_request_id = pool_get_all_lifetime_statistics_options.client_request_id
    return_client_request_id = None
    if pool_get_all_lifetime_statistics_options is not None:
        return_client_request_id = pool_get_all_lifetime_statistics_options.return_client_request_id
    ocp_date = None
    if pool_get_all_lifetime_statistics_options is not None:
        ocp_date = pool_get_all_lifetime_statistics_options.ocp_date

    # Construct URL
    url = self.get_all_lifetime_statistics.metadata['url']
    path_format_arguments = {
        'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True)
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct query parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')

    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = 'application/json'
    if self.config.generate_client_request_id:
        # Auto-generated correlation id; overwritten below if the caller
        # supplied an explicit client_request_id.
        header_parameters['client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    if client_request_id is not None:
        header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
    if return_client_request_id is not None:
        header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
    if ocp_date is not None:
        header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')

    # Construct and send request
    request = self._client.get(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)

    # Only 200 is a success for this operation.
    if response.status_code not in [200]:
        raise models.BatchErrorException(self._deserialize, response)

    header_dict = {}
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('PoolStatistics', response)
        # Response headers of interest, with their deserialization types.
        header_dict = {
            'client-request-id': 'str',
            'request-id': 'str',
            'ETag': 'str',
            'Last-Modified': 'rfc-1123',
        }

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        client_raw_response.add_headers(header_dict)
        return client_raw_response

    return deserialized
get_all_lifetime_statistics.metadata = {'url': '/lifetimepoolstats'}
def add(
        self, pool, pool_add_options=None, custom_headers=None, raw=False, **operation_config):
    """Adds a Pool to the specified Account.

    When naming Pools, avoid including sensitive information such as user
    names or secret project names. This information may appear in telemetry
    logs accessible to Microsoft Support engineers.

    :param pool: The Pool to be added.
    :type pool: ~azure.batch.models.PoolAddParameter
    :param pool_add_options: Additional parameters for the operation
    :type pool_add_options: ~azure.batch.models.PoolAddOptions
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`BatchErrorException<azure.batch.models.BatchErrorException>`
    """
    # Unpack each per-request option from the (optional) options bag.
    timeout = None
    if pool_add_options is not None:
        timeout = pool_add_options.timeout
    client_request_id = None
    if pool_add_options is not None:
        client_request_id = pool_add_options.client_request_id
    return_client_request_id = None
    if pool_add_options is not None:
        return_client_request_id = pool_add_options.return_client_request_id
    ocp_date = None
    if pool_add_options is not None:
        ocp_date = pool_add_options.ocp_date

    # Construct URL
    url = self.add.metadata['url']
    path_format_arguments = {
        'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True)
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct query parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8'
    if self.config.generate_client_request_id:
        # Auto-generated correlation id; overwritten below if the caller
        # supplied an explicit client_request_id.
        header_parameters['client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    if client_request_id is not None:
        header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
    if return_client_request_id is not None:
        header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
    if ocp_date is not None:
        header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')

    # Construct body
    body_content = self._serialize.body(pool, 'PoolAddParameter')

    # Construct and send request
    request = self._client.post(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    # 201 Created is the only success status for an add.
    if response.status_code not in [201]:
        raise models.BatchErrorException(self._deserialize, response)

    if raw:
        client_raw_response = ClientRawResponse(None, response)
        client_raw_response.add_headers({
            'client-request-id': 'str',
            'request-id': 'str',
            'ETag': 'str',
            'Last-Modified': 'rfc-1123',
            'DataServiceId': 'str',
        })
        return client_raw_response
add.metadata = {'url': '/pools'}
def list(
self, pool_list_options=None, custom_headers=None, raw=False, **operation_config):
"""Lists all of the Pools in the specified Account.
:param pool_list_options: Additional parameters for the operation
:type pool_list_options: ~azure.batch.models.PoolListOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of CloudPool
:rtype:
~azure.batch.models.CloudPoolPaged[~azure.batch.models.CloudPool]
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
filter = None
if pool_list_options is not None:
filter = pool_list_options.filter
select = None
if pool_list_options is not None:
select = pool_list_options.select
expand = None
| |
<reponame>noorbeast/BlenderSource<filename>release/scripts/addons_contrib/sun_position/sun_calc.py<gh_stars>1-10
from mathutils import *
import math
import datetime
from . properties import *
Degrees = "\xb0"
def format_time(theTime, UTCzone, daylightSavings, longitude):
    """Format a decimal-hours time as local and UTC display strings.

    :param theTime: local time of day in decimal hours (e.g. 12.5 == 12:30).
    :param UTCzone: magnitude of the UTC offset, in whole hours.
    :param daylightSavings: when True, one hour is added to the offset.
    :param longitude: decimal degrees; a negative (western) longitude flips
        the sign of the zone offset, matching this add-on's convention.
    :return: tuple ``("Local: H:MM:SS", "UTC: H:MM:SS")``.
    """
    # Fix: renamed 'min'/'sec' locals so the builtin min() is not shadowed.
    hh = str(int(theTime))
    mins = (theTime - int(theTime)) * 60
    sec = int((mins - int(mins)) * 60)
    mm = "0" + str(int(mins)) if mins < 10 else str(int(mins))
    ss = "0" + str(sec) if sec < 10 else str(sec)

    zone = UTCzone
    if longitude < 0:
        zone *= -1
    if daylightSavings:
        zone += 1
    # UTC differs only in the whole-hour part; wrap into [0, 24).
    gt = int(theTime) - zone
    if gt < 0:
        gt += 24
    elif gt > 23:
        gt -= 24
    gt = str(gt)

    return ("Local: " + hh + ":" + mm + ":" + ss,
            "UTC: " + gt + ":" + mm + ":" + ss)
def format_hms(theTime):
    """Return a decimal-hours value (0.0 - 24.0) as an "H:MM:SS" string."""
    hours = int(theTime)
    minutes = (theTime - hours) * 60
    seconds = int((minutes - int(minutes)) * 60)
    # Minutes and seconds are zero-padded; hours are not.
    return "{}:{:02d}:{:02d}".format(hours, int(minutes), seconds)
def format_lat_long(latLong, isLatitude):
    """Return *latLong* (decimal degrees) as a DMS string with an N/S/E/W tag."""
    whole_degrees = int(latLong)
    dd = str(abs(whole_degrees))
    minutes_f = abs((latLong - whole_degrees) * 60)
    seconds = abs(int((minutes_f - int(minutes_f)) * 60))
    mm = "{:02d}".format(int(minutes_f))
    ss = "{:02d}".format(seconds)

    # Hemisphere suffix: blank for exactly zero, otherwise by sign and axis.
    if latLong == 0:
        hemisphere = " "
    elif isLatitude:
        hemisphere = " N" if latLong > 0 else " S"
    else:
        hemisphere = " E" if latLong > 0 else " W"

    return dd + Degrees + " " + mm + "' " + ss + '"' + hemisphere
############################################################################
#
# Move_sun() will cycle through all the selected objects of type LAMP or
# MESH and call setSunPosition() to place them in the sky.
#
############################################################################
def Move_sun():
    """Update the sun placement (HDR rotation, sky texture, sun object and
    any selected object group) from the current global ``Sun`` settings.

    Returns True on success; False when an object group was requested but
    none is usable.
    """
    if Sun.PP.UsageMode == "HDR":
        # HDR mode: elevation/azimuth come straight from the UI; rotate the
        # environment texture rather than computing an astronomical position.
        Sun.Theta = math.pi / 2 - degToRad(Sun.Elevation)
        Sun.Phi = -Sun.Azimuth

        # Spherical -> Cartesian sun location at SunDistance.
        locX = math.sin(Sun.Phi) * math.sin(-Sun.Theta) * Sun.SunDistance
        locY = math.sin(Sun.Theta) * math.cos(Sun.Phi) * Sun.SunDistance
        locZ = math.cos(Sun.Theta) * Sun.SunDistance

        try:
            nt = bpy.context.scene.world.node_tree.nodes
            envTex = nt.get(Sun.HDR_texture)

            if Sun.Bind.azDiff and envTex.texture_mapping.rotation.z == 0.0:
                envTex.texture_mapping.rotation.z = Sun.Bind.azDiff

            if envTex and Sun.BindToSun:
                # Rotate the texture by the azimuth change since last update.
                az = Sun.Azimuth
                if Sun.Bind.azStart < az:
                    taz = az - Sun.Bind.azStart
                else:
                    taz = -(Sun.Bind.azStart - az)
                envTex.texture_mapping.rotation.z += taz
                Sun.Bind.azStart = az

            obj = bpy.context.scene.objects.get(Sun.SunObject)
            try:
                obj.location = locX, locY, locZ
            except:
                pass

            if obj.type == 'LAMP':
                obj.rotation_euler = (
                    (math.radians(Sun.Elevation - 90), 0, -Sun.Azimuth))
        except:
            # Best effort: the scene may lack a node tree, texture or object.
            pass
        return True

    totalObjects = len(Sun.Selected_objects)

    localTime = Sun.Time
    # Zone sign convention flips with the hemisphere of the longitude.
    if Sun.Longitude > 0:
        zone = Sun.UTCzone * -1
    else:
        zone = Sun.UTCzone
    if Sun.DaylightSavings:
        zone -= 1
    northOffset = radToDeg(Sun.NorthOffset)

    if Sun.ShowRiseSet:
        # 1 == sunrise, 0 == sunset; results are stored on Sun.
        calcSunrise_Sunset(1)
        calcSunrise_Sunset(0)

    getSunPosition(None, localTime, Sun.Latitude, Sun.Longitude,
                   northOffset, zone, Sun.Month, Sun.Day, Sun.Year,
                   Sun.SunDistance)

    if Sun.UseSkyTexture:
        try:
            nt = bpy.context.scene.world.node_tree.nodes
            sunTex = nt.get(Sun.SkyTexture)
            if sunTex:
                # Unit direction vector for the sky shader.
                locX = math.sin(Sun.Phi) * math.sin(-Sun.Theta)
                locY = math.sin(Sun.Theta) * math.cos(Sun.Phi)
                locZ = math.cos(Sun.Theta)
                sunTex.texture_mapping.rotation.z = 0.0
                sunTex.sun_direction = locX, locY, locZ
        except:
            pass

    if Sun.UseSunObject:
        try:
            obj = bpy.context.scene.objects.get(Sun.SunObject)
            setSunPosition(obj, Sun.SunDistance)
            if obj.type == 'LAMP':
                # Point the lamp back at the origin.
                obj.rotation_euler = (
                    (math.radians(Sun.Elevation - 90), 0,
                     math.radians(-Sun.AzNorth)))
        except:
            pass

    if totalObjects < 1 or not Sun.UseObjectGroup:
        return False

    if Sun.ObjectGroup == 'ECLIPTIC':
        # Ecliptic: spread the objects over the day by a fixed time step.
        if totalObjects > 1:
            timeIncrement = Sun.TimeSpread / (totalObjects - 1)
            localTime = localTime + timeIncrement * (totalObjects - 1)
        else:
            timeIncrement = Sun.TimeSpread
        for obj in Sun.Selected_objects:
            mesh = obj.type
            if mesh == 'LAMP' or mesh == 'MESH':
                getSunPosition(obj,
                               localTime,
                               Sun.Latitude, Sun.Longitude,
                               northOffset, zone,
                               Sun.Month, Sun.Day, Sun.Year,
                               Sun.SunDistance)
                setSunPosition(obj, Sun.SunDistance)
                localTime = localTime - timeIncrement
                if mesh == 'LAMP':
                    obj.rotation_euler = (
                        (math.radians(Sun.Elevation - 90), 0,
                         math.radians(-Sun.AzNorth)))
    else:
        # Analemma: spread the objects over the year at a fixed local time.
        dayIncrement = 365 / totalObjects
        day = Sun.Day_of_year + dayIncrement * (totalObjects - 1)
        for obj in Sun.Selected_objects:
            mesh = obj.type
            if mesh == 'LAMP' or mesh == 'MESH':
                # Convert ordinal day-of-year to a calendar month/day.
                dt = (datetime.date(Sun.Year, 1, 1) +
                      datetime.timedelta(day - 1))
                getSunPosition(obj, localTime,
                               Sun.Latitude, Sun.Longitude,
                               northOffset, zone, dt.month, dt.day,
                               Sun.Year, Sun.SunDistance)
                setSunPosition(obj, Sun.SunDistance)
                day -= dayIncrement
                if mesh == 'LAMP':
                    obj.rotation_euler = (
                        (math.radians(Sun.Elevation - 90), 0,
                         math.radians(-Sun.AzNorth)))
    return True
############################################################################
#
# Calculate the actual position of the sun based on input parameters.
#
# The sun positioning algorithms below are based on the National Oceanic
# and Atmospheric Administration's (NOAA) Solar Position Calculator
# which rely on calculations of Jean Meeus' book "Astronomical Algorithms."
# Use of NOAA data and products are in the public domain and may be used
# freely by the public as outlined in their policies at
# www.nws.noaa.gov/disclaimer.php
#
# The calculations of this script can be verified with those of NOAA's
# using the Azimuth and Solar Elevation displayed in the SunPos_Panel.
# NOAA's web site is:
# http://www.esrl.noaa.gov/gmd/grad/solcalc
############################################################################
def getSunPosition(obj, localTime, latitude, longitude, northOffset,
                   utcZone, month, day, year, distance):
    """Compute solar azimuth/elevation (NOAA/Meeus algorithm, see banner
    above) and publish the results on the global ``Sun`` state
    (AzNorth, Theta, Phi, Azimuth, Elevation).

    NOTE(review): ``obj`` and ``distance`` are accepted but unused here;
    actual object placement happens in setSunPosition().
    """
    longitude *= -1   # for internal calculations
    utcTime = localTime + utcZone   # Set Greenwich Meridian Time

    if latitude > 89.93:            # Latitude 90 and -90 gives
        latitude = degToRad(89.93)  # erroneous results so nudge it
    elif latitude < -89.93:
        latitude = degToRad(-89.93)
    else:
        latitude = degToRad(latitude)

    # Julian centuries since J2000 and derived solar quantities.
    t = julianTimeFromY2k(utcTime, year, month, day)

    e = degToRad(obliquityCorrection(t))
    L = apparentLongitudeOfSun(t)
    solarDec = sunDeclination(e, L)
    eqtime = calcEquationOfTime(t)

    # Time correction in minutes (equation of time + longitude + zone).
    timeCorrection = (eqtime - 4 * longitude) + 60 * utcZone
    trueSolarTime = ((utcTime - utcZone) * 60.0 + timeCorrection) % 1440

    # Hour angle in degrees, normalized to (-180, 180].
    hourAngle = trueSolarTime / 4.0 - 180.0
    if hourAngle < -180.0:
        hourAngle += 360.0

    # Cosine of the solar zenith angle, clamped into acos() domain.
    csz = (math.sin(latitude) * math.sin(solarDec) +
           math.cos(latitude) * math.cos(solarDec) *
           math.cos(degToRad(hourAngle)))
    if csz > 1.0:
        csz = 1.0
    elif csz < -1.0:
        csz = -1.0

    zenith = math.acos(csz)

    azDenom = math.cos(latitude) * math.sin(zenith)

    if abs(azDenom) > 0.001:
        azRad = ((math.sin(latitude) *
                  math.cos(zenith)) - math.sin(solarDec)) / azDenom
        if abs(azRad) > 1.0:
            azRad = -1.0 if (azRad < 0.0) else 1.0
        azimuth = 180.0 - radToDeg(math.acos(azRad))
        if hourAngle > 0.0:
            azimuth = -azimuth
    else:
        # Sun (nearly) at the zenith/nadir: azimuth is degenerate.
        azimuth = 180.0 if (latitude > 0.0) else 0.0

    if azimuth < 0.0:
        azimuth = azimuth + 360.0

    # Atmospheric refraction correction in degrees (piecewise, per NOAA).
    exoatmElevation = 90.0 - radToDeg(zenith)
    if exoatmElevation > 85.0:
        refractionCorrection = 0.0
    else:
        te = math.tan(degToRad(exoatmElevation))
        if exoatmElevation > 5.0:
            refractionCorrection = (
                58.1 / te - 0.07 / (te ** 3) + 0.000086 / (te ** 5))
        elif (exoatmElevation > -0.575):
            s1 = (-12.79 + exoatmElevation * 0.711)
            s2 = (103.4 + exoatmElevation * (s1))
            s3 = (-518.2 + exoatmElevation * (s2))
            refractionCorrection = 1735.0 + exoatmElevation * (s3)
        else:
            refractionCorrection = -20.774 / te
        # Arc-seconds -> degrees.
        refractionCorrection = refractionCorrection / 3600

    if Sun.ShowRefraction:
        solarElevation = 90.0 - (radToDeg(zenith) - refractionCorrection)
    else:
        solarElevation = 90.0 - radToDeg(zenith)

    solarAzimuth = azimuth + northOffset

    # Publish results on the shared Sun state.
    Sun.AzNorth = solarAzimuth

    Sun.Theta = math.pi / 2 - degToRad(solarElevation)
    Sun.Phi = degToRad(solarAzimuth) * -1
    Sun.Azimuth = azimuth
    Sun.Elevation = solarElevation
def setSunPosition(obj, distance):
    """Move *obj* to the sun location implied by Sun.Theta / Sun.Phi.

    Theta is the zenith angle and Phi the (negated) azimuth; the point is
    placed *distance* units from the origin.
    """
    x = math.sin(Sun.Phi) * math.sin(-Sun.Theta) * distance
    y = math.sin(Sun.Theta) * math.cos(Sun.Phi) * distance
    z = math.cos(Sun.Theta) * distance
    #----------------------------------------------
    # Update selected object in viewport
    #----------------------------------------------
    try:
        obj.location = x, y, z
    except:
        # Object may be None or non-placeable; silently skip.
        pass
def calcSunriseSetUTC(rise, jd, latitude, longitude):
    """Return sunrise (rise truthy) or sunset time for Julian day *jd*,
    expressed as minutes from UTC midnight."""
    century = calcTimeJulianCent(jd)
    eq_minutes = calcEquationOfTime(century)
    declination = calcSunDeclination(century)
    hour_angle = calcHourAngleSunrise(latitude, declination)
    # Sunset is the mirror of sunrise about solar noon.
    if not rise:
        hour_angle = -hour_angle
    delta_deg = longitude + radToDeg(hour_angle)
    # 720 min == solar noon; 4 minutes of time per degree.
    return 720 - (4.0 * delta_deg) - eq_minutes
def calcSunDeclination(t):
    """Return the solar declination (radians) at Julian century *t*."""
    obliquity = degToRad(obliquityCorrection(t))
    apparent_longitude = apparentLongitudeOfSun(t)
    return sunDeclination(obliquity, apparent_longitude)
def calcHourAngleSunrise(lat, solarDec):
    """Return the hour angle (radians) of sunrise for latitude *lat* in
    degrees and solar declination *solarDec* in radians."""
    latRad = degToRad(lat)
    # 90.833 degrees accounts for refraction and the solar disc radius.
    cos_ha = (math.cos(degToRad(90.833)) /
              (math.cos(latRad) * math.cos(solarDec)) -
              math.tan(latRad) * math.tan(solarDec))
    # Clamp into the acos() domain; out-of-range values correspond to
    # polar day/night where the sun never rises or sets.
    cos_ha = max(-1.0, min(1.0, cos_ha))
    return math.acos(cos_ha)
def calcSolarNoon(jd, longitude, timezone, dst):
    """Compute local solar noon (decimal hours) and store it on
    ``Sun.SolarNoon.time``."""
    # First pass: approximate noon, then re-evaluate the equation of time
    # at that refined moment for better accuracy.
    t_guess = calcTimeJulianCent(jd - longitude / 360.0)
    eq_minutes = calcEquationOfTime(t_guess)
    noon_offset = 720.0 - (longitude * 4.0) - eq_minutes
    t_refined = calcTimeJulianCent(jd + noon_offset / 1440.0)
    eq_minutes = calcEquationOfTime(t_refined)

    base_minutes = 780.0 if dst else 720.0
    local_minutes = (base_minutes - (longitude * 4.0) - eq_minutes +
                     (timezone * 60.0)) % 1440
    Sun.SolarNoon.time = local_minutes / 60.0
def calcSunrise_Sunset(rise):
if Sun.Longitude > 0:
zone = Sun.UTCzone * -1
else:
zone = Sun.UTCzone
jd = getJulianDay(Sun.Year, Sun.Month, Sun.Day)
timeUTC = calcSunriseSetUTC(rise, jd, Sun.Latitude, Sun.Longitude)
newTimeUTC = calcSunriseSetUTC(rise, jd + timeUTC / 1440.0,
Sun.Latitude, Sun.Longitude)
timeLocal = newTimeUTC + (-zone | |
10:
log_contents = log_capture_string.getvalue()
if len(log_contents) > 0:
break
ray._private.import_thread.logger.removeHandler(ch)
assert "actor" in log_contents
assert "has been exported {} times.".format(
ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents
def test_redis_module_failure(ray_start_regular):
    """Exercise the Ray Redis module's argument-validation error paths."""
    address_info = ray_start_regular
    address = address_info["redis_address"]
    address = address.split(":")
    assert len(address) == 2

    def run_failure_test(expecting_message, *command):
        # The module must reject the command with a matching error message.
        with pytest.raises(
                Exception, match=".*{}.*".format(expecting_message)):
            client = redis.StrictRedis(
                host=address[0],
                port=int(address[1]),
                password=ray_constants.REDIS_DEFAULT_PASSWORD)
            client.execute_command(*command)

    def run_one_command(*command):
        # The command is expected to succeed; any error propagates.
        client = redis.StrictRedis(
            host=address[0],
            port=int(address[1]),
            password=ray_constants.REDIS_DEFAULT_PASSWORD)
        client.execute_command(*command)

    run_failure_test("wrong number of arguments", "RAY.TABLE_ADD", 13)
    run_failure_test("Prefix must be in the TablePrefix range",
                     "RAY.TABLE_ADD", 100000, 1, 1, 1)
    run_failure_test("Prefix must be in the TablePrefix range",
                     "RAY.TABLE_REQUEST_NOTIFICATIONS", 100000, 1, 1, 1)
    run_failure_test("Prefix must be a valid TablePrefix integer",
                     "RAY.TABLE_ADD", b"a", 1, 1, 1)
    run_failure_test("Pubsub channel must be in the TablePubsub range",
                     "RAY.TABLE_ADD", 1, 10000, 1, 1)
    run_failure_test("Pubsub channel must be a valid integer", "RAY.TABLE_ADD",
                     1, b"a", 1, 1)
    # Change the key from 1 to 2, since the previous command should have
    # succeeded at writing the key, but not publishing it.
    run_failure_test("Index is less than 0.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
                     -1)
    run_failure_test("Index is not a number.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
                     b"a")
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
    # It's okay to add duplicate entries.
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 0)
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 1)
    run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
    # It's okay to add duplicate entries.
    run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
    run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
    # It's okay to remove duplicate entries.
    run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
# Note that this test will take at least 10 seconds because it must wait for
# the monitor to detect enough missed heartbeats.
def test_warning_for_dead_node(ray_start_cluster_2_nodes, error_pubsub):
    """Killing both raylets must publish one REMOVED_NODE_ERROR per node."""
    cluster = ray_start_cluster_2_nodes
    cluster.wait_for_nodes()
    p = error_pubsub

    node_ids = {item["NodeID"] for item in ray.nodes()}

    # Try to make sure that the monitor has received at least one heartbeat
    # from the node.
    time.sleep(0.5)

    # Kill both raylets.
    cluster.list_all_nodes()[1].kill_raylet()
    cluster.list_all_nodes()[0].kill_raylet()

    # Check that we get warning messages for both raylets.
    errors = get_error_message(p, 2, ray_constants.REMOVED_NODE_ERROR, 40)

    # Extract the client IDs from the error messages. This will need to be
    # changed if the error message changes.
    warning_node_ids = {error.error_message.split(" ")[5] for error in errors}

    assert node_ids == warning_node_ids
def test_warning_for_dead_autoscaler(ray_start_regular, error_pubsub):
    """Terminating the monitor publishes and persists an autoscaler error."""
    # Terminate the autoscaler process.
    from ray.worker import _global_node
    autoscaler_process = _global_node.all_processes[
        ray_constants.PROCESS_TYPE_MONITOR][0].process
    autoscaler_process.terminate()

    # Confirm that we receive an autoscaler failure error.
    errors = get_error_message(
        error_pubsub, 1, ray_constants.MONITOR_DIED_ERROR, timeout=5)
    assert len(errors) == 1

    # Confirm that the autoscaler failure error is stored.
    error = _internal_kv_get(DEBUG_AUTOSCALING_ERROR)
    assert error is not None
def test_raylet_crash_when_get(ray_start_regular):
    """ray.get of a freed object raises ObjectLostError even if the raylet
    dies while the get is blocked."""
    def sleep_to_kill_raylet():
        # Don't kill raylet before default workers get connected.
        time.sleep(2)
        ray.worker._global_node.kill_raylet()

    object_ref = ray.put(np.zeros(200 * 1024, dtype=np.uint8))
    ray.internal.free(object_ref)

    # Kill the raylet in the background while ray.get blocks below.
    thread = threading.Thread(target=sleep_to_kill_raylet)
    thread.start()
    with pytest.raises(ray.exceptions.ObjectLostError):
        ray.get(object_ref)
    thread.join()
def test_connect_with_disconnected_node(shutdown_only):
    """REMOVED_NODE_ERROR is published once per SIGKILLed node, and not at
    all for gracefully (SIGTERM) removed nodes."""
    config = {
        "num_heartbeats_timeout": 50,
        "raylet_heartbeat_period_milliseconds": 10,
    }
    cluster = Cluster()
    cluster.add_node(num_cpus=0, _system_config=config)
    ray.init(address=cluster.address)
    p = init_error_pubsub()
    # Baseline: no errors yet.
    errors = get_error_message(p, 1, timeout=5)
    assert len(errors) == 0
    # This node is killed by SIGKILL, ray_monitor will mark it to dead.
    dead_node = cluster.add_node(num_cpus=0)
    cluster.remove_node(dead_node, allow_graceful=False)
    errors = get_error_message(p, 1, ray_constants.REMOVED_NODE_ERROR)
    assert len(errors) == 1
    # This node is killed by SIGKILL, ray_monitor will mark it to dead.
    dead_node = cluster.add_node(num_cpus=0)
    cluster.remove_node(dead_node, allow_graceful=False)
    errors = get_error_message(p, 1, ray_constants.REMOVED_NODE_ERROR)
    assert len(errors) == 1
    # This node is killed by SIGTERM, ray_monitor will not mark it again.
    removing_node = cluster.add_node(num_cpus=0)
    cluster.remove_node(removing_node, allow_graceful=True)
    errors = get_error_message(p, 1, timeout=2)
    assert len(errors) == 0
    # There is no connection error to a dead node.
    errors = get_error_message(p, 1, timeout=2)
    assert len(errors) == 0
    p.close()
@pytest.mark.parametrize(
    "ray_start_cluster_head", [{
        "num_cpus": 5,
        "object_store_memory": 10**8,
    }],
    indirect=True)
def test_parallel_actor_fill_plasma_retry(ray_start_cluster_head):
    """Concurrent actor tasks whose results overfill plasma still complete."""
    @ray.remote
    class LargeMemoryActor:
        def some_expensive_task(self):
            # Each result is half the object store, so five concurrent
            # results cannot all fit at once.
            return np.zeros(10**8 // 2, dtype=np.uint8)

    actors = [LargeMemoryActor.remote() for _ in range(5)]
    for _ in range(10):
        pending = [a.some_expensive_task.remote() for a in actors]
        # Drain results one at a time so earlier ones can be evicted.
        while pending:
            [done], pending = ray.wait(pending, num_returns=1)
def test_fill_object_store_exception(shutdown_only):
    """With spilling disabled, overfilling plasma raises
    ObjectStoreFullError for tasks, actor tasks and ray.put."""
    ray.init(
        num_cpus=2,
        object_store_memory=10**8,
        _system_config={"automatic_object_spilling_enabled": False})

    @ray.remote
    def expensive_task():
        return np.zeros((10**8) // 10, dtype=np.uint8)

    # 20 results of 1/10th store size cannot all be pinned at once.
    with pytest.raises(ray.exceptions.RayTaskError) as e:
        ray.get([expensive_task.remote() for _ in range(20)])
    with pytest.raises(ray.exceptions.ObjectStoreFullError):
        raise e.as_instanceof_cause()

    @ray.remote
    class LargeMemoryActor:
        def some_expensive_task(self):
            # Strictly larger than the whole object store.
            return np.zeros(10**8 + 2, dtype=np.uint8)

        def test(self):
            return 1

    actor = LargeMemoryActor.remote()
    with pytest.raises(ray.exceptions.RayTaskError):
        ray.get(actor.some_expensive_task.remote())
    # Make sure actor does not die
    ray.get(actor.test.remote())

    with pytest.raises(ray.exceptions.ObjectStoreFullError):
        ray.put(np.zeros(10**8 + 2, dtype=np.uint8))
@pytest.mark.parametrize(
    "ray_start_cluster", [{
        "num_nodes": 1,
        "num_cpus": 2,
    }, {
        "num_nodes": 2,
        "num_cpus": 1,
    }],
    indirect=True)
def test_eviction(ray_start_cluster):
    """Explicitly freed objects raise ObjectLostError on direct get and
    when passed by reference to another task."""
    @ray.remote
    def large_object():
        return np.zeros(10 * 1024 * 1024)

    obj = large_object.remote()
    assert (isinstance(ray.get(obj), np.ndarray))
    # Evict the object.
    ray.internal.free([obj])
    # ray.get throws an exception.
    with pytest.raises(ray.exceptions.ObjectLostError):
        ray.get(obj)

    @ray.remote
    def dependent_task(x):
        return

    # If the object is passed by reference, the task throws an
    # exception.
    with pytest.raises(ray.exceptions.RayTaskError):
        ray.get(dependent_task.remote(obj))
@pytest.mark.parametrize(
    "ray_start_cluster", [{
        "num_nodes": 2,
        "num_cpus": 1,
    }, {
        "num_nodes": 1,
        "num_cpus": 2,
    }],
    indirect=True)
def test_serialized_id(ray_start_cluster):
    """ObjectRefs serialized inside task arguments (a list) remain
    resolvable, both via ray.get and when re-passed to another task."""
    @ray.remote
    def small_object():
        # Sleep a bit before creating the object to force a timeout
        # at the getter.
        time.sleep(1)
        return 1

    @ray.remote
    def dependent_task(x):
        return x

    @ray.remote
    def get(obj_refs, test_dependent_task):
        print("get", obj_refs)
        obj_ref = obj_refs[0]
        if test_dependent_task:
            assert ray.get(dependent_task.remote(obj_ref)) == 1
        else:
            assert ray.get(obj_ref) == 1

    # Cover all four combinations: task-created vs ray.put object, and
    # direct get vs pass-through to a dependent task.
    obj = small_object.remote()
    ray.get(get.remote([obj], False))

    obj = small_object.remote()
    ray.get(get.remote([obj], True))

    obj = ray.put(1)
    ray.get(get.remote([obj], False))

    obj = ray.put(1)
    ray.get(get.remote([obj], True))
@pytest.mark.parametrize("use_actors,node_failure",
                         [(False, False), (False, True), (True, False),
                          (True, True)])
def test_fate_sharing(ray_start_cluster, use_actors, node_failure):
    """A child (task or actor) dies with its parent actor's process or
    node, releasing the child's custom resource."""
    config = {
        "num_heartbeats_timeout": 10,
        "raylet_heartbeat_period_milliseconds": 100,
    }
    cluster = Cluster()
    # Head node with no resources.
    cluster.add_node(num_cpus=0, _system_config=config)
    ray.init(address=cluster.address)
    # Node to place the parent actor.
    node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1})
    # Node to place the child actor.
    cluster.add_node(num_cpus=1, resources={"child": 1})
    cluster.wait_for_nodes()

    @ray.remote
    def sleep():
        time.sleep(1000)

    @ray.remote(resources={"child": 1})
    def probe():
        return

    # TODO(swang): This test does not pass if max_restarts > 0 for the
    # raylet codepath. Add this parameter once the GCS actor service is enabled
    # by default.
    @ray.remote
    class Actor(object):
        def __init__(self):
            return

        def start_child(self, use_actors):
            # Block forever on a child placed on the "child" node.
            if use_actors:
                child = Actor.options(resources={"child": 1}).remote()
                ray.get(child.sleep.remote())
            else:
                ray.get(sleep.options(resources={"child": 1}).remote())

        def sleep(self):
            time.sleep(1000)

        def get_pid(self):
            return os.getpid()

    # Returns whether the "child" resource is available.
    def child_resource_available():
        p = probe.remote()
        ready, _ = ray.wait([p], timeout=1)
        return len(ready) > 0

    # Test fate sharing if the parent process dies.
    def test_process_failure(use_actors):
        a = Actor.options(resources={"parent": 1}).remote()
        pid = ray.get(a.get_pid.remote())
        a.start_child.remote(use_actors=use_actors)
        # Wait for the child to be scheduled.
        wait_for_condition(lambda: not child_resource_available())
        # Kill the parent process.
        os.kill(pid, 9)
        wait_for_condition(child_resource_available)

    # Test fate sharing if the parent node dies.
    def test_node_failure(node_to_kill, use_actors):
        a = Actor.options(resources={"parent": 1}).remote()
        a.start_child.remote(use_actors=use_actors)
        # Wait for the child to be scheduled.
        wait_for_condition(lambda: not child_resource_available())
        # Kill the parent process.
        cluster.remove_node(node_to_kill, allow_graceful=False)
        node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1})
        wait_for_condition(child_resource_available)
        return node_to_kill

    if node_failure:
        test_node_failure(node_to_kill, use_actors)
    else:
        test_process_failure(use_actors)

    # Verify no more than the expected number of worker-failure records
    # were written to Redis.
    ray.state.state._check_connected()
    keys = [
        key for r in ray.state.state.redis_clients
        for key in r.keys("WORKER_FAILURE*")
    ]
    if node_failure:
        assert len(keys) <= 1, len(keys)
    else:
        assert len(keys) <= 2, len(keys)
@pytest.mark.parametrize(
    "ray_start_regular", [{
        "_system_config": {
            "ping_gcs_rpc_server_max_retries": 100
        }
    }],
    indirect=True)
def test_gcs_server_failiure_report(ray_start_regular, log_pubsub):
    """A crashed gcs_server is reported on the log pubsub channel.

    NOTE(review): "failiure" in the name is a typo for "failure"; renaming
    would change the pytest node id, so it is only flagged here.
    """
    p = log_pubsub
    # Get gcs server pid to send a signal.
    all_processes = ray.worker._global_node.all_processes
    gcs_server_process = all_processes["gcs_server"][0].process
    gcs_server_pid = gcs_server_process.pid

    os.kill(gcs_server_pid, signal.SIGBUS)
    msg = None
    cnt = 0
    # wait for max 30 seconds.
    while cnt < 3000 and not msg:
        msg = p.get_message()
        if msg is None:
            time.sleep(0.01)
            cnt += 1
            continue
        # First message received: it must come from the gcs_server process.
        data = json.loads(ray._private.utils.decode(msg["data"]))
        assert data["pid"] == "gcs_server"
@pytest.mark.parametrize(
"ray_start_regular", [{
"_system_config": {
"task_retry_delay_ms": 500
}
}],
indirect=True)
def test_async_actor_task_retries(ray_start_regular):
# https://github.com/ray-project/ray/issues/11683
signal = SignalActor.remote()
@ray.remote
class DyingActor:
def __init__(self):
print("DyingActor init called")
self.should_exit = False
def set_should_exit(self):
print("DyingActor.set_should_exit called")
self.should_exit = True
async def get(self, x, wait=False):
print(f"DyingActor.get called with x={x}, wait={wait}")
if self.should_exit:
os._exit(0)
if wait:
await signal.wait.remote()
return x
# Normal in order actor task retries should work
dying = DyingActor.options(
max_restarts=-1,
max_task_retries=-1,
).remote()
assert ray.get(dying.get.remote(1)) == 1
ray.get(dying.set_should_exit.remote())
assert ray.get(dying.get.remote(42)) == 42
# Now let's try out of order retries:
# Task seqno 0 will return
# Task seqno 1 will be pending and retried later
# Task seqno 2 will return
# Task seqno 3 will crash the actor and retried later
dying = DyingActor.options(
max_restarts=-1,
max_task_retries=-1,
).remote()
# seqno 0
ref_0 = dying.get.remote(0)
assert ray.get(ref_0) == 0
# seqno 1
ref_1 = dying.get.remote(1, wait=True)
# seqno 2
ref_2 = | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Content-related interfaces.
"""
from __future__ import print_function, absolute_import, division
__docformat__ = "restructuredtext en"
import sys
logger = __import__('logging').getLogger(__name__)
# pylint:disable=inherit-non-class,too-many-ancestors,no-self-argument,abstract-method
# pylint:disable=useless-object-inheritance
PY2 = str is bytes
PYPY = hasattr(sys, 'pypy_version_info')
PYPY2 = PY2 and PYPY
if PY2: # pragma: no cover
import copy_reg # pylint:disable=import-error
text_type = unicode # pylint:disable=undefined-variable
else:
import copyreg as copy_reg
text_type = str
from zope import component
from zope import interface
from zope.interface.common.collections import ISequence
from zope.interface.common.builtins import INativeString
from zope.interface.common.builtins import IByteString
from zope.interface.common.builtins import ITextString
from zope.contenttype import add_files as zc_add_files
from zope.mimetype import mtypes as mime_types
from zope.schema import NativeStringLine
from nti.schema.field import IndexedIterable
mime_types.setup() # register interface classes and utilities if not already
# ``resource_filename`` resolves data files shipped inside this package.
resource_filename = __import__('pkg_resources').resource_filename
def _setup():
    """
    Register MIME-type interfaces and utilities, and extend the shared
    MIME map with this package's data files.

    The ZCML directive would normally register these in a specified module,
    but we need the interfaces immediately in order to implement them below,
    so we register them directly at import time instead.
    """
    csv_path = resource_filename('nti.contentfragments', "types.csv")
    parsed = mime_types.read(csv_path)
    mime_types.registerUtilities(mime_types.getInterfaces(parsed), parsed)
    zc_add_files([resource_filename('nti.contentfragments', 'mime.types')])
# Perform the registration now so interfaces defined below are available.
_setup()
# BWC aliases. These will be removed in the future.
IString = INativeString
IUnicode = ITextString
IBytes = IByteString
class IContentFragment(interface.Interface):
    """
    Base interface representing different formats that content can
    be in.
    """
class IUnicodeContentFragment(IContentFragment, ISequence):
    """
    Content represented as a unicode string.
    Although it is simplest to subclass :class:`unicode`, that is not required.
    At a minimum, what is required are the `__getitem__` method (and others
    declared by :class:`IReadSequence`), plus the `encode` method.
    .. versionchanged:: 1.3.0
       Extend ``zope.interface.common.collections.ISequence`` instead of the semi-deprecated
       ``zope.interface.common.sequence.IReadSequence``. Except on PyPy2, where
       ``ISequence`` cannot validate against unicode objects.
    """
# TODO: extend IUnicode?
if PYPY2: # pragma: no cover
    # On PyPy2, ISequence cannot validate unicode instances, so strip it
    # from the interface's bases on that platform only.
    IUnicodeContentFragment.__bases__ = tuple(
        x
        for x in IUnicodeContentFragment.__bases__
        if x is not ISequence
    )
@interface.implementer(IUnicodeContentFragment)
class UnicodeContentFragment(text_type):
    """
    Subclasses should override the :meth:`__add__` method
    to return objects that implement the appropriate (most derived, generally)
    interface.
    This object *DOES NOT* add a dictionary to the :class:`unicode` type.
    In particular, it should not be weak referenced. Subclasses that
    do not expect to be persisted in the ZODB *may* add additional attributes
    by adding to the ``__slots__`` field (not the instance value).
    """
    # We do need to allow the things used by zope.interface/zope.component
    _ZCA_KEYS = ('__provides__',)
    __slots__ = _ZCA_KEYS # actually meaningless, but we simulate this with __getattr__ and __setattr__
    def __getattr__(self, name):
        # No instance dict: any attribute not found on the type is an error.
        raise AttributeError(name)
    def __setattr__(self, name, value):
        # We do allow the attributes used by the ZCA
        if name in type(self).__slots__:
            super(UnicodeContentFragment, self).__setattr__(name, value)
            return
        raise AttributeError(name, type(self))
    def __getattribute__(self, name):
        # Pretend not to have __dict__/__weakref__ so nothing relies on them.
        if name in ('__dict__', '__weakref__'): # Though this does not actually prevent creating a weak ref
            raise AttributeError(name, type(self))
        if name == '__class__':
            return type(self)
        return text_type.__getattribute__(self, name)
    def __setstate__(self, state):
        # If we had any state saved due to bad pickles in the past
        # ignore it. Do support the ZCA attributes
        if state:
            for k in self.__slots__:
                # Use ``self`` as a sentinel for "key absent".
                v = state.pop(k, self)
                if v is not self:
                    text_type.__setattr__(self, k, v)
        # Anything left is bad and not supported. __parent__ was extremely common at one point
        if state and (len(state) > 1 or '__parent__' not in state):
            logger.warning("Ignoring bad state for %s: %s", self, state)
    def __getstate__(self):
        # Support just the ZCA attributes
        try:
            state = text_type.__getattribute__(self, '__dict__')
        except AttributeError:
            # Hmm, really is a slot
            try:
                state = {'__provides__': self.__provides__}
            except AttributeError:
                state = None
        if state:
            # Persist only the whitelisted ZCA keys.
            state = {k: v for k, v in state.items() if k in type(self).__slots__}
            return state
        return ()
    def __reduce_ex__(self, protocol):
        return (copy_reg.__newobj__, # Constructor
                # Constructor args. Note we pass a real base unicode object;
                # otherwise, we get infinite recursion as pickle tries to
                # reduce use again using __unicode__
                (type(self), self.encode('utf-8').decode('utf-8')),
                self.__getstate__() or None,
                None,
                None)
    def __unicode__(self):
        """
        We are-a unicode instance, but if we don't override this method,
        calling unicode(UnicodeContentFragment('')) produces a plain, base,
        unicode object, thus losing all our interfaces.
        """
        return self
    if not PY2:
        # On Python 3 str() must likewise preserve the fragment type.
        __str__ = __unicode__
    def __getslice__(self, i, j):
        # Part of IReadSequence, deprecated in 2.0, removed in 3,
        # but we still must implement it to comply with the iface.
        raise NotImplementedError()
    def __rmul__(self, times):
        # Re-wrap results of string operations so the fragment type survives.
        result = text_type.__rmul__(self, times)
        if result is not self:
            result = self.__class__(result)
        return result
    def __mul__(self, times):
        result = text_type.__mul__(self, times)
        if result is not self:
            result = self.__class__(result)
        return result
    def translate(self, table):
        result = text_type.translate(self, table)
        if result is not self:
            result = self.__class__(result)
        return result
    def lower(self):
        result = text_type.lower(self)
        if result == self:
            return self # NOTE this is slightly different than what a normal string does
        return self.__class__(result)
    def upper(self):
        result = text_type.upper(self)
        if result == self:
            return self # NOTE this is slightly different than what a normal string does
        return self.__class__(result)
    # shut pylint up about 'bad container'; raise same error super does
    def __delitem__(self, i):
        raise TypeError()
    def __setitem__(self, k, v):
        raise TypeError()
# Interface/implementation pair for LaTeX content.
IContentTypeTextLatex = getattr(mime_types, 'IContentTypeTextLatex')
class ILatexContentFragment(IUnicodeContentFragment, IContentTypeTextLatex):
    """
    Interface representing content in LaTeX format.
    """
@interface.implementer(ILatexContentFragment)
class LatexContentFragment(UnicodeContentFragment):
    """A unicode content fragment whose text is LaTeX markup."""
    pass
# MIME-type marker interface for text/html, registered in _setup().
IContentTypeTextHtml = getattr(mime_types, 'IContentTypeTextHtml')
class IHTMLContentFragment(IUnicodeContentFragment, IContentTypeTextHtml):
    """
    Interface representing content in HTML format.
    """
# Interface/implementation pair for reStructuredText content.
IContentTypeTextRst = getattr(mime_types, 'IContentTypeTextRst')
class IRstContentFragment(IUnicodeContentFragment, IContentTypeTextRst):
    """
    Interface representing content in RST format.
    """
@interface.implementer(IRstContentFragment)
class RstContentFragment(UnicodeContentFragment):
    """A unicode content fragment whose text is reStructuredText markup."""
    pass
# NOTE The implementations of the add methods go directly to
# unicode and not up the super() chain to avoid as many extra
# copies as possible
def _add_(self, other, tuples):
    """
    Concatenate *self* and *other* at the raw text level, then wrap the
    result using the first factory whose interface *other* provides.

    :param tuples: A sequence of ``(interface, factory)`` pairs, consulted
        in order; if no interface matches, the raw concatenation is returned.
    """
    joined = text_type.__add__(self, other)
    for iface, factory in tuples:
        if iface.providedBy(other):
            return factory(joined)
    return joined
class _AddMixin(object):
    # Sequence of (interface, factory) pairs consulted by __add__; subclasses
    # override this to control the result type of concatenation.
    _add_rules = ()
    def __add__(self, other):
        # Delegate to the shared helper, which picks a factory by interface.
        return _add_(self, other, self._add_rules)
@interface.implementer(IHTMLContentFragment)
class HTMLContentFragment(_AddMixin, UnicodeContentFragment):
    pass
# Assigned after the class statement so the rule can reference the class itself.
HTMLContentFragment._add_rules = ((IHTMLContentFragment, HTMLContentFragment),)
class ISanitizedHTMLContentFragment(IHTMLContentFragment):
    """
    HTML content, typically of unknown or untrusted provenance,
    that has been sanitized for "safe" presentation in a generic,
    also unknown browsing context.
    Typically this will mean that certain unsafe constructs, such
    as <script> tags have been removed.
    """
@interface.implementer(ISanitizedHTMLContentFragment)
class SanitizedHTMLContentFragment(HTMLContentFragment):
    pass
# TODO: What about the rules for the other types?
# Sanitized rules are prepended so they take precedence over plain HTML rules.
SanitizedHTMLContentFragment._add_rules = \
    ((ISanitizedHTMLContentFragment, SanitizedHTMLContentFragment),) + \
    HTMLContentFragment._add_rules
# Interface/implementation pair for plain-text content.
IContentTypeTextPlain = getattr(mime_types, 'IContentTypeTextPlain')
class IPlainTextContentFragment(IUnicodeContentFragment, IContentTypeTextPlain):
    """
    Interface representing content in plain text format.
    """
@interface.implementer(IPlainTextContentFragment)
class PlainTextContentFragment(UnicodeContentFragment):
    pass
# Identity adapter: plain text is already plain text.
@interface.implementer(IPlainTextContentFragment)
@component.adapter(IPlainTextContentFragment)
def _plain_text_to_plain_text(text):
    # We shouldn't actually be able to get here.
    return text # pragma: no cover
from zope.schema.interfaces import ITokenizedTerm
class ICensoredTerm(ITokenizedTerm):
    """
    Base interface for a censored term
    """
class IProfanityTerm(ICensoredTerm):
    """
    Base interface for a profanity term
    """
class ICensoredUnicodeContentFragment(IUnicodeContentFragment):
    """
    A content fragment that has passed through a censoring process to
    attempt to ensure it is safe for display to its intended audience (e.g.,
    profanity has been removed if the expected audience is underage/sensitive to
    that).
    The rules for censoring content will be very context specific. In
    particular, it will depend on *who* you are, and *where* you are
    adding/editing content. The *who* is important to differentiate
    between, e.g., students and teachers. The *where* is important to
    differentiate between, say, a public forum, and your private notes, or
    between your Human Sexuality textbook and your Calculus textbook.
    For this reason, the censoring process will typically utilize
    multi-adapters registered on (creator, content_unit). Contrast this with
    sanitizing HTML, which always follows the same process.
    """
@interface.implementer(ICensoredUnicodeContentFragment)
class CensoredUnicodeContentFragment(_AddMixin, UnicodeContentFragment):
    pass
# Concatenating with another censored fragment stays censored; with any other
# unicode fragment the result degrades to a plain UnicodeContentFragment.
CensoredUnicodeContentFragment._add_rules = (
    (ICensoredUnicodeContentFragment, CensoredUnicodeContentFragment),
    (IUnicodeContentFragment, UnicodeContentFragment)
)
class ICensoredPlainTextContentFragment(IPlainTextContentFragment, ICensoredUnicodeContentFragment):
    pass
@interface.implementer(ICensoredPlainTextContentFragment)
class CensoredPlainTextContentFragment(PlainTextContentFragment):
    pass
# ``censored(self, new_text)`` is monkey-patched onto the fragment classes:
# it wraps the censored replacement text in the appropriate censored type.
PlainTextContentFragment.censored = lambda s, n: CensoredPlainTextContentFragment(n)
CensoredPlainTextContentFragment.censored = lambda s, n: CensoredPlainTextContentFragment(n)
class ICensoredHTMLContentFragment(IHTMLContentFragment, ICensoredUnicodeContentFragment):
    pass
@interface.implementer(ICensoredHTMLContentFragment)
class CensoredHTMLContentFragment(HTMLContentFragment):
    pass
# Censored-HTML rules take precedence, then fall back to the generic
# censored-unicode rules.
CensoredHTMLContentFragment._add_rules = \
    ((ICensoredHTMLContentFragment, CensoredHTMLContentFragment),) + \
    CensoredUnicodeContentFragment._add_rules
CensoredHTMLContentFragment.censored = lambda s, n: CensoredHTMLContentFragment(n)
class ICensoredSanitizedHTMLContentFragment(ISanitizedHTMLContentFragment, ICensoredHTMLContentFragment):
    pass
@interface.implementer(ICensoredSanitizedHTMLContentFragment)
class CensoredSanitizedHTMLContentFragment(CensoredHTMLContentFragment):
    pass
# The rules here place sanitization ahead of censoring, because sanitization
# can cause security problems for end users; censoring is just offensive
CensoredSanitizedHTMLContentFragment._add_rules = (
    ((ICensoredSanitizedHTMLContentFragment, CensoredSanitizedHTMLContentFragment),
     (ISanitizedHTMLContentFragment, SanitizedHTMLContentFragment),)
    + CensoredHTMLContentFragment._add_rules
    + HTMLContentFragment._add_rules
)
# Monkey-patched ``censored`` helpers for the remaining fragment classes.
HTMLContentFragment.censored = lambda s, n: CensoredHTMLContentFragment(n)
UnicodeContentFragment.censored = lambda s, n: CensoredUnicodeContentFragment(n)
SanitizedHTMLContentFragment.censored = lambda s, n: CensoredSanitizedHTMLContentFragment(n)
CensoredSanitizedHTMLContentFragment.censored = lambda s, n: CensoredSanitizedHTMLContentFragment(n)
# See http://code.google.com/p/py-contentfilter/
# and https://hkn.eecs.berkeley.edu/~dyoo/python/ahocorasick/
class ICensoredContentScanner(interface.Interface):
"""
Something that can perform censoring.
Variations of censoring scanners will be registered
as named utilities. Particular censoring solutions (the adapters discussed
in :class:`ICensoredUnicodeContentFragment`) will put together
a combination of these utilities to produce the desired result.
The censoring process can further be broken down into two parts:
detection of unwanted content, and reacting to unwanted content. For example,
reacting might consist of | |
<reponame>Goldfish64/CorpBot.py<filename>Cogs/Xp.py
import asyncio
import discord
import datetime
import random
from discord.ext import commands
from operator import itemgetter
from Cogs import Settings
# This is the xp module. It's likely to be retarded.
class Xp:
# Init with the bot reference, and a reference to the settings var
    def __init__(self, bot, settings):
        """Store the bot/settings references and start the hourly XP task."""
        self.bot = bot            # the discord client/bot instance
        self.settings = settings  # per-server stats accessor (Settings cog)
        # Schedule the background task that periodically grants XP reserve.
        self.bot.loop.create_task(self.addXP())
def message(self, message):
# Check the message and see if we should allow it - always yes.
# This module doesn't need to cancel messages.
return { 'Ignore' : False, 'Delete' : False}
async def addXP(self):
while not self.bot.is_closed:
await asyncio.sleep(3600) # runs only every 1 hour (3600 seconds)
print("Adding XP: {}".format(datetime.datetime.now().time().isoformat()))
for server in self.bot.servers:
# Iterate through the servers and add them
xpAmount = self.settings.getServerStat(server, "HourlyXP")
onlyOnline = self.settings.getServerStat(server, "RequireOnline")
for user in server.members:
bumpXP = False
if onlyOnline.lower() == "no":
bumpXP = True
else:
if str(user.status).lower() == "online":
bumpXP = True
if bumpXP:
boost = int(self.settings.getServerStat(server, "IncreasePerRank"))
promoteBy = self.settings.getServerStat(server, "PromoteBy")
if promoteBy.lower() == "position":
maxPos = int(self.settings.getServerStat(server, "MaxPosition"))
else:
promoArray = self.settings.getServerStat(server, "PromotionArray")
maxPos = len(promoArray)-1
biggest = 0
xpPayload = 0
for role in user.roles:
if role.position <= maxPos and role.position > biggest:
biggest = role.position
xpPayload = int(xpAmount)
self.settings.incrementStat(user, server, "XPReserve", xpPayload)
@commands.command(pass_context=True)
async def setxp(self, ctx, member : discord.Member = None, xpAmount : int = None):
"""Sets an absolute value for the member's xp (admin only)."""
author = ctx.message.author
server = ctx.message.server
channel = ctx.message.channel
isAdmin = author.permissions_in(channel).administrator
# Only allow admins to change server stats
if not isAdmin:
await self.bot.send_message(channel, 'You do not have sufficient privileges to access this command.')
return
# Check for formatting issues
if not (xpAmount or member):
msg = 'Usage: `$setxp [member] [amount]`'
await self.bot.send_message(channel, msg)
return
if not type(xpAmount) is int:
msg = 'Usage: `$setxp [member] [amount]`'
await self.bot.send_message(channel, msg)
return
if xpAmount < 0:
msg = 'Usage: `$setxp [member] [amount]`'
await self.bot.send_message(channel, msg)
return
if type(member) is str:
try:
member = discord.utils.get(server.members, name=member)
except:
print("That member does not exist")
return
self.settings.setUserStat(member, server, "XP", xpAmount)
msg = '*{}\'s* xp was set to *{}!*'.format(member.name, xpAmount)
await self.bot.send_message(channel, msg)
await self.checkroles(member, channel)
@setxp.error
async def setxp_error(self, ctx, error):
# do stuff
msg = 'setxp Error: {}'.format(ctx)
await self.bot.say(msg)
@commands.command(pass_context=True)
async def xp(self, ctx, member : discord.Member = None, xpAmount : int = None):
"""Gift xp to other members."""
author = ctx.message.author
server = ctx.message.server
channel = ctx.message.channel
# Check for formatting issues
if xpAmount == None or member == None:
msg = 'Usage: `$xp [member] [amount]`'
await self.bot.send_message(channel, msg)
return
if not type(xpAmount) is int:
msg = 'Usage: `$xp [member] [amount]`'
await self.bot.send_message(channel, msg)
return
if type(member) is str:
try:
member = discord.utils.get(server.members, name=member)
except:
print("That member does not exist")
return
# Get our user/server stats
isAdmin = author.permissions_in(channel).administrator
adminUnlim = self.settings.getServerStat(server, "AdminUnlimited")
reserveXP = self.settings.getUserStat(author, server, "XPReserve")
minRole = self.settings.getServerStat(server, "MinimumXPRole")
approve = True
decrement = True
# MinimumXPRole
if author.top_role.position < int(minRole):
approve = False
msg = 'You don\'t have the permissions to give xp.'
if xpAmount > int(reserveXP):
approve = False
msg = 'You can\'t give *{} xp*, you only have *{}!*'.format(xpAmount, reserveXP)
if author == member:
approve = False
msg = 'You can\'t give yourself xp! *Nice try...*'
if xpAmount < 0:
msg = 'Only admins can take away xp!'
approve = False
# Check admin last - so it overrides anything else
if isAdmin and adminUnlim.lower() == "yes":
# No limit - approve
approve = True
decrement = False
userRole = member.top_role.position
if approve:
# XP was approved! Let's say it - and check decrement from gifter's xp reserve
msg = '*{}* was given *{} xp!*'.format(member.name, xpAmount)
await self.bot.send_message(channel, msg)
newXP = self.settings.incrementStat(member, server, "XP", xpAmount)
if decrement:
self.settings.incrementStat(author, server, "XPReserve", (-1*xpAmount))
# Now we check for promotions
await self.checkroles(member, channel)
else:
await self.bot.send_message(channel, msg)
@xp.error
async def xp_error(self, ctx, error):
msg = 'xp Error: {}'.format(ctx)
await self.bot.say(msg)
@commands.command(pass_context=True)
async def gamble(self, ctx, bet : int = None):
"""Gamble your xp reserves for a chance at winning xp!"""
author = ctx.message.author
server = ctx.message.server
channel = ctx.message.channel
# bet must be a multiple of 10, member must have enough xpreserve to bet
msg = 'Usage: `gamble [xp reserve bet] (must be multiple of 10)`'
if not (bet or type(bet) == int):
await self.bot.send_message(channel, msg)
return
if not type(bet) == int:
await self.bot.send_message(channel, msg)
return
isAdmin = author.permissions_in(channel).administrator
adminUnlim = self.settings.getServerStat(server, "AdminUnlimited")
reserveXP = self.settings.getUserStat(author, server, "XPReserve")
minRole = self.settings.getServerStat(server, "MinimumXPRole")
approve = True
decrement = True
# Check Bet
if not bet % 10 == 0:
approve = False
msg = 'Bets must be in multiples of *10!*'
if bet > int(reserveXP):
approve = False
msg = 'You can\'t bet *{}*, you only have *{}* xp reserve!'.format(bet, reserveXP)
if bet < 0:
msg = 'You can\'t bet negative amounts!'
approve = False
if bet == 0:
msg = 'You can\'t bet *nothing!*'
approve = False
if author.top_role.position < int(minRole):
approve = False
msg = 'You don\'t have the permissions to bet.'
# Check admin last - so it overrides anything else
if isAdmin and adminUnlim.lower() == "yes":
# No limit - approve
approve = True
decrement = False
if approve:
# Bet was approved - let's take the XPReserve right away
if decrement:
takeReserve = -1*bet
self.settings.incrementStat(author, server, "XPReserve", takeReserve)
# Bet more, less chance of winning, but more winnings!
if bet < 100:
betChance = 5
payout = int(bet/10)
elif bet < 500:
betChance = 15
payout = int(bet/4)
else:
betChance = 25
payout = int(bet/2)
# 1/betChance that user will win - and payout is 1/10th of the bet
randnum = random.randint(1, betChance)
# print('{} : {}'.format(randnum, betChance))
if randnum == 1:
# YOU WON!!
self.settings.incrementStat(author, server, "XP", int(payout))
msg = '{} bet {} and ***WON*** *{} xp!*'.format(author.name, bet, int(payout))
else:
msg = '*{}* bet *{}* and.... *didn\'t* win. Better luck next time!'.format(author.name, bet)
await self.bot.send_message(ctx.message.channel, msg)
    async def checkroles(self, user, channel):
        # This method checks whether we need to promote, demote, or whatever
        # then performs the said action, and outputs.
        server = channel.server
        # Get our preliminary vars
        msg = None
        xpPromote = self.settings.getServerStat(server, "XPPromote")
        xpDemote = self.settings.getServerStat(server, "XPDemote")
        promoteBy = self.settings.getServerStat(server, "PromoteBy")
        requiredXP = int(self.settings.getServerStat(server, "RequiredXP"))
        maxPosition = self.settings.getServerStat(server, "MaxPosition")
        padXP = self.settings.getServerStat(server, "PadXPRoles")
        difficulty = int(self.settings.getServerStat(server, "DifficultyMultiplier"))
        userXP = self.settings.getUserStat(user, server, "XP")
        # Apply the pad
        userXP = int(userXP)+(int(requiredXP)*int(padXP))
        if xpPromote.lower() == "yes":
            # We use XP to promote - let's check our levels
            if promoteBy.lower() == "position":
                # We use the position to promote
                # For now, this should be unused - it's unreliable
                gotLevels = 0
                for x in range(0, int(maxPosition)+1):
                    # Get required xp per level
                    required = (requiredXP*x) + (requiredXP*difficulty)
                    if userXP >= required:
                        gotLevels = x
                if gotLevels > int(maxPosition):
                    # If we got too high - let's even out
                    gotLevels = int(maxPosition)
                # Add 1 for our range, since it goes from 0 -> (gotLevels-1)
                gotLevels+=1
                for x in range(0, gotLevels):
                    # fill in all the roles between
                    for role in server.roles:
                        if role.position < gotLevels:
                            if not role in user.roles:
                                # Only add if we need to
                                await self.bot.add_roles(user, role)
                                msg = '*{}* was promoted to **{}**!'.format(user.name, discord.utils.get(server.roles, position=gotLevels).name)
            elif promoteBy.lower() == "array":
                # This is, by far, the more functional way
                promoArray = self.settings.getServerStat(server, "PromotionArray")
                for role in promoArray:
                    # Iterate through the roles, and add which we have xp for
                    if int(role['XP']) <= userXP:
                        # We *can* have this role, let's see if we already do
                        currentRole = None
                        for aRole in server.roles:
                            # Get the role that corresponds to the id
                            if aRole.id == role['ID']:
                                # We found it
                                currentRole = aRole
                        # Now see if we have it, and add it if we don't
                        if not currentRole in user.roles:
                            await self.bot.add_roles(user, currentRole)
                            msg = '*{}* was promoted to **{}**!'.format(user.name, currentRole.name)
                    else:
                        if xpDemote.lower() == "yes":
                            # Let's see if we have this role, and remove it. Demote time!
                            currentRole = None
                            for aRole in server.roles:
                                # Get the role that corresponds to the id
                                if aRole.id == role['ID']:
                                    # We found it
                                    currentRole = aRole
                            # Now see if we have it, and take it away!
                            if currentRole in user.roles:
                                await self.bot.remove_roles(user, currentRole)
                                msg = '*{}* was demoted from **{}**!'.format(user.name, currentRole.name)
        # Check if we have a message to display - and display it
        # (msg holds only the *last* promotion/demotion announced above)
        if msg:
            await self.bot.send_message(channel, msg)
@commands.command(pass_context=True)
async def listroles(self, ctx):
"""Lists all roles, id's, and xp requirements for the xp promotion/demotion system."""
server = ctx.message.server
channel = ctx.message.channel
# Get the array
promoArray = self.settings.getServerStat(server, "PromotionArray")
# Sort by XP first, then by name
promoSorted = sorted(promoArray, key=itemgetter('XP', 'Name'))
roleText = "Current Roles:\n"
for arole in promoSorted:
roleText = '{}**{}** : *{} XP*\n'.format(roleText, arole['Name'], arole['XP'], arole['ID'])
await self.bot.send_message(channel, roleText)
@commands.command(pass_context=True)
async def rank(self, ctx, member: discord.Member = None):
"""Say the highest rank of a listed member."""
if member is None:
member = ctx.message.author
if type(member) is str:
try:
member = discord.utils.get(server.members, name=member)
except:
print("That member does not exist")
return
promoArray = self.settings.getServerStat(ctx.message.server, "PromotionArray")
promoSorted = sorted(promoArray, key=itemgetter('XP', 'Name'))
highestRole = ""
for role in promoSorted:
# We *can* have this role, let's see if we already do
currentRole = None
for aRole in member.roles:
# Get the role that corresponds to the id
if aRole.id == role['ID']:
# We found it
highestRole = aRole.name
if highestRole == "":
msg = '*{}* has not acquired a rank yet.'.format(member.name)
else:
msg = '*{}* is a **{}**!'.format(member.name, highestRole)
await self.bot.send_message(ctx.message.channel, msg)
@rank.error
async def rank_error(self, ctx, error):
msg = 'rank Error: {}'.format(ctx)
await self.bot.say(msg)
# List the xp and xp reserve of a user
@commands.command(pass_context=True)
async def stats(self, ctx, member: discord.Member = None):
"""List the xp and xp reserve of a listed member."""
if member is None:
member = ctx.message.author
if type(member) is str:
try:
member = discord.utils.get(server.members, name=member)
except:
print("That | |
25: continue
if abs(x_start-50) > 30: continue
# use different data if shuffle:
# if shuffle_time:
# experiment, condition, mouse, trial = mouse_trial_list[np.random.randint(len(mouse_trial_list))]
# else:
# experiment, condition, mouse, trial = experiment_real, condition_real, mouse_real, trial_real
''' just use real mouse '''
experiment, condition, mouse, trial = experiment_real, condition_real, mouse_real, trial_real
''' control ICs, real escape '''
# # get the angle turned during the escape
angle_turned[trial_num] = self.analysis[experiment_real][condition_real]['movement'][mouse_real][trial_real][2]
# angle_turned[trial_num] = abs(self.analysis[experiment_real][condition_real]['edginess'][mouse_real][trial_real])
# get the angle turned, delta x, delta y, and delta phi of previous homings
bout_start_angle = self.analysis[experiment_real][condition_real]['movement'][mouse_real][trial_real][1]
bout_start_position = self.analysis[experiment_real][condition_real]['movement'][mouse_real][trial_real][0]
start_time = self.analysis[experiment_real][condition_real]['start time'][mouse_real][trial_real]
# get initial conditions and endpoint quantities
IC_x = np.array(self.analysis[experiment][condition]['prev movements'][mouse][trial][0][-ETD:])
IC_y = np.array(self.analysis[experiment][condition]['prev movements'][mouse][trial][1][-ETD:])
IC_angle = np.array(self.analysis[experiment][condition]['prev movements'][mouse][trial][2][-ETD:])
IC_time = np.array(self.analysis[experiment][condition]['prev homings'][mouse][trial][3][-ETD:])
turn_angles = np.array(self.analysis[experiment][condition]['prev movements'][mouse][trial][3][-ETD:])
# MOE = 10
# x_edge_trial = self.analysis[experiment][condition]['x edge'][mouse][trial]
# SH_x = np.array(self.analysis[experiment][condition]['prev homings'][mouse][trial][0][-ETD:])
# if x_edge_trial > 50 and np.sum(SH_x > 25 + MOE):
# IC_x = IC_x[SH_x > 25 + MOE]
# IC_y = IC_y[SH_x > 25 + MOE]
# IC_angle = IC_angle[SH_x > 25 + MOE]
# IC_time = IC_time[SH_x > 25 + MOE]
# turn_angles = turn_angles[SH_x > 25 + MOE]
# elif np.sum(SH_x > 75 - MOE):
# IC_x = IC_x[SH_x > 75 - MOE]
# IC_y = IC_y[SH_x > 75 - MOE]
# IC_angle = IC_angle[SH_x > 75 - MOE]
# IC_time = IC_time[SH_x > 75 - MOE]
# turn_angles = turn_angles[SH_x > 75 - MOE]
if not shuffle_time: # gather previous movements
IC_x_all = np.concatenate((IC_x_all, IC_x))
IC_y_all = np.concatenate((IC_y_all, IC_y))
IC_angle_all = np.concatenate((IC_angle_all, IC_angle))
IC_time_all = np.concatenate((IC_time_all, IC_time))
turn_angles_all = np.concatenate((turn_angles_all, turn_angles))
else:
# sample randomly from these movements
random_idx = np.random.choice(len(IC_x_all), len(IC_x_all), replace = False)
IC_x = IC_x_all[random_idx]
IC_y = IC_y_all[random_idx]
IC_angle = IC_angle_all[random_idx]
IC_time = IC_time_all[random_idx]
turn_angles = turn_angles_all[random_idx]
# calculate difference in ICs
delta_x = abs( np.array(IC_x - bout_start_position[0]) )
delta_y = abs( np.array(IC_y - bout_start_position[1]) )
delta_angle = abs( np.array(IC_angle - bout_start_angle) )
delta_angle[delta_angle > 180] = 360 - delta_angle[delta_angle > 180]
delta_time = start_time - np.array(IC_time)
''' prediction data -- angle turned is a function of prev movement and ICs '''
x_weights = (1 / (delta_x+.0001)) / np.sum(1/(delta_x+.0001))
y_weights = (1 / (delta_y+.0001)) / np.sum(1 / (delta_y+.0001))
angle_weights = (1 / (delta_angle+.0001)) / np.sum(1 / (delta_angle+.0001))
time_weights = (1 / (delta_time+.0001)) / np.sum(1 / (delta_time+.0001))
x_pred[trial_num] = np.sum(turn_angles * x_weights)
y_pred[trial_num] = np.sum(turn_angles * y_weights)
angle_pred[trial_num] = np.sum(turn_angles * angle_weights)
time_pred[trial_num] = np.sum(turn_angles * time_weights) * 0
mean_pred[trial_num] = np.mean(turn_angles) * 0
# try mean pred is the *closest* angle to real
# x_pred[trial_num] = 0
# y_pred[trial_num] = 0
# angle_pred[trial_num] = 0
# time_pred[trial_num] = 0
# mean_pred[trial_num] = turn_angles[np.argmin( abs(turn_angles - angle_turned[trial_num]) )]
# ''' turn angle prediction to edginess prediction '''
if not shuffle_time:
edginess[trial_num] = abs(self.analysis[experiment][condition]['edginess'][mouse][trial])
initial_body_angle[trial_num] = self.analysis[experiment_real][condition_real]['movement'][mouse_real][trial_real][1]
initial_x[trial_num] = self.analysis[experiment_real][condition_real]['movement'][mouse_real][trial_real][0][0]
initial_y[trial_num] = self.analysis[experiment_real][condition_real]['movement'][mouse_real][trial_real][0][1]
x_edge[trial_num] = self.analysis[experiment][condition]['x edge'][mouse][trial_real]
# add mouse and trial to list of mice and trials
if not shuffle_time:
mouse_trial_list.append([experiment, condition, mouse, trial])
t+=1
''' concatenate??... '''
# angle_turned_all = np.concatenate((angle_turned_all, angle_turned))
#
# x_pred_all = np.concatenate((x_pred_all, x_pred))
# y_pred_all = np.concatenate((y_pred_all, y_pred))
# angle_pred_all = np.concatenate((angle_pred_all, angle_pred))
# time_pred_all = np.concatenate((time_pred_all, time_pred ))
# mean_pred_all = np.concatenate((mean_pred_all, mean_pred ))
#
#
# IC_angle_array = np.ones((len(angle_turned_all[~np.isnan(angle_turned_all)]), 5))
# angle_metrics = [x_pred_all[~np.isnan(angle_turned_all)], y_pred_all[~np.isnan(angle_turned_all)], angle_pred_all[~np.isnan(angle_turned_all)], \
# time_pred_all[~np.isnan(angle_turned_all)], mean_pred_all[~np.isnan(angle_turned_all)]]
# for i, angle_metric in enumerate(angle_metrics): #
# IC_angle_array[:, i] = angle_metric
#
# # get the data
# predict_data_y_all = [ angle_turned_all[~np.isnan(angle_turned_all)].reshape(-1, 1)] # for the movements input data
''' don't concatenate... '''
IC_angle_array = np.ones((len(angle_turned[~np.isnan(angle_turned)]), 5))
angle_metrics = [x_pred[~np.isnan(angle_turned)], y_pred[~np.isnan(angle_turned)],
angle_pred[~np.isnan(angle_turned)], \
time_pred[~np.isnan(angle_turned)], mean_pred[~np.isnan(angle_turned)]]
for i, angle_metric in enumerate(angle_metrics): #
IC_angle_array[:, i] = angle_metric
# get the data
predict_data_y_all_angle = [angle_turned[~np.isnan(angle_turned)].reshape(-1, 1)] # for the movements input data
predict_data_y_all_edgy = [edginess[~np.isnan(edginess)].reshape(-1, 1)] # for the movements input data
data_y_labels = ['angle']
predict_data_x_all = [IC_angle_array] # turn angles
predict_data_y_all = predict_data_y_all_angle # angles
''' predict edginess from turn angle '''
predict_edginess = True
if predict_edginess:
if not shuffle_time:
initial_body_angle = initial_body_angle[~np.isnan(initial_body_angle)].reshape(-1, 1)
initial_x = initial_x[~np.isnan(initial_x)].reshape(-1, 1)
initial_y = initial_y[~np.isnan(initial_y)].reshape(-1, 1)
x_edge = x_edge[~np.isnan(x_edge)].reshape(-1, 1)
# create the model
LR = linear_model.Ridge(alpha=.1)
# train the model
LR.fit(predict_data_x_all[0], predict_data_y_all_angle[0])
print(LR.score(predict_data_x_all[0], predict_data_y_all_angle[0]))
# get the model prediction
# model_prediction = LR.predict(predict_data_x_all[0])
model_prediction = predict_data_y_all_angle[0]
# predict body angles after turn
predicted_body_angle = initial_body_angle[~np.isnan(initial_body_angle)].reshape(-1, 1) - model_prediction
predicted_body_angle[predicted_body_angle >180] = predicted_body_angle[predicted_body_angle >180] - 360
predicted_body_angle[(predicted_body_angle > 0) * (predicted_body_angle < 90)] = -1 # super edgy to the right
predicted_body_angle[(predicted_body_angle > 0) * (predicted_body_angle > 90)] = 1 # super edgy to the right
# predict position at y = 40; set reasonable boundaries
x_at_40 = np.maximum(15 * np.ones_like(initial_x), np.minimum(90 * np.ones_like(initial_x),
initial_x - (40 - initial_y) / np.tan(np.deg2rad(predicted_body_angle)) ))
# get edginess
y_pos_end = 86.5; x_pos_end = 50; y_edge = 50
slope = (y_pos_end - initial_y) / (x_pos_end - (initial_x+.0001))
intercept = initial_y - initial_x * slope
distance_to_line = abs(40 - slope * x_at_40 - intercept) / np.sqrt((-slope) ** 2 + (1) ** 2)
homing_vector_at_center = (40 - intercept) / slope
# do line from starting position to edge position
slope = (y_edge - initial_y) / (x_edge - initial_x)
intercept = initial_y - initial_x * slope
distance_to_edge = abs(40 - slope * x_at_40 - intercept) / np.sqrt((-slope) ** 2 + (1) ** 2)
# compute the max possible deviation
edge_vector_at_center = (40 - intercept) / slope
line_to_edge_offset = abs(homing_vector_at_center - edge_vector_at_center) # + 5
# get index at center point (wall location)
# prev_edginess = np.maximum(np.zeros_like(distance_to_line), np.minimum(1.2*np.ones_like(distance_to_line),
# (distance_to_line - distance_to_edge + line_to_edge_offset) / (2 * line_to_edge_offset) ))
prev_edginess = abs((distance_to_line - distance_to_edge + line_to_edge_offset) / (2 * line_to_edge_offset))
predict_data_x_all = [prev_edginess] # predicted prev edginess #scipy.stats.zscore(
predict_data_y_all = predict_data_y_all_edgy # edginess
# edgy input colors
input_colors = [ [[0, .6, .4], [.5,.5,.5]], [[0, .6, .4], [.5,.5,.5]], [[.6, 0, .4], [.5,.5,.5]] ]
# split the data for cross val
num_trials = 1000 - 985 * shuffle_time #985
# loop acros angle prediction and traj prediction
for i, (fig, ax, predict_data_x) in enumerate(zip([fig1, fig2, fig3],[ax1, ax2, ax3], predict_data_x_all)):
# get prediction data
predict_data_y = predict_data_y_all[i]
# get color
color = input_colors[i][int(shuffle_time)]
# initialize prediction arrays
prediction_scores = np.zeros(num_trials)
for j in range(num_trials):
test_size = 0.5
# test_size = 0.25
# if shuffle_time: test_size = 0.25
# get x-val set
X_train, X_test, y_train, y_test = train_test_split(predict_data_x, \
predict_data_y, test_size=test_size, random_state=j)
# create the model
LR = linear_model.Ridge(alpha = .1) # .15, .5
# train the model
LR.fit(X_train, y_train)
# get the score
prediction_scores[j] = LR.score(X_test, y_test)
# exclude super negative ones
# prediction_scores = prediction_scores[prediction_scores > np.percentile(prediction_scores, 10)]
# put into larger array
prediction_scores_all = np.concatenate((prediction_scores_all, prediction_scores))
print(np.median(prediction_scores_all))
# exclude super negative ones
# prediction_scores_all = prediction_scores_all[prediction_scores_all > np.percentile(prediction_scores_all, 5)]
#do kde
kde = fit_kde(prediction_scores_all, bw=.03) # .04
plot_kde(ax, kde, prediction_scores_all, z = 0, vertical=False, color=color, violin=False, clip=False) # True)
#plt.show()
fig.savefig(os.path.join(self.summary_plots_folder,'Predictions of ' + data_y_labels[i] + ' - ' + self.labels[c] + '.png'), format='png')
fig.savefig(os.path.join(self.summary_plots_folder,'Predictions of ' + data_y_labels[i] + ' - ' + self.labels[c] + '.eps'), format='eps')
plt.show()
print('hi')
else:
'''
PREDICTION PLOTS EDGINESS OR BY **EXPLORATION**
'''
fps = 30
escape_duration = 12
ETD = 10 #4
traj_loc = 40
# mean_types = ['even', 'space', 'angle'] #, 'time', 'shelter time']
mean_types = ['space', 'angle', 'shelter time'] #, 'escape']
mean_type = 'even'
mean_colors = [[0, .6, .4], [0, .6, .8], [0, .6, .8], [.4, 0, 1] ]
mean_colors = [[0, .6, .4], [.4, 0, .8], [0, .6, .8], [.5, .5, .5]]
# initialize figures
fig1, ax1, fig2, ax2, fig3, ax3 = initialize_figures_prediction(self)
for m, mean_type in enumerate(mean_types):
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# get the number of trials
number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
mouse_trial_list = []
| |
will be set to ``True``
unless explicitly set to ``False`` in `kwargs` (e.g. active = False)
"""
kwargs = self._prepare_create_user_args(**kwargs)
user = self.user_model(**kwargs)
return self.put(user) # type: ignore
def delete_user(self, user: "User") -> None:
    """Remove the given user record from the datastore.

    :param user: The user to delete
    """
    # Delegate to the backend-specific ``delete`` implementation.
    self.delete(user)  # type: ignore
def reset_user_access(self, user: "User") -> None:
    """Reset a user's authentication methods after a compromise.

    This invalidates session cookies, remember cookies and auth tokens
    (by rotating ``fs_uniquifier`` and, if present, ``fs_token_uniquifier``)
    and removes any unified sign-in and two-factor secrets.

    Note that if 'email' is an allowed way to receive unified sign-in codes
    and the email account itself is compromised, login remains possible;
    deactivate the user in that case.

    This helper isn't used by Flask-Security itself - it exists for an
    application's administrative needs. Remember to call commit on DB if needed.

    .. versionadded:: 3.4.1
    """
    # Rotate the uniquifiers first: this kills cookies and auth tokens.
    self.set_uniquifier(user)
    self.set_token_uniquifier(user)
    # Drop whichever second-factor mechanisms the user model supports.
    for marker_attr, wipe in (
        ("us_totp_secrets", self.us_reset),
        ("tf_primary_method", self.tf_reset),
    ):
        if hasattr(user, marker_attr):
            wipe(user)
def tf_set(
    self,
    user: "User",
    primary_method: str,
    totp_secret: t.Optional[str] = None,
    phone: t.Optional[str] = None,
) -> None:
    """Store two-factor configuration on the user record.

    Only attributes whose values actually differ are written, and the
    record is persisted only if something changed.
    If ``totp_secret`` isn't provided - the existing one won't be changed.
    If ``phone`` isn't provided - the existing phone number won't be changed.

    This could be called from an application to a priori set up a user for
    two factor without the user having to go through the setup process.

    To get a totp_secret - use ``app.security._totp_factory.generate_totp_secret()``

    .. versionadded: 3.4.1
    """
    # (attribute, new value, should-apply) triples; primary_method is always
    # applied, the optional values only when truthy - mirroring the contract.
    candidates = (
        ("tf_primary_method", primary_method, True),
        ("tf_totp_secret", totp_secret, bool(totp_secret)),
        ("tf_phone_number", phone, bool(phone)),
    )
    dirty = False
    for attr, value, applicable in candidates:
        if applicable and getattr(user, attr) != value:
            setattr(user, attr, value)
            dirty = True
    if dirty:
        self.put(user)  # type: ignore
def tf_reset(self, user: "User") -> None:
    """Disable two-factor auth for user by clearing all two-factor state.

    .. versionadded: 3.4.1
    """
    for attr in ("tf_primary_method", "tf_totp_secret", "tf_phone_number"):
        setattr(user, attr, None)
    self.put(user)  # type: ignore
def us_get_totp_secrets(self, user: "User") -> t.Dict[str, str]:
    """Return the user's unified sign-in TOTP secrets.

    Secrets are stored JSON encoded in the DB; an empty or absent value
    decodes to an empty dict. Keys are methods, values are secrets.

    .. versionadded:: 3.4.0
    """
    raw = user.us_totp_secrets
    return json.loads(raw) if raw else {}
def us_put_totp_secrets(
    self, user: "User", secrets: t.Optional[t.Dict[str, str]]
) -> None:
    """Persist unified sign-in secrets (dict of method -> encrypted secret).

    The dict is JSON encoded for storage; a falsy value stores ``None``.

    .. versionadded:: 3.4.0
    """
    encoded = None
    if secrets:
        encoded = json.dumps(secrets)
    user.us_totp_secrets = encoded
    self.put(user)  # type: ignore
def us_set(
    self,
    user: "User",
    method: str,
    totp_secret: t.Optional[str] = None,
    phone: t.Optional[str] = None,
) -> None:
    """Set unified sign in info into user record.

    If ``totp_secret`` isn't provided - the existing one won't be changed.
    If ``phone`` isn't provided - the existing phone number won't be changed.

    This could be called from an application to a priori set up a user for
    unified sign in without the user having to go through the setup process.

    To get a totp_secret - use ``app.security._totp_factory.generate_totp_secret()``

    .. versionadded: 3.4.1
    """
    if totp_secret:
        # Merge the new secret into the stored method->secret mapping.
        secret_map = self.us_get_totp_secrets(user)
        secret_map[method] = totp_secret
        self.us_put_totp_secrets(user, secret_map)
    if phone and user.us_phone_number != phone:
        user.us_phone_number = phone
        self.put(user)  # type: ignore
def us_reset(self, user: "User") -> None:
    """Disable unified sign in for user.

    This disables authenticator app and SMS by dropping the stored secrets.
    Be aware that if "email" is an allowed way to receive codes, they will
    still work (as those totp secrets are generated on the fly).

    .. versionadded: 3.4.1
    """
    user.us_totp_secrets = None
    self.put(user)  # type: ignore
class SQLAlchemyUserDatastore(SQLAlchemyDatastore, UserDatastore):
    """A UserDatastore implementation that assumes the
    use of
    `Flask-SQLAlchemy <https://pypi.python.org/pypi/flask-sqlalchemy/>`_
    for datastore transactions.

    :param db:
    :param user_model: See :ref:`Models <models_topic>`.
    :param role_model: See :ref:`Models <models_topic>`.
    """

    def __init__(
        self,
        db: "flask_sqlalchemy.SQLAlchemy",
        user_model: t.Type["User"],
        role_model: t.Type["Role"],
    ):
        # Initialize both mixin bases explicitly (no cooperative super() here).
        SQLAlchemyDatastore.__init__(self, db)
        UserDatastore.__init__(self, user_model, role_model)

    def find_user(
        self, case_insensitive: bool = False, **kwargs: t.Any
    ) -> t.Union["User", None]:
        """Return the first user matching ``kwargs``, or ``None``."""
        from sqlalchemy import func as alchemyFn

        query = self.user_model.query
        if config_value("JOIN_USER_ROLES") and hasattr(self.user_model, "roles"):
            from sqlalchemy.orm import joinedload

            # Eager-load roles in the same SELECT to avoid a lazy-load later.
            # NOTE(review): the string form ``joinedload("roles")`` is
            # deprecated/removed in SQLAlchemy >= 1.4 - confirm the pinned
            # SQLAlchemy version supports it.
            query = query.options(joinedload("roles"))
        if case_insensitive:
            # While it is of course possible to pass in multiple keys to filter on
            # that isn't the normal use case. If caller asks for case_insensitive
            # AND gives multiple keys - throw an error.
            if len(kwargs) > 1:
                raise ValueError("Case insensitive option only supports single key")
            attr, identifier = kwargs.popitem()
            # Compare lower-cased column against the lower-cased identifier.
            subquery = alchemyFn.lower(
                getattr(self.user_model, attr)
            ) == alchemyFn.lower(identifier)
            return query.filter(subquery).first()
        else:
            return query.filter_by(**kwargs).first()

    def find_role(self, role: str) -> t.Union["Role", None]:
        # Roles are looked up by their unique name.
        return self.role_model.query.filter_by(name=role).first()  # type: ignore
class SQLAlchemySessionUserDatastore(SQLAlchemyUserDatastore, SQLAlchemyDatastore):
    """A UserDatastore implementation that directly uses
    `SQLAlchemy's <https://docs.sqlalchemy.org/en/14/orm/session_basics.html>`_
    session API.

    :param session:
    :param user_model: See :ref:`Models <models_topic>`.
    :param role_model: See :ref:`Models <models_topic>`.
    """

    def __init__(
        self,
        session: "sqlalchemy.orm.scoping.scoped_session",
        user_model: t.Type["User"],
        role_model: t.Type["Role"],
    ):
        # Adapter: the Flask-SQLAlchemy-oriented base class expects an object
        # with a ``.session`` attribute, so wrap the raw session in one.
        class PretendFlaskSQLAlchemyDb:
            """This is a pretend db object, so we can just pass in a session."""

            def __init__(self, session):
                self.session = session

        SQLAlchemyUserDatastore.__init__(
            self, PretendFlaskSQLAlchemyDb(session), user_model, role_model
        )

    def commit(self):
        # Pure delegation to the datastore commit (kept explicit for clarity).
        super().commit()
class MongoEngineUserDatastore(MongoEngineDatastore, UserDatastore):
    """A UserDatastore implementation that assumes the
    use of
    `Flask-MongoEngine <https://pypi.python.org/pypi/flask-mongoengine/>`_
    for datastore transactions.

    :param db:
    :param user_model: See :ref:`Models <models_topic>`.
    :param role_model: See :ref:`Models <models_topic>`.
    """

    def __init__(
        self,
        db: "flask_mongoengine.MongoEngine",
        user_model: t.Type["User"],
        role_model: t.Type["Role"],
    ):
        # Initialize both mixin bases explicitly (no cooperative super() here).
        MongoEngineDatastore.__init__(self, db)
        UserDatastore.__init__(self, user_model, role_model)

    def find_user(self, case_insensitive=False, **kwargs):
        """Return the first user matching ``kwargs``, or ``None``."""
        from mongoengine.queryset.visitor import Q, QCombination
        from mongoengine.errors import ValidationError

        try:
            if case_insensitive:
                # While it is of course possible to pass in multiple keys to filter on
                # that isn't the normal use case. If caller asks for case_insensitive
                # AND gives multiple keys - throw an error.
                if len(kwargs) > 1:
                    raise ValueError("Case insensitive option only supports single key")
                attr, identifier = kwargs.popitem()
                # ``__iexact`` gives a case-insensitive exact match in mongoengine.
                query = {f"{attr}__iexact": identifier}
                return self.user_model.objects(**query).first()
            else:
                # AND together one Q() object per keyword filter.
                queries = map(lambda i: Q(**{i[0]: i[1]}), kwargs.items())
                query = QCombination(QCombination.AND, queries)
                return self.user_model.objects(query).first()
        except ValidationError:  # pragma: no cover
            # e.g. a malformed ObjectId in the filter - treat as "not found".
            return None

    def find_role(self, role):
        # Roles are looked up by their unique name.
        return self.role_model.objects(name=role).first()
class PeeweeUserDatastore(PeeweeDatastore, UserDatastore):
    """A UserDatastore implementation that assumes the
    use of
    `Peewee Flask utils \
    <https://docs.peewee-orm.com/en/latest/peewee/playhouse.html#flask-utils>`_
    for datastore transactions.

    :param db:
    :param user_model: See :ref:`Models <models_topic>`.
    :param role_model: See :ref:`Models <models_topic>`.
    :param role_link:
    """

    def __init__(self, db, user_model, role_model, role_link):
        """
        :param db:
        :param user_model: A user model class definition
        :param role_model: A role model class definition
        :param role_link: A model implementing the many-to-many user-role relation
        """
        PeeweeDatastore.__init__(self, db)
        UserDatastore.__init__(self, user_model, role_model)
        # Join-table model used to manage the user<->role many-to-many relation.
        self.UserRole = role_link

    def find_user(self, case_insensitive=False, **kwargs):
        """Return the first user matching ``kwargs``, or ``None``.

        Peewee raises ``DoesNotExist`` instead of returning ``None``;
        that is normalized here.
        """
        from peewee import fn as peeweeFn

        try:
            if case_insensitive:
                # While it is of course possible to pass in multiple keys to filter on
                # that isn't the normal use case. If caller asks for case_insensitive
                # AND gives multiple keys - throw an error.
                if len(kwargs) > 1:
                    raise ValueError("Case insensitive option only supports single key")
                attr, identifier = kwargs.popitem()
                # Compare lower-cased column value with the lower-cased identifier.
                return self.user_model.get(
                    peeweeFn.lower(getattr(self.user_model, attr))
                    == peeweeFn.lower(identifier)
                )
            else:
                return self.user_model.filter(**kwargs).get()
        except self.user_model.DoesNotExist:
            return None

    def find_role(self, role):
        # Roles are looked up by their unique name; None when absent.
        try:
            return self.role_model.filter(name=role).get()
        except self.role_model.DoesNotExist:
            return None

    def create_user(self, **kwargs):
        """Creates and returns a new user from the given parameters."""
        # Roles are attached after the user row exists, since the join table
        # needs the user's id.
        roles = kwargs.pop("roles", [])
        user = self.user_model(**self._prepare_create_user_args(**kwargs))
        user = self.put(user)
        for role in roles:
            self.add_role_to_user(user, role)
        self.put(user)
        return user

    def add_role_to_user(self, user, role):
        """Adds a role to a user.

        :param user: The user to manipulate
        :param role: The role to add to the user

        Returns ``True`` if the link was created, ``False`` if it already existed.
        """
        role = self._prepare_role_modify_args(role)
        result = self.UserRole.select().where(
            self.UserRole.user == user.id, self.UserRole.role == role.id
        )
        if result.count():
            return False
        else:
            self.put(self.UserRole.create(user=user.id, role=role.id))
            return True

    def remove_role_from_user(self, user, role):
        """Removes a role from a user.

        :param user: The user to manipulate
        :param role: The role to remove from the user

        Returns ``True`` if a link row was deleted, ``False`` otherwise.
        """
        # NOTE(review): this filters on the model instances while
        # add_role_to_user filters on ``.id`` - confirm the asymmetry is
        # intentional (peewee resolves both forms).
        role = self._prepare_role_modify_args(role)
        result = self.UserRole.select().where(
            self.UserRole.user == user, self.UserRole.role == role
        )
        if result.count():
            query = self.UserRole.delete().where(
                self.UserRole.user == user, self.UserRole.role == role
            )
            query.execute()
            return True
        else:
            return False
class PonyUserDatastore(PonyDatastore, UserDatastore):
"""A UserDatastore implementation that assumes the
use of
| |
path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class VIEW3D_MT_snap_pie(bpy_types.Menu, bpy_types._GenericUI):
    # Auto-generated API stub of Blender's 3D-viewport "Snap" pie menu:
    # every member is a placeholder whose body is ``pass``.
    bl_label = None
    ''' '''

    bl_rna = None
    ''' '''

    id_data = None
    ''' '''

    def append(self, draw_func):
        '''Stub: no-op placeholder.'''
        pass

    def as_pointer(self):
        '''Stub: no-op placeholder.'''
        pass

    def bl_rna_get_subclass(self):
        '''Stub: no-op placeholder.'''
        pass

    def bl_rna_get_subclass_py(self):
        '''Stub: no-op placeholder.'''
        pass

    def draw(self, _context):
        '''Stub: no-op placeholder.'''
        pass

    def draw_collapsible(self, context, layout):
        '''Stub: no-op placeholder.'''
        pass

    def draw_preset(self, _context):
        '''Stub: no-op placeholder.'''
        pass

    def driver_add(self):
        '''Stub: no-op placeholder.'''
        pass

    def driver_remove(self):
        '''Stub: no-op placeholder.'''
        pass

    def get(self):
        '''Stub: no-op placeholder.'''
        pass

    def is_extended(self):
        '''Stub: no-op placeholder.'''
        pass

    def is_property_hidden(self):
        '''Stub: no-op placeholder.'''
        pass

    def is_property_overridable_library(self):
        '''Stub: no-op placeholder.'''
        pass

    def is_property_readonly(self):
        '''Stub: no-op placeholder.'''
        pass

    def is_property_set(self):
        '''Stub: no-op placeholder.'''
        pass

    def items(self):
        '''Stub: no-op placeholder.'''
        pass

    def keyframe_delete(self):
        '''Stub: no-op placeholder.'''
        pass

    def keyframe_insert(self):
        '''Stub: no-op placeholder.'''
        pass

    def keys(self):
        '''Stub: no-op placeholder.'''
        pass

    def path_from_id(self):
        '''Stub: no-op placeholder.'''
        pass

    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        '''Stub: no-op placeholder.'''
        pass

    def path_resolve(self):
        '''Stub: no-op placeholder.'''
        pass

    def pop(self):
        '''Stub: no-op placeholder.'''
        pass

    def prepend(self, draw_func):
        '''Stub: no-op placeholder.'''
        pass

    def property_overridable_library_set(self):
        '''Stub: no-op placeholder.'''
        pass

    def property_unset(self):
        '''Stub: no-op placeholder.'''
        pass

    def remove(self, draw_func):
        '''Stub: no-op placeholder.'''
        pass

    def type_recast(self):
        '''Stub: no-op placeholder.'''
        pass

    def values(self):
        '''Stub: no-op placeholder.'''
        pass
class VIEW3D_MT_surface_add(bpy_types.Menu, bpy_types._GenericUI):
    # Auto-generated API stub of Blender's "Add > Surface" menu:
    # every member is a placeholder whose body is ``pass``.
    bl_idname = None
    ''' '''

    bl_label = None
    ''' '''

    bl_rna = None
    ''' '''

    id_data = None
    ''' '''

    def append(self, draw_func):
        '''Stub: no-op placeholder.'''
        pass

    def as_pointer(self):
        '''Stub: no-op placeholder.'''
        pass

    def bl_rna_get_subclass(self):
        '''Stub: no-op placeholder.'''
        pass

    def bl_rna_get_subclass_py(self):
        '''Stub: no-op placeholder.'''
        pass

    def draw(self, _context):
        '''Stub: no-op placeholder.'''
        pass

    def draw_collapsible(self, context, layout):
        '''Stub: no-op placeholder.'''
        pass

    def draw_preset(self, _context):
        '''Stub: no-op placeholder.'''
        pass

    def driver_add(self):
        '''Stub: no-op placeholder.'''
        pass

    def driver_remove(self):
        '''Stub: no-op placeholder.'''
        pass

    def get(self):
        '''Stub: no-op placeholder.'''
        pass

    def is_extended(self):
        '''Stub: no-op placeholder.'''
        pass

    def is_property_hidden(self):
        '''Stub: no-op placeholder.'''
        pass

    def is_property_overridable_library(self):
        '''Stub: no-op placeholder.'''
        pass

    def is_property_readonly(self):
        '''Stub: no-op placeholder.'''
        pass

    def is_property_set(self):
        '''Stub: no-op placeholder.'''
        pass

    def items(self):
        '''Stub: no-op placeholder.'''
        pass

    def keyframe_delete(self):
        '''Stub: no-op placeholder.'''
        pass

    def keyframe_insert(self):
        '''Stub: no-op placeholder.'''
        pass

    def keys(self):
        '''Stub: no-op placeholder.'''
        pass

    def path_from_id(self):
        '''Stub: no-op placeholder.'''
        pass

    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        '''Stub: no-op placeholder.'''
        pass

    def path_resolve(self):
        '''Stub: no-op placeholder.'''
        pass

    def pop(self):
        '''Stub: no-op placeholder.'''
        pass

    def prepend(self, draw_func):
        '''Stub: no-op placeholder.'''
        pass

    def property_overridable_library_set(self):
        '''Stub: no-op placeholder.'''
        pass

    def property_unset(self):
        '''Stub: no-op placeholder.'''
        pass

    def remove(self, draw_func):
        '''Stub: no-op placeholder.'''
        pass

    def type_recast(self):
        '''Stub: no-op placeholder.'''
        pass

    def values(self):
        '''Stub: no-op placeholder.'''
        pass
class VIEW3D_MT_transform_base:
    # Auto-generated stub: shared base for the transform menus; declares the
    # UI metadata attributes and the ``draw`` hook subclasses override.
    bl_category = None
    ''' '''

    bl_label = None
    ''' '''

    def draw(self, context):
        '''Stub: no-op placeholder.'''
        pass
class VIEW3D_MT_transform_gizmo_pie(bpy_types.Menu, bpy_types._GenericUI):
    # Auto-generated API stub of Blender's transform-gizmo pie menu:
    # every member is a placeholder whose body is ``pass``.
    bl_label = None
    ''' '''

    bl_rna = None
    ''' '''

    id_data = None
    ''' '''

    def append(self, draw_func):
        '''Stub: no-op placeholder.'''
        pass

    def as_pointer(self):
        '''Stub: no-op placeholder.'''
        pass

    def bl_rna_get_subclass(self):
        '''Stub: no-op placeholder.'''
        pass

    def bl_rna_get_subclass_py(self):
        '''Stub: no-op placeholder.'''
        pass

    def draw(self, context):
        '''Stub: no-op placeholder.'''
        pass

    def draw_collapsible(self, context, layout):
        '''Stub: no-op placeholder.'''
        pass

    def draw_preset(self, _context):
        '''Stub: no-op placeholder.'''
        pass

    def driver_add(self):
        '''Stub: no-op placeholder.'''
        pass

    def driver_remove(self):
        '''Stub: no-op placeholder.'''
        pass

    def get(self):
        '''Stub: no-op placeholder.'''
        pass

    def is_extended(self):
        '''Stub: no-op placeholder.'''
        pass

    def is_property_hidden(self):
        '''Stub: no-op placeholder.'''
        pass

    def is_property_overridable_library(self):
        '''Stub: no-op placeholder.'''
        pass

    def is_property_readonly(self):
        '''Stub: no-op placeholder.'''
        pass

    def is_property_set(self):
        '''Stub: no-op placeholder.'''
        pass

    def items(self):
        '''Stub: no-op placeholder.'''
        pass

    def keyframe_delete(self):
        '''Stub: no-op placeholder.'''
        pass

    def keyframe_insert(self):
        '''Stub: no-op placeholder.'''
        pass

    def keys(self):
        '''Stub: no-op placeholder.'''
        pass

    def path_from_id(self):
        '''Stub: no-op placeholder.'''
        pass

    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        '''Stub: no-op placeholder.'''
        pass

    def path_resolve(self):
        '''Stub: no-op placeholder.'''
        pass

    def pop(self):
        '''Stub: no-op placeholder.'''
        pass

    def prepend(self, draw_func):
        '''Stub: no-op placeholder.'''
        pass

    def property_overridable_library_set(self):
        '''Stub: no-op placeholder.'''
        pass

    def property_unset(self):
        '''Stub: no-op placeholder.'''
        pass

    def remove(self, draw_func):
        '''Stub: no-op placeholder.'''
        pass

    def type_recast(self):
        '''Stub: no-op placeholder.'''
        pass

    def values(self):
        '''Stub: no-op placeholder.'''
        pass
class VIEW3D_MT_uv_map(bpy_types.Menu, bpy_types._GenericUI):
    # Auto-generated API stub of Blender's "UV Mapping" menu:
    # every member is a placeholder whose body is ``pass``.
    bl_label = None
    ''' '''

    bl_rna = None
    ''' '''

    id_data = None
    ''' '''

    def append(self, draw_func):
        '''Stub: no-op placeholder.'''
        pass

    def as_pointer(self):
        '''Stub: no-op placeholder.'''
        pass

    def bl_rna_get_subclass(self):
        '''Stub: no-op placeholder.'''
        pass

    def bl_rna_get_subclass_py(self):
        '''Stub: no-op placeholder.'''
        pass

    def draw(self, _context):
        '''Stub: no-op placeholder.'''
        pass

    def draw_collapsible(self, context, layout):
        '''Stub: no-op placeholder.'''
        pass

    def draw_preset(self, _context):
        '''Stub: no-op placeholder.'''
        pass

    def driver_add(self):
        '''Stub: no-op placeholder.'''
        pass

    def driver_remove(self):
        '''Stub: no-op placeholder.'''
        pass

    def get(self):
        '''Stub: no-op placeholder.'''
        pass

    def is_extended(self):
        '''Stub: no-op placeholder.'''
        pass

    def is_property_hidden(self):
        '''Stub: no-op placeholder.'''
        pass

    def is_property_overridable_library(self):
        '''Stub: no-op placeholder.'''
        pass

    def is_property_readonly(self):
        '''Stub: no-op placeholder.'''
        pass

    def is_property_set(self):
        '''Stub: no-op placeholder.'''
        pass

    def items(self):
        '''Stub: no-op placeholder.'''
        pass

    def keyframe_delete(self):
        '''Stub: no-op placeholder.'''
        pass

    def keyframe_insert(self):
        '''Stub: no-op placeholder.'''
        pass

    def keys(self):
        '''Stub: no-op placeholder.'''
        pass

    def path_from_id(self):
        '''Stub: no-op placeholder.'''
        pass

    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        '''Stub: no-op placeholder.'''
        pass

    def path_resolve(self):
        '''Stub: no-op placeholder.'''
        pass

    def pop(self):
        '''Stub: no-op placeholder.'''
        pass

    def prepend(self, draw_func):
        '''Stub: no-op placeholder.'''
        pass

    def property_overridable_library_set(self):
        '''Stub: no-op placeholder.'''
        pass

    def property_unset(self):
        '''Stub: no-op placeholder.'''
        pass

    def remove(self, draw_func):
        '''Stub: no-op placeholder.'''
        pass

    def type_recast(self):
        '''Stub: no-op placeholder.'''
        pass

    def values(self):
        '''Stub: no-op placeholder.'''
        pass
class VIEW3D_MT_vertex_group(bpy_types.Menu, bpy_types._GenericUI):
    # Auto-generated API stub of Blender's "Vertex Groups" menu:
    # every member is a placeholder whose body is ``pass``.
    bl_label = None
    ''' '''

    bl_rna = None
    ''' '''

    id_data = None
    ''' '''

    def append(self, draw_func):
        '''Stub: no-op placeholder.'''
        pass

    def as_pointer(self):
        '''Stub: no-op placeholder.'''
        pass

    def bl_rna_get_subclass(self):
        '''Stub: no-op placeholder.'''
        pass

    def bl_rna_get_subclass_py(self):
        '''Stub: no-op placeholder.'''
        pass

    def draw(self, context):
        '''Stub: no-op placeholder.'''
        pass

    def draw_collapsible(self, context, layout):
        '''Stub: no-op placeholder.'''
        pass

    def draw_preset(self, _context):
        '''Stub: no-op placeholder.'''
        pass

    def driver_add(self):
        '''Stub: no-op placeholder.'''
        pass

    def driver_remove(self):
        '''Stub: no-op placeholder.'''
        pass

    def get(self):
        '''Stub: no-op placeholder.'''
        pass

    def is_extended(self):
        '''Stub: no-op placeholder.'''
        pass

    def is_property_hidden(self):
        '''Stub: no-op placeholder.'''
        pass

    def is_property_overridable_library(self):
        '''Stub: no-op placeholder.'''
        pass

    def is_property_readonly(self):
        '''Stub: no-op placeholder.'''
        pass

    def is_property_set(self):
        '''Stub: no-op placeholder.'''
        pass

    def items(self):
        '''Stub: no-op placeholder.'''
        pass

    def keyframe_delete(self):
        '''Stub: no-op placeholder.'''
        pass

    def keyframe_insert(self):
        '''Stub: no-op placeholder.'''
        pass

    def keys(self):
        '''Stub: no-op placeholder.'''
        pass

    def path_from_id(self):
        '''Stub: no-op placeholder.'''
        pass

    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        '''Stub: no-op placeholder.'''
        pass

    def path_resolve(self):
        '''Stub: no-op placeholder.'''
        pass

    def pop(self):
        '''Stub: no-op placeholder.'''
        pass

    def prepend(self, draw_func):
        '''Stub: no-op placeholder.'''
        pass

    def property_overridable_library_set(self):
        '''Stub: no-op placeholder.'''
        pass

    def property_unset(self):
        '''Stub: no-op placeholder.'''
        pass

    def remove(self, draw_func):
        '''Stub: no-op placeholder.'''
        pass

    def type_recast(self):
        '''Stub: no-op placeholder.'''
        pass

    def values(self):
        '''Stub: no-op placeholder.'''
        pass
class VIEW3D_MT_view(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
| |
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
from warnings import warn
import pandas as pd
import numpy as np
from GridCal.Engine.Devices.editable_device import DeviceType, GCProp
from GridCal.Engine.Devices.generator import Generator
class Battery(Generator):
"""
:ref:`Battery<battery>` (voltage controlled and dispatchable).
Arguments:
**name** (str, "batt"): Name of the battery
**active_power** (float, 0.0): Active power in MW
**power_factor** (float, 0.8): Power factor
**voltage_module** (float, 1.0): Voltage setpoint in per unit
**is_controlled** (bool, True): Is the unit voltage controlled (if so, the
connection bus becomes a PV bus)
**Qmin** (float, -9999): Minimum reactive power in MVAr
**Qmax** (float, 9999): Maximum reactive power in MVAr
**Snom** (float, 9999): Nominal apparent power in MVA
**Enom** (float, 9999): Nominal energy capacity in MWh
**p_min** (float, -9999): Minimum dispatchable power in MW
**p_max** (float, 9999): Maximum dispatchable power in MW
**op_cost** (float, 1.0): Operational cost in Eur (or other currency) per MW
**power_prof** (DataFrame, None): Pandas DataFrame with the active power
profile in MW
**power_factor_prof** (DataFrame, None): Pandas DataFrame with the power factor profile
**vset_prof** (DataFrame, None): Pandas DataFrame with the voltage setpoint
profile in per unit
**active** (bool, True): Is the battery active?
**Sbase** (float, 100): Base apparent power in MVA
**enabled_dispatch** (bool, True): Is the battery enabled for OPF?
**mttf** (float, 0.0): Mean time to failure in hours
**mttr** (float, 0.0): Mean time to recovery in hours
**charge_efficiency** (float, 0.9): Efficiency when charging
**discharge_efficiency** (float, 0.9): Efficiency when discharging
**max_soc** (float, 0.99): Maximum state of charge
**min_soc** (float, 0.3): Minimum state of charge
**soc** (float, 0.8): Current state of charge
**charge_per_cycle** (float, 0.1): Per unit of power to take per cycle when charging
**discharge_per_cycle** (float, 0.1): Per unit of power to deliver per cycle
when discharging
"""
def __init__(self, name='batt', idtag=None, active_power=0.0, power_factor=0.8, voltage_module=1.0,
             is_controlled=True, Qmin=-9999, Qmax=9999, Snom=9999, Enom=9999, p_min=-9999, p_max=9999,
             op_cost=1.0, power_prof=None, power_factor_prof=None, vset_prof=None, active=True, Sbase=100,
             enabled_dispatch=True, mttf=0.0, mttr=0.0, charge_efficiency=0.9, discharge_efficiency=0.9,
             max_soc=0.99, min_soc=0.3, soc=0.8, charge_per_cycle=0.1, discharge_per_cycle=0.1):
    """Build a battery: a dispatchable, voltage-controllable generator with an
    energy state of charge. See the class docstring for parameter meanings."""
    Generator.__init__(self, name=name,
                       idtag=idtag,
                       active_power=active_power,
                       power_factor=power_factor,
                       voltage_module=voltage_module,
                       is_controlled=is_controlled,
                       Qmin=Qmin, Qmax=Qmax, Snom=Snom,
                       power_prof=power_prof,
                       power_factor_prof=power_factor_prof,
                       vset_prof=vset_prof,
                       active=active,
                       p_min=p_min, p_max=p_max,
                       op_cost=op_cost,
                       Sbase=Sbase,
                       enabled_dispatch=enabled_dispatch,
                       mttf=mttf,
                       mttr=mttr)

    # type of this device
    self.device_type = DeviceType.BatteryDevice

    # manually modify the editable headers
    # (fixes: 'Nomnial' typo, swapped max_soc/min_soc descriptions, and the
    #  previously missing 'charge_per_cycle' entry)
    self.editable_headers = {'name': GCProp('', str, 'Name of the battery'),
                             'idtag': GCProp('', str, 'Unique ID'),
                             'bus': GCProp('', DeviceType.BusDevice, 'Connection bus name'),
                             'active': GCProp('', bool, 'Is the battery active?'),
                             'is_controlled': GCProp('', bool, 'Is this battery voltage-controlled?'),
                             'P': GCProp('MW', float, 'Active power'),
                             'Pf': GCProp('', float,
                                          'Power factor (cos(fi)). This is used for non-controlled batteries.'),
                             'Vset': GCProp('p.u.', float, 'Set voltage. This is used for controlled batteries.'),
                             'Snom': GCProp('MVA', float, 'Nominal power.'),
                             'Enom': GCProp('MWh', float, 'Nominal energy capacity.'),
                             'max_soc': GCProp('p.u.', float, 'Maximum state of charge.'),
                             'min_soc': GCProp('p.u.', float, 'Minimum state of charge.'),
                             'soc_0': GCProp('p.u.', float, 'Initial state of charge.'),
                             'charge_efficiency': GCProp('p.u.', float, 'Charging efficiency.'),
                             'discharge_efficiency': GCProp('p.u.', float, 'Discharge efficiency.'),
                             'charge_per_cycle': GCProp('p.u.', float, 'Per-unit of power charged per cycle.'),
                             'discharge_per_cycle': GCProp('p.u.', float, 'Per-unit of power discharged per cycle.'),
                             'Qmin': GCProp('MVAr', float, 'Minimum reactive power.'),
                             'Qmax': GCProp('MVAr', float, 'Maximum reactive power.'),
                             'Pmin': GCProp('MW', float, 'Minimum active power. Used in OPF.'),
                             'Pmax': GCProp('MW', float, 'Maximum active power. Used in OPF.'),
                             'Cost': GCProp('e/MWh', float, 'Generation unitary cost. Used in OPF.'),
                             'enabled_dispatch': GCProp('', bool, 'Enabled for dispatch? Used in OPF.'),
                             'mttf': GCProp('h', float, 'Mean time to failure'),
                             'mttr': GCProp('h', float, 'Mean time to recovery')}

    self.charge_efficiency = charge_efficiency
    self.discharge_efficiency = discharge_efficiency
    self.max_soc = max_soc
    self.min_soc = min_soc
    self.min_soc_charge = (self.max_soc + self.min_soc) / 2  # SoC state to force the battery charge
    self.charge_per_cycle = charge_per_cycle  # charge 10% per cycle
    self.discharge_per_cycle = discharge_per_cycle
    self.min_energy = Enom * self.min_soc  # MWh floor implied by min_soc
    self.Enom = Enom
    self.soc_0 = soc  # initial state of charge, restored by reset()
    self.soc = soc
    self.energy = self.Enom * self.soc  # currently stored energy in MWh
    self.energy_array = None
    self.power_array = None
def copy(self):
    """
    Make a copy of this object

    Returns: :ref:`Battery<battery>` instance
    """
    # create a new instance of the battery
    batt = Battery()

    batt.name = self.name

    # Power (MVA)
    # MVA = kV * kA
    batt.P = self.P
    batt.Pmax = self.Pmax
    batt.Pmin = self.Pmin

    # power profile for this load
    batt.P_prof = self.P_prof

    # Voltage module set point (p.u.)
    batt.Vset = self.Vset

    # voltage set profile for this load
    batt.Vset_prof = self.Vset_prof

    # minimum reactive power in per unit
    batt.Qmin = self.Qmin

    # Maximum reactive power in per unit
    batt.Qmax = self.Qmax

    # Nominal power MVA
    batt.Snom = self.Snom

    # Nominal energy MWh
    batt.Enom = self.Enom

    # Enable for active power dispatch?
    batt.enabled_dispatch = self.enabled_dispatch

    batt.mttf = self.mttf
    batt.mttr = self.mttr

    batt.charge_efficiency = self.charge_efficiency
    batt.discharge_efficiency = self.discharge_efficiency

    batt.max_soc = self.max_soc
    batt.min_soc = self.min_soc
    batt.min_soc_charge = self.min_soc_charge  # SoC state to force the battery charge
    batt.charge_per_cycle = self.charge_per_cycle  # charge 10% per cycle
    batt.discharge_per_cycle = self.discharge_per_cycle
    batt.min_energy = self.min_energy
    # fixed: copy the *initial* SoC, not the current one, so that
    # batt.reset() restores the same state as self.reset() would
    batt.soc_0 = self.soc_0
    batt.soc = self.soc
    batt.energy = self.energy
    batt.energy_array = self.energy_array
    batt.power_array = self.power_array

    # NOTE(review): idtag, Pf, is_controlled and active are not copied here —
    # confirm whether that is intentional.
    return batt
def get_properties_dict(self, version=3):
    """
    Get json dictionary

    :param version: schema version of the output (2 or 3; any other value
                    yields an empty dict). Version 3 additionally reports
                    the ``is_controlled`` flag.
    :return: json-compatible dictionary
    """
    if version not in (2, 3):
        return dict()

    # Keys shared by both schema versions, in the original serialization order.
    data = {'id': self.idtag,
            'type': 'battery',
            'phases': 'ps',
            'name': self.name,
            'name_code': self.code,
            'bus': self.bus.idtag,
            'active': self.active}

    if version == 3:
        # v3 inserts the voltage-control flag here (same key position as
        # the original hand-written v3 dict).
        data['is_controlled'] = self.is_controlled

    data.update({'p': self.P,
                 'vset': self.Vset,
                 'pf': self.Pf,
                 'snom': self.Snom,
                 'enom': self.Enom,
                 'qmin': self.Qmin,
                 'qmax': self.Qmax,
                 'pmin': self.Pmin,
                 'pmax': self.Pmax,
                 'cost': self.Cost,
                 'charge_efficiency': self.charge_efficiency,
                 'discharge_efficiency': self.discharge_efficiency,
                 'min_soc': self.min_soc,
                 'max_soc': self.max_soc,
                 'soc_0': self.soc_0,
                 'min_soc_charge': self.min_soc_charge,
                 'charge_per_cycle': self.charge_per_cycle,
                 'discharge_per_cycle': self.discharge_per_cycle,
                 'technology': ""})
    return data
def get_profiles_dict(self):
    """
    Return the time profiles as a json-compatible dictionary.

    :return: dict with the active/power/voltage-setpoint/power-factor
             profiles as plain lists (empty list when a profile is absent)
    """
    def as_list(prof):
        # Profiles are array-like (with .tolist()) or None when not set.
        return list() if prof is None else prof.tolist()

    return {'id': self.idtag,
            'active': as_list(self.active_prof),
            'p': as_list(self.P_prof),
            'v': as_list(self.Vset_prof),
            'pf': as_list(self.Pf_prof)}
def get_units_dict(self):
    """Map each serialized property name to its physical unit."""
    return dict(p='MW',
                vset='p.u.',
                pf='p.u.',
                snom='MVA',
                enom='MWh',
                qmin='MVAr',
                qmax='MVAr',
                pmin='MW',
                pmax='MW',
                cost='€/MWh',
                charge_efficiency='p.u.',
                discharge_efficiency='p.u.',
                min_soc='p.u.',
                max_soc='p.u.',
                soc_0='p.u.',
                min_soc_charge='p.u.',
                charge_per_cycle='p.u.',
                discharge_per_cycle='p.u.')
def initialize_arrays(self, index, arr=None, arr_in_pu=False):
    """
    Create power profile based on index
    :param index: time index associated
    :param arr: array of values
    :param arr_in_pu: is the array in per unit?
    """
    # NOTE(review): if arr_in_pu is True, ``arr`` must not be None
    # (None * self.P would raise) - presumably callers guarantee this.
    if arr_in_pu:
        # Scale the per-unit array by the battery's active power.
        dta = arr * self.P
    else:
        # Default to a flat profile at self.P when no array is given.
        dta = np.ones(len(index)) * self.P if arr is None else arr
    # Both arrays start from the same data; energy is presumably integrated
    # later during simulation.
    self.power_array = pd.DataFrame(data=dta.copy(), index=index, columns=[self.name])
    self.energy_array = pd.DataFrame(data=dta.copy(), index=index, columns=[self.name])
def reset(self):
    """
    Set the battery to its initial state
    """
    # Restore the initial state of charge and the implied stored energy.
    self.soc = self.soc_0
    self.energy = self.Enom * self.soc
    # NOTE(review): both arrays are re-seeded from the power profile;
    # energy_array getting P_prof (not an energy series) looks deliberate
    # but is worth confirming.
    self.power_array = self.P_prof.copy()
    self.energy_array = self.P_prof.copy()
def process(self, P, dt, charge_if_needed=False):
"""
process a cycle in the battery
:param P: proposed power in MW
:param dt: time increment in hours
:param charge_if_needed: True / False
:return: Amount of power actually processed in MW
"""
# if self.Enom is None:
# raise Exception('You need to set the battery nominal power!')
if np.isnan(P):
warn('NaN found!!!!!!')
# pick the right efficiency value
if P >= 0.0:
eff = self.discharge_efficiency
# energy_per_cycle = self.nominal_energy * self.discharge_per_cycle
else:
eff = self.charge_efficiency
# amount of energy that the battery can take in a cycle of 1 hour
energy_per_cycle = self.Enom * self.charge_per_cycle
# compute the proposed energy. Later we check how much is actually possible
proposed_energy | |
# qatrack/service_log/tests/test_views.py
import json
from django.conf import settings
from django.contrib.auth.models import Permission, User
from django.core.serializers.json import DjangoJSONEncoder
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.test import RequestFactory, TestCase
from django.utils import timezone
from qatrack.parts import models as p_models
from qatrack.qa import models as qa_models
from qatrack.qa.tests import utils as qa_utils
from qatrack.service_log import models, views
from qatrack.service_log.tests import utils as sl_utils
class TestURLS(TestCase):
    """Smoke tests: every service-log URL resolves and answers with the
    expected HTTP status code."""

    def setUp(self):
        """Create an authenticated superuser (in a group) for the client."""
        user = qa_utils.create_user(is_superuser=True, uname='user', pwd='<PASSWORD>')
        user.groups.add(qa_utils.create_group())
        user.save()
        self.client.login(username='user', password='<PASSWORD>')

    def returns_code(self, url, method='get', code=200):
        """Return True when *url*, requested via *method*, answers *code*."""
        response = getattr(self.client, method)(url)
        return response.status_code == code

    def test_qa_urls(self):
        """Request each named URL (with kwargs/query string) and verify the
        response status."""
        sl_utils.create_service_event_status(is_default=True)
        qa_utils.create_status(is_default=True)
        se = sl_utils.create_service_event()
        unit = qa_utils.create_unit()
        utc = qa_utils.create_unit_test_collection(unit=unit)
        tli = qa_utils.create_test_list_instance(unit_test_collection=utc)

        ok_urls = (
            ('sl_dash', {}, ''),
            ('sl_new', {}, ''),
            ('sl_edit', {'pk': se.id}, ''),
            ('sl_details', {'pk': se.id}, ''),
            ('sl_list_all', {}, ''),
            ('rtsqa_list_for_event', {'se_pk': se.id}, ''),
            ('se_searcher', {}, '?q=%d&unit_id=%d' % (se.id, unit.id)),
            ('tli_select', {'pk': utc.id, 'form': 'a_form'}, ''),
            ('tli_statuses', {}, '?tli_id=%d' % tli.id),
            ('unit_sa_utc', {}, '?unit_id=%d' % unit.id),
            ('err', {}, ''),
            ('sl_unit_new', {}, ''),
            ('sl_unit_view_se', {}, ''),
            ('se_down_time', {}, ''),
            ('handle_unit_down_time', {}, ''),
        )
        # URLs that expect kwargs should still reverse (for templates) but
        # must 404 when requested without them.
        missing_kwarg_urls = (
            ('sl_details', {}, ''),
            ('tli_select', {}, ''),
            ('se_searcher', {}, ''),
            ('tli_statuses', {}, ''),
            ('unit_sa_utc', {}, ''),
        )

        for name, kwargs, query in ok_urls:
            self.assertTrue(self.returns_code(reverse(name, kwargs=kwargs) + query))
        for name, kwargs, query in missing_kwarg_urls:
            self.assertTrue(self.returns_code(reverse(name, kwargs=kwargs) + query, code=404))
class TestDashboard(TestCase):
    """Tests for the service-log dashboard view and its summary counts."""

    def create_objects(self):
        """Create fixtures sized so each dashboard counter has a known value.

        Expected contributions are noted inline on each fixture.
        """
        se_requires_review = sl_utils.create_service_event(is_review_required=True)
        ses_default = sl_utils.create_service_event_status(is_default=True)
        se_default_status = sl_utils.create_service_event(service_status=ses_default)
        rtsqa_0001 = sl_utils.create_return_to_service_qa(add_test_list_instance=True)  # qa_not_reviewed +1
        rtsqa_0002 = sl_utils.create_return_to_service_qa()  # qa_not_complete +1
        rtsqa_0003 = sl_utils.create_return_to_service_qa(service_event=se_requires_review)  # se_needing_review +1, qa_not_complete +1
        rtsqa_0004 = sl_utils.create_return_to_service_qa(service_event=se_default_status)  # se_default +1, qa_not_complete +1
        self.user = qa_utils.create_user(is_superuser=True, uname='person')

    def delete_objects(self):
        """Remove all fixtures so tests in this class stay independent."""
        models.ServiceEvent.objects.all().delete()
        models.ReturnToServiceQA.objects.all().delete()
        User.objects.all().delete()

    def setUp(self):
        """Prepare a request factory and resolve the dashboard view/URL."""
        self.factory = RequestFactory()
        self.view = views.SLDashboard.as_view()
        self.url = reverse('sl_dash')

    def test_get_counts(self):
        """The dashboard counters match the fixture contributions above."""
        self.create_objects()
        counts = views.SLDashboard().get_counts()
        self.assertEqual(counts['qa_not_reviewed'], 1)
        self.assertEqual(counts['qa_not_complete'], 3)
        self.assertEqual(counts['se_needing_review'], 1)
        self.assertEqual(counts['se_default']['count'], 1)
        self.delete_objects()

    def test_get_timeline(self):
        """The recent-logs timeline contains only ServiceLog entries."""
        self.create_objects()
        request = self.factory.get(self.url)
        request.user = self.user
        response = self.view(request)
        objs = response.context_data['recent_logs']
        for o in objs:
            self.assertTrue(isinstance(o, models.ServiceLog))
        self.delete_objects()
class TestCreateServiceEvent(TestCase):
    """Tests for the service-event creation view (``sl_new``): initial form
    options, valid/invalid submissions, formset validation, and the
    part-storage bookkeeping when an event is (de)activated."""

    def setUp(self):
        """Create two mirrored units (service area, test list, two completed
        test-list instances one hour apart, service event) plus the users,
        groups and permissions needed for the hours and group-linker paths.
        """
        self.factory = RequestFactory()
        self.view = views.CreateServiceEvent.as_view()
        self.default_ses = sl_utils.create_service_event_status(is_default=True)
        sl_utils.create_service_event_status()
        qa_utils.create_status(is_default=True)
        now = timezone.now()
        # --- unit 1 fixtures ---
        self.u_1 = qa_utils.create_unit(name='u_1')
        self.sa_1 = sl_utils.create_service_area(name='sa_1')
        self.usa_1 = sl_utils.create_unit_service_area(unit=self.u_1, service_area=self.sa_1)
        self.tl_1 = qa_utils.create_test_list(name='tl_1')
        self.tli_1_1 = qa_utils.create_test_list_instance(
            unit_test_collection=qa_utils.create_unit_test_collection(unit=self.u_1, test_collection=self.tl_1),
            test_list=self.tl_1,
            work_completed=now - timezone.timedelta(hours=1)
        )
        self.tli_1_2 = qa_utils.create_test_list_instance(
            unit_test_collection=qa_utils.create_unit_test_collection(unit=self.u_1, test_collection=self.tl_1),
            test_list=self.tl_1,
            work_completed=now
        )
        self.se_1 = sl_utils.create_service_event(unit_service_area=self.usa_1)
        # --- unit 2 fixtures (mirror of unit 1) ---
        self.u_2 = qa_utils.create_unit(name='u_2')
        self.sa_2 = sl_utils.create_service_area(name='sa_2')
        self.usa_2 = sl_utils.create_unit_service_area(unit=self.u_2, service_area=self.sa_2)
        self.tl_2 = qa_utils.create_test_list(name='tl_2')
        self.tli_2_1 = qa_utils.create_test_list_instance(
            unit_test_collection=qa_utils.create_unit_test_collection(unit=self.u_2, test_collection=self.tl_2),
            test_list=self.tl_2,
            work_completed=now - timezone.timedelta(hours=1)
        )
        self.tli_2_2 = qa_utils.create_test_list_instance(
            unit_test_collection=qa_utils.create_unit_test_collection(unit=self.u_2, test_collection=self.tl_2),
            test_list=self.tl_2,
            work_completed=now
        )
        self.se_2 = sl_utils.create_service_event(unit_service_area=self.usa_2)
        self.url = reverse('sl_new')
        self.url_delete = reverse('se_delete')
        # NOTE(review): create_user presumably defaults to username 'user' /
        # password 'password' so this login succeeds -- confirm in qa_utils.
        self.user = qa_utils.create_user(is_superuser=True)
        self.client.login(username='user', password='password')
        # Users who may log hours: directly via permission, or via group.
        perm = Permission.objects.get(codename='can_have_hours')
        user_can_hours = User.objects.get(username='user')
        user_can_hours.user_permissions.add(perm)
        user_group_has_hours = qa_utils.create_user(uname='in_group_with_hours')
        group_has_hours = qa_utils.create_group(name='can_have_hours')
        group_has_hours.permissions.add(perm)
        user_group_has_hours.groups.add(group_has_hours)
        sl_utils.create_third_party()
        # Group linker: exposes a dynamic 'group_linker_<pk>' form field.
        group_linked = qa_utils.create_group(name='linked')
        user_group_linked = qa_utils.create_user(uname='user_in_group_linked')
        user_group_linked.groups.add(group_linked)
        self.gl_1 = sl_utils.create_group_linker(group=group_linked)
        self.part = sl_utils.create_part(add_storage=True)
        self.st = sl_utils.create_service_type()
        self.sto = sl_utils.create_storage(quantity=2)

    def test_initial_options(self):
        """The create form is initialised with the default status, disabled
        dependent fields, and querysets/choices limited appropriately; also
        checks the unit- and initiated-by-preselected variants."""
        response = self.client.get(self.url)
        self.assertEqual(self.default_ses, response.context_data['form'].initial['service_status'])
        # Dependent fields stay disabled until a unit is chosen.
        self.assertTrue(response.context_data['form'].fields['service_area_field'].widget.attrs['disabled'])
        self.assertTrue(response.context_data['form'].fields['service_event_related_field'].widget.attrs['disabled'])
        self.assertTrue(response.context_data['form'].fields['initiated_utc_field'].widget.attrs['disabled'])
        units = models.Unit.objects.all()
        self.assertEqual(
            list(units.values_list('id', 'name')),
            list(response.context_data['form'].fields['unit_field'].queryset.values_list('id', 'name'))
        )
        # Inactive service types must not be offered.
        models.ServiceType.objects.create(name='st_inactive', is_active=False)
        service_types = models.ServiceType.objects.filter(is_active=True)
        self.assertEqual(
            list(service_types.values_list('id', 'name')),
            list(response.context_data['form'].fields['service_type'].queryset.values_list('id', 'name'))
        )
        # Hours choices: users with the permission (direct or via group) plus
        # all third parties.
        perm = Permission.objects.get(codename='can_have_hours')
        user_with_hours = [('', '---------')]
        for u in User.objects.filter(Q(groups__permissions=perm, is_active=True) | Q(user_permissions=perm, is_active=True)).distinct():
            name = u.username if not u.first_name or not u.last_name else u.last_name + ', ' + u.first_name
            user_with_hours.append(('user-' + str(u.id), name))
        for tp in models.ThirdParty.objects.all():
            user_with_hours.append(('tp-' + str(tp.id), tp.get_full_name()))
        self.assertEqual(
            set(user_with_hours),
            set(response.context_data['hours_formset'].forms[0].fields['user_or_thirdparty'].widget.choices)
        )
        gl = models.GroupLinker.objects.all().first()
        users_in_gl = User.objects.filter(groups=gl.group, is_active=True)
        self.assertEqual(
            list(users_in_gl), list(response.context_data['form'].fields['group_linker_%s' % gl.pk].queryset)
        )
        # Unit pre selected ----------------------------------------------------
        unit = qa_models.Unit.objects.all().first()
        response = self.client.get(self.url + '?u=%d' % unit.pk)
        service_areas = models.UnitServiceArea.objects.filter(unit_id=unit.pk).values_list('service_area_id', 'service_area__name')
        self.assertEqual(
            list(service_areas),
            list(response.context_data['form'].fields['service_area_field'].queryset.values_list('id', 'name'))
        )
        utc_initialed_by = qa_models.UnitTestCollection.objects.filter(unit_id=unit.pk, active=True)
        utc_ib_list = (('', '---------'),)
        for utc_ib in utc_initialed_by:
            utc_ib_list += ((utc_ib.id, '(%s) %s' % (utc_ib.frequency, utc_ib.name)),)
        self.assertEqual(
            set(utc_ib_list),
            set(response.context_data['form'].fields['initiated_utc_field'].widget.choices)
        )
        # Inactive collections must be excluded from the RTS QA choices.
        qa_utils.create_unit_test_collection(unit=unit, active=False)
        self.assertEqual(
            list(utc_initialed_by.values_list('id', 'name')),
            list(response.context_data['rtsqa_formset'].forms[0].fields['unit_test_collection'].queryset.values_list('id', 'name'))
        )
        # Initiated by pre selected --------------------------------------------------
        tli_ib = qa_models.TestListInstance.objects.filter(unit_test_collection__unit=unit).first()
        response = self.client.get(self.url + '?ib=%s' % tli_ib.id)
        self.assertEqual(
            response.context_data['form'].initial['initiated_utc_field'],
            tli_ib.unit_test_collection
        )
        self.assertEqual(
            response.context_data['form'].initial['test_list_instance_initiated_by'].id,
            tli_ib.id
        )
        self.assertEqual(response.context_data['form'].initial['unit_field'], unit)

    def test_submit_valid(self):
        """A fully-populated POST creates the service event together with its
        part-used, return-to-service-QA and hours records, then redirects."""
        st = sl_utils.create_service_type()
        user = User.objects.filter(groups=self.gl_1.group).first()
        user.user_permissions.add(Permission.objects.get(codename='can_have_hours'))
        data = {
            'datetime_service': timezone.now().strftime(settings.INPUT_DATE_FORMATS[0]),
            'unit_field': self.u_1.id,
            'unit_field_fake': self.u_1.id,
            'service_area_field': self.usa_1.service_area.id,
            'service_type': st.id,
            'problem_description': 'uhhhhh ohhhh',
            'work_description': 'stuff was done',
            'safety_precautions': 'we were careful',
            'qafollowup_notes': 'comment',
            'service_event_related_field': [sl_utils.create_service_event(unit_service_area=self.usa_1).id],
            'service_status': models.ServiceEventStatus.get_default().id,
            'test_list_instance_initiated_by': self.tli_1_1.id,
            'duration_service_time': '4321',
            'duration_lost_time': '1234',
            'initiated_utc_field': self.tli_1_1.unit_test_collection.id,
            'group_linker_1': user.id,
            # Management-form bookkeeping for the three inline formsets.
            'hours-INITIAL_FORMS': 0,
            'hours-MAX_NUM_FORMS': 1000,
            'hours-TOTAL_FORMS': 1,
            'hours-MIN_NUM_FORMS': 0,
            'parts-INITIAL_FORMS': 0,
            'parts-MAX_NUM_FORMS': 1000,
            'parts-TOTAL_FORMS': 1,
            'parts-MIN_NUM_FORMS': 0,
            'rtsqa-INITIAL_FORMS': 0,
            'rtsqa-MAX_NUM_FORMS': 1000,
            'rtsqa-TOTAL_FORMS': 1,
            'rtsqa-MIN_NUM_FORMS': 0,
            'hours-0-time': '100',
            'hours-0-user_or_thirdparty': 'user-%s' % user.id,
            'rtsqa-0-all_reviewed': self.tli_1_2.all_reviewed,
            'rtsqa-0-unit_test_collection': self.tli_1_2.unit_test_collection.id,
            'rtsqa-0-test_list_instance': self.tli_1_2.id,
            'parts-0-quantity': 1,
            'parts-0-part': self.part.id,
            'parts-0-from_storage': self.part.storage.all().first().id
        }
        se_count = models.ServiceEvent.objects.count()
        response = self.client.post(self.url, data=data)
        # Locate the newly created event by its unique problem description.
        se_id = models.ServiceEvent.objects.filter(problem_description='uhhhhh ohhhh').first().id
        self.assertEqual(se_count + 1, models.ServiceEvent.objects.count())
        self.assertEqual(1, p_models.PartUsed.objects.filter(service_event=se_id).count())
        self.assertEqual(1, models.ReturnToServiceQA.objects.filter(service_event=se_id).count())
        self.assertEqual(1, models.Hours.objects.filter(service_event=se_id).count())
        self.assertEqual(response.status_code, 302)

    def test_required_fields(self):
        """Posting empty values flags every required form field as an error."""
        data = {
            'datetime_service': '',
            'unit_field': '',
            'unit_field_fake': '',
            'service_area_field': '',
            'service_type': '',
            'problem_description': '',
            'service_status': '',
            'hours-INITIAL_FORMS': 0,
            'hours-MAX_NUM_FORMS': 1000,
            'hours-TOTAL_FORMS': 1,
            'hours-MIN_NUM_FORMS': 0,
            'parts-INITIAL_FORMS': 0,
            'parts-MAX_NUM_FORMS': 1000,
            'parts-TOTAL_FORMS': 1,
            'parts-MIN_NUM_FORMS': 0,
            'rtsqa-INITIAL_FORMS': 0,
            'rtsqa-MAX_NUM_FORMS': 1000,
            'rtsqa-TOTAL_FORMS': 1,
            'rtsqa-MIN_NUM_FORMS': 0,
        }
        response = self.client.post(self.url, data=data)
        self.assertFalse(response.context_data['form'].is_valid())
        for e in ['service_type', 'unit_field', 'service_area_field', 'datetime_service', 'problem_description', 'service_status']:
            self.assertTrue(e in response.context_data['form'].errors)

    def test_unreviewed_rtsqa(self):
        """A status requiring reviewed RTS QA is rejected while the linked
        test list instance is unreviewed (or missing)."""
        ses_approved = sl_utils.create_service_event_status(
            name='Approved', is_review_required=False, rts_qa_must_be_reviewed=True
        )
        test_status = qa_utils.create_status()
        tl = qa_utils.create_test_list()
        t = qa_utils.create_test()
        qa_utils.create_test_list_membership(tl, t)
        tli_unreviewed = qa_utils.create_test_list_instance(test_list=tl)
        qa_utils.create_test_instance(
            tli_unreviewed, unit_test_info=qa_utils.create_unit_test_info(unit=self.u_1), status=test_status
        )
        data = {
            'datetime_service': timezone.now().strftime(settings.INPUT_DATE_FORMATS[0]),
            'unit_field': self.u_1.id,
            'service_area_field': self.sa_1.id,
            'service_type': self.st.id,
            'problem_description': 'problem',
            'service_status': ses_approved.id,
            'hours-INITIAL_FORMS': 0,
            'hours-MAX_NUM_FORMS': 1000,
            'hours-TOTAL_FORMS': 0,
            'hours-MIN_NUM_FORMS': 0,
            'parts-INITIAL_FORMS': 0,
            'parts-MAX_NUM_FORMS': 1000,
            'parts-TOTAL_FORMS': 0,
            'parts-MIN_NUM_FORMS': 0,
            'rtsqa-INITIAL_FORMS': 0,
            'rtsqa-MAX_NUM_FORMS': 1000,
            'rtsqa-TOTAL_FORMS': 1,
            'rtsqa-MIN_NUM_FORMS': 0,
            'rtsqa-0-all_reviewed': tli_unreviewed.all_reviewed,
            'rtsqa-0-unit_test_collection': tli_unreviewed.unit_test_collection.id,
            'rtsqa-0-test_list_instance': tli_unreviewed.id,
            'rtsqa-0-id': '',
        }
        response = self.client.post(self.url, data=data)
        self.assertTrue('service_status' in response.context_data['form'].errors)
        # Also rejected when the RTS QA has no test list instance yet.
        data['rtsqa-0-all_reviewed'] = ''
        data['rtsqa-0-test_list_instance'] = ''
        response = self.client.post(self.url, data=data)
        self.assertTrue('service_status' in response.context_data['form'].errors)

    def test_formset_required_fields(self):
        """Partially filled formset rows flag their missing companion fields."""
        data = {
            'datetime_service': timezone.now().strftime(settings.INPUT_DATE_FORMATS[0]),
            'unit_field': self.u_1.id,
            'service_area_field': self.sa_1.id,
            'service_type': self.st.id,
            'problem_description': 'problem',
            'service_status': self.default_ses.id,
            'hours-INITIAL_FORMS': 0,
            'hours-MAX_NUM_FORMS': 1000,
            'hours-TOTAL_FORMS': 1,
            'hours-MIN_NUM_FORMS': 0,
            'parts-INITIAL_FORMS': 0,
            'parts-MAX_NUM_FORMS': 1000,
            'parts-TOTAL_FORMS': 1,
            'parts-MIN_NUM_FORMS': 0,
            'rtsqa-INITIAL_FORMS': 0,
            'rtsqa-MAX_NUM_FORMS': 1000,
            'rtsqa-TOTAL_FORMS': 0,
            'rtsqa-MIN_NUM_FORMS': 0,
            'parts-0-part': self.part.id,
            'hours-0-user_or_thirdparty': 'user-%s' % self.user.id
        }
        response = self.client.post(self.url, data=data)
        # part without quantity / user without time.
        self.assertTrue('quantity' in response.context_data['part_used_formset'].errors[0])
        self.assertTrue('time' in response.context_data['hours_formset'].errors[0])
        # Swap which half of each row is filled in and re-check.
        data['parts-0-part'] = ''
        data['parts-0-quantity'] = 1
        data['hours-0-user_or_thirdparty'] = ''
        data['hours-0-time'] = '0030'
        response = self.client.post(self.url, data=data)
        self.assertTrue('part' in response.context_data['part_used_formset'].errors[0])
        self.assertTrue('user_or_thirdparty' in response.context_data['hours_formset'].errors[0])

    def test_delete_service_event(self):
        """Deactivating a service event returns its used parts to storage;
        reactivating removes them again.

        NOTE(review): despite the name, this exercises ServiceEvent
        set_inactive/set_active directly; ``self.url_delete`` is never
        requested -- consider a separate view-level test.
        """
        psc = p_models.PartStorageCollection.objects.first()
        pu = p_models.PartUsed.objects.create(
            part=psc.part, from_storage=psc.storage, quantity=1, service_event=self.se_1
        )
        initial_quantity = pu.quantity
        self.se_1.set_inactive()
        psc.refresh_from_db()
        self.assertEqual(initial_quantity + 1, psc.quantity)
        self.se_1.set_active()
        psc.refresh_from_db()
        self.assertEqual(initial_quantity, psc.quantity)
class TestEditServiceEvent(TestCase):
def setUp(self):
    """Build fixtures for editing: two mirrored units (service area, test
    list, two completed test-list instances, service event), users/groups
    with the hours permission, a group linker, a part with storage, and the
    service event under edit (``self.se``) plus baseline POST ``self.data``.

    NOTE(review): this largely duplicates TestCreateServiceEvent.setUp --
    consider extracting a shared mixin/helper.
    """
    self.factory = RequestFactory()
    self.view = views.UpdateServiceEvent.as_view()
    self.default_ses = sl_utils.create_service_event_status(is_default=True)
    # Status that demands the linked RTS QA be reviewed before approval.
    self.approved_ses = sl_utils.create_service_event_status(
        is_review_required=False, rts_qa_must_be_reviewed=True
    )
    sl_utils.create_service_event_status()
    qa_utils.create_status(is_default=True)
    now = timezone.now()
    # --- unit 1 fixtures ---
    self.u_1 = qa_utils.create_unit(name='u_1')
    self.sa_1 = sl_utils.create_service_area(name='sa_1')
    self.usa_1 = sl_utils.create_unit_service_area(unit=self.u_1, service_area=self.sa_1)
    self.tl_1 = qa_utils.create_test_list(name='tl_1')
    self.tli_1_1 = qa_utils.create_test_list_instance(
        unit_test_collection=qa_utils.create_unit_test_collection(unit=self.u_1, test_collection=self.tl_1),
        test_list=self.tl_1,
        work_completed=now - timezone.timedelta(hours=1)
    )
    self.tli_1_2 = qa_utils.create_test_list_instance(
        unit_test_collection=qa_utils.create_unit_test_collection(unit=self.u_1, test_collection=self.tl_1),
        test_list=self.tl_1,
        work_completed=now
    )
    self.se_1 = sl_utils.create_service_event(unit_service_area=self.usa_1)
    # --- unit 2 fixtures (mirror of unit 1) ---
    self.u_2 = qa_utils.create_unit(name='u_2')
    self.sa_2 = sl_utils.create_service_area(name='sa_2')
    self.usa_2 = sl_utils.create_unit_service_area(unit=self.u_2, service_area=self.sa_2)
    self.tl_2 = qa_utils.create_test_list(name='tl_2')
    self.tli_2_1 = qa_utils.create_test_list_instance(
        unit_test_collection=qa_utils.create_unit_test_collection(unit=self.u_2, test_collection=self.tl_2),
        test_list=self.tl_2,
        work_completed=now - timezone.timedelta(hours=1)
    )
    self.tli_2_2 = qa_utils.create_test_list_instance(
        unit_test_collection=qa_utils.create_unit_test_collection(unit=self.u_2, test_collection=self.tl_2),
        test_list=self.tl_2,
        work_completed=now
    )
    self.se_2 = sl_utils.create_service_event(unit_service_area=self.usa_2)
    self.url = reverse('sl_new')
    self.user = qa_utils.create_user(is_superuser=True)
    self.client.login(username='user', password='password')
    # Users allowed to log hours: directly and via a permitted group.
    perm = Permission.objects.get(codename='can_have_hours')
    user_can_hours = User.objects.get(username='user')
    user_can_hours.user_permissions.add(perm)
    user_group_has_hours = qa_utils.create_user(uname='in_group_with_hours')
    group_has_hours = qa_utils.create_group(name='can_have_hours')
    group_has_hours.permissions.add(perm)
    user_group_has_hours.groups.add(group_has_hours)
    sl_utils.create_third_party()
    group_linked = qa_utils.create_group(name='linked')
    user_group_linked = qa_utils.create_user(uname='user_in_group_linked')
    user_group_linked.groups.add(group_linked)
    self.gl_1 = sl_utils.create_group_linker(group=group_linked)
    self.part = sl_utils.create_part(add_storage=True)
    self.st = sl_utils.create_service_type()
    # The service event that the edit tests operate on.
    self.se = sl_utils.create_service_event(
        unit_service_area=self.usa_1,
        service_status=self.default_ses,
        service_type=self.st
    )
    self.url = reverse('sl_edit', kwargs={"pk": self.se.pk})
    # Baseline valid POST payload; individual tests mutate copies of it.
    self.data = {
        'datetime_service': timezone.now().strftime(settings.INPUT_DATE_FORMATS[0]),
        'service_status': self.default_ses.id,
        'service_type': self.st.id,
        'is_review_required': 0,
        'safety_precautions': 'safety_precautions',
        'problem_description': 'problem_description',
        'service_area_field': self.se.unit_service_area.service_area.id,
        'unit_field': self.se.unit_service_area.unit.id,
        'unit_field_fake': self.se.unit_service_area.unit.id,
        'qafollowup_notes': 'qafollowup_notes',
        'test_list_instance_initiated_by': self.tli_1_1.id,
        'duration_lost_time': '0100',
        'duration_service_time': '0100',
        'hours-INITIAL_FORMS': 0,
        'hours-MAX_NUM_FORMS': 1000,
        'hours-TOTAL_FORMS': 0,
        'hours-MIN_NUM_FORMS': 0,
        'parts-INITIAL_FORMS': 0,
        'parts-MAX_NUM_FORMS': 1000,
        'parts-TOTAL_FORMS': 0,
        'parts-MIN_NUM_FORMS': 0,
        'rtsqa-INITIAL_FORMS': 0,
        'rtsqa-MAX_NUM_FORMS': 1000,
        'rtsqa-TOTAL_FORMS': 0,
        'rtsqa-MIN_NUM_FORMS': 0,
    }
def test_initial_unit(self):
    """Create an RTS QA linked to the event's unit.

    NOTE(review): this test performs no request and contains no assertions;
    it only verifies that fixture creation does not raise. TODO: assert the
    edit form's initial unit/RTS QA state after a GET of ``self.url``.
    """
    sl_utils.create_return_to_service_qa(
        service_event=self.se_1, unit_test_collection=self.tli_1_1.unit_test_collection,
        add_test_list_instance=self.tli_1_1
    )
def test_edit_service_event_valid(self):
    """Posting the baseline data with one row in each formset redirects
    (a valid edit)."""
    # NOTE(review): this GET response is never inspected -- likely only a
    # warm-up; consider asserting on it or removing it.
    response = self.client.get(self.url)
    # ``data`` aliases self.data, so these mutations persist for the test.
    data = self.data
    data['hours-TOTAL_FORMS'] = 1
    data['parts-TOTAL_FORMS'] = 1
    data['rtsqa-TOTAL_FORMS'] = 1
    data['parts-0-part'] = self.part.id
    data['parts-0-quantity'] = 1
    data['hours-0-user_or_thirdparty'] = 'user-%s' % self.user.id
    data['hours-0-time'] = '0030'
    data['rtsqa-0-all_reviewed'] = self.tli_1_2.all_reviewed
    data['rtsqa-0-unit_test_collection'] = self.tli_1_2.unit_test_collection.id
    data['rtsqa-0-test_list_instance'] = self.tli_1_2.id
    data['rtsqa-0-id'] = ''
    response = self.client.post(self.url, data=data)
    # 302 = successful save and redirect.
    self.assertEqual(response.status_code, 302)
def test_edit_status_invalid(self):
data = self.data
data['service_status'] = self.approved_ses.id
data['hours-TOTAL_FORMS'] = 0
data['parts-TOTAL_FORMS'] = 0
data['rtsqa-TOTAL_FORMS'] = 0
data['parts-0-part'] = self.part.id
data['parts-0-quantity'] = 1
data['hours-0-user_or_thirdparty'] = 'user-%s' % self.user.id
data['hours-0-time'] = '0030'
data['rtsqa-0-all_reviewed'] | |
# coding: utf-8
"""
Cloud Manager API
This API allows access to Cloud Manager programs, pipelines, and environments by an authorized technical account created through the Adobe I/O Console. The base url for this API is https://cloudmanager.adobe.io, e.g. to get the list of programs for an organization, you would make a GET request to https://cloudmanager.adobe.io/api/programs (with the correct set of headers as described below). This swagger file can be downloaded from https://raw.githubusercontent.com/AdobeDocs/cloudmanager-api-docs/master/swagger-specs/api.yaml. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from pyaemcloudmanagerapi.configuration import Configuration
class PipelineExecution(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'program_id': 'str',
'pipeline_id': 'str',
'artifacts_version': 'str',
'user': 'str',
'status': 'str',
'trigger': 'str',
'created_at': 'datetime',
'updated_at': 'datetime',
'finished_at': 'datetime',
'embedded': 'PipelineExecutionEmbedded',
'links': 'PipelineExecutionLinks'
}
attribute_map = {
'id': 'id',
'program_id': 'programId',
'pipeline_id': 'pipelineId',
'artifacts_version': 'artifactsVersion',
'user': 'user',
'status': 'status',
'trigger': 'trigger',
'created_at': 'createdAt',
'updated_at': 'updatedAt',
'finished_at': 'finishedAt',
'embedded': '_embedded',
'links': '_links'
}
def __init__(self, id=None, program_id=None, pipeline_id=None, artifacts_version=None, user=None, status=None, trigger=None, created_at=None, updated_at=None, finished_at=None, embedded=None, links=None, local_vars_configuration=None):  # noqa: E501
    """PipelineExecution - a model defined in OpenAPI

    All fields are optional; setters (and their client-side validation,
    e.g. for ``status`` and ``trigger``) are only invoked for arguments
    that were explicitly provided.
    """  # noqa: E501
    if local_vars_configuration is None:
        # Fall back to a default Configuration when none is injected.
        local_vars_configuration = Configuration()
    self.local_vars_configuration = local_vars_configuration

    # Backing fields for the properties defined below.
    self._id = None
    self._program_id = None
    self._pipeline_id = None
    self._artifacts_version = None
    self._user = None
    self._status = None
    self._trigger = None
    self._created_at = None
    self._updated_at = None
    self._finished_at = None
    self._embedded = None
    self._links = None
    self.discriminator = None

    # Route each provided value through its property setter so that any
    # validation runs; omitted values keep their None defaults.
    if id is not None:
        self.id = id
    if program_id is not None:
        self.program_id = program_id
    if pipeline_id is not None:
        self.pipeline_id = pipeline_id
    if artifacts_version is not None:
        self.artifacts_version = artifacts_version
    if user is not None:
        self.user = user
    if status is not None:
        self.status = status
    if trigger is not None:
        self.trigger = trigger
    if created_at is not None:
        self.created_at = created_at
    if updated_at is not None:
        self.updated_at = updated_at
    if finished_at is not None:
        self.finished_at = finished_at
    if embedded is not None:
        self.embedded = embedded
    if links is not None:
        self.links = links
@property
def id(self) -> str:
    """Gets the id of this PipelineExecution.  # noqa: E501

    Pipeline execution identifier  # noqa: E501

    :return: The id of this PipelineExecution.  # noqa: E501
    :rtype: str
    """
    return self._id

@id.setter
def id(self, id: str) -> None:
    """Sets the id of this PipelineExecution.

    Pipeline execution identifier  # noqa: E501

    :param id: The id of this PipelineExecution.  # noqa: E501
    :type: str
    """
    # Plain pass-through setter; no client-side validation for this field.
    self._id = id
@property
def program_id(self) -> str:
    """Gets the program_id of this PipelineExecution.  # noqa: E501

    Identifier of the program. Unique within the space.  # noqa: E501

    :return: The program_id of this PipelineExecution.  # noqa: E501
    :rtype: str
    """
    return self._program_id

@program_id.setter
def program_id(self, program_id: str) -> None:
    """Sets the program_id of this PipelineExecution.

    Identifier of the program. Unique within the space.  # noqa: E501

    :param program_id: The program_id of this PipelineExecution.  # noqa: E501
    :type: str
    """
    # Plain pass-through setter; no client-side validation for this field.
    self._program_id = program_id
@property
def pipeline_id(self) -> str:
    """Gets the pipeline_id of this PipelineExecution.  # noqa: E501

    Identifier of the pipeline. Unique within the space.  # noqa: E501

    :return: The pipeline_id of this PipelineExecution.  # noqa: E501
    :rtype: str
    """
    return self._pipeline_id

@pipeline_id.setter
def pipeline_id(self, pipeline_id: str) -> None:
    """Sets the pipeline_id of this PipelineExecution.

    Identifier of the pipeline. Unique within the space.  # noqa: E501

    :param pipeline_id: The pipeline_id of this PipelineExecution.  # noqa: E501
    :type: str
    """
    # Plain pass-through setter; no client-side validation for this field.
    self._pipeline_id = pipeline_id
@property
def artifacts_version(self) -> str:
    """Gets the artifacts_version of this PipelineExecution.  # noqa: E501

    Version of the artifacts generated during this execution  # noqa: E501

    :return: The artifacts_version of this PipelineExecution.  # noqa: E501
    :rtype: str
    """
    return self._artifacts_version

@artifacts_version.setter
def artifacts_version(self, artifacts_version: str) -> None:
    """Sets the artifacts_version of this PipelineExecution.

    Version of the artifacts generated during this execution  # noqa: E501

    :param artifacts_version: The artifacts_version of this PipelineExecution.  # noqa: E501
    :type: str
    """
    # Plain pass-through setter; no client-side validation for this field.
    self._artifacts_version = artifacts_version
@property
def user(self) -> str:
    """Gets the user of this PipelineExecution.  # noqa: E501

    AdobeID who started the pipeline. Empty for auto triggered builds  # noqa: E501

    :return: The user of this PipelineExecution.  # noqa: E501
    :rtype: str
    """
    return self._user

@user.setter
def user(self, user: str) -> None:
    """Sets the user of this PipelineExecution.

    AdobeID who started the pipeline. Empty for auto triggered builds  # noqa: E501

    :param user: The user of this PipelineExecution.  # noqa: E501
    :type: str
    """
    # Plain pass-through setter; no client-side validation for this field.
    self._user = user
@property
def status(self) -> str:
    """Gets the status of this PipelineExecution.  # noqa: E501

    Status of the execution  # noqa: E501

    :return: The status of this PipelineExecution.  # noqa: E501
    :rtype: str
    """
    return self._status

@status.setter
def status(self, status: str) -> None:
    """Sets the status of this PipelineExecution.

    Status of the execution  # noqa: E501

    :param status: The status of this PipelineExecution.  # noqa: E501
    :type: str
    :raises ValueError: if client-side validation is enabled and *status*
        is not one of the allowed enum values.
    """
    allowed_values = ["NOT_STARTED", "RUNNING", "CANCELLING", "CANCELLED", "FINISHED", "ERROR", "FAILED"]  # noqa: E501
    # Validation is skipped entirely when client_side_validation is off.
    if self.local_vars_configuration.client_side_validation and status not in allowed_values:  # noqa: E501
        raise ValueError(
            "Invalid value for `status` ({0}), must be one of {1}"  # noqa: E501
            .format(status, allowed_values)
        )

    self._status = status
@property
def trigger(self) -> str:
    """Gets the trigger of this PipelineExecution.  # noqa: E501

    How the execution was triggered.  # noqa: E501

    :return: The trigger of this PipelineExecution.  # noqa: E501
    :rtype: str
    """
    return self._trigger

@trigger.setter
def trigger(self, trigger: str) -> None:
    """Sets the trigger of this PipelineExecution.

    How the execution was triggered.  # noqa: E501

    :param trigger: The trigger of this PipelineExecution.  # noqa: E501
    :type: str
    :raises ValueError: if client-side validation is enabled and *trigger*
        is not one of the allowed enum values.
    """
    allowed_values = ["ON_COMMIT", "MANUAL", "SCHEDULE", "PUSH_UPGRADES"]  # noqa: E501
    # Validation is skipped entirely when client_side_validation is off.
    if self.local_vars_configuration.client_side_validation and trigger not in allowed_values:  # noqa: E501
        raise ValueError(
            "Invalid value for `trigger` ({0}), must be one of {1}"  # noqa: E501
            .format(trigger, allowed_values)
        )

    self._trigger = trigger
@property
def created_at(self) -> "datetime":
    """Gets the created_at of this PipelineExecution.  # noqa: E501

    Start time  # noqa: E501

    :return: The created_at of this PipelineExecution.  # noqa: E501
    :rtype: datetime
    """
    return self._created_at

@created_at.setter
def created_at(self, created_at: "datetime") -> None:
    """Sets the created_at of this PipelineExecution.

    Start time  # noqa: E501

    :param created_at: The created_at of this PipelineExecution.  # noqa: E501
    :type: datetime
    """
    # Plain pass-through setter; no client-side validation for this field.
    self._created_at = created_at
@property
def updated_at(self) -> "datetime":
    """Gets the updated_at of this PipelineExecution.  # noqa: E501

    Date of last status change  # noqa: E501

    :return: The updated_at of this PipelineExecution.  # noqa: E501
    :rtype: datetime
    """
    return self._updated_at

@updated_at.setter
def updated_at(self, updated_at: "datetime") -> None:
    """Sets the updated_at of this PipelineExecution.

    Date of last status change  # noqa: E501

    :param updated_at: The updated_at of this PipelineExecution.  # noqa: E501
    :type: datetime
    """
    # Plain pass-through setter; no client-side validation for this field.
    self._updated_at = updated_at
@property
def finished_at(self) -> "datetime":
    """Gets the finished_at of this PipelineExecution.  # noqa: E501

    Date the execution reached a final state  # noqa: E501

    :return: The finished_at of this PipelineExecution.  # noqa: E501
    :rtype: datetime
    """
    return self._finished_at

@finished_at.setter
def finished_at(self, finished_at: "datetime") -> None:
    """Sets the finished_at of this PipelineExecution.

    Date the execution reached a final state  # noqa: E501

    :param finished_at: The finished_at of this PipelineExecution.  # noqa: E501
    :type: datetime
    """
    # Plain pass-through setter; no client-side validation for this field.
    self._finished_at = finished_at
@property
def embedded(self) -> "PipelineExecutionEmbedded":
    """Gets the embedded of this PipelineExecution.  # noqa: E501

    :return: The embedded of this PipelineExecution.  # noqa: E501
    :rtype: PipelineExecutionEmbedded
    """
    return self._embedded

@embedded.setter
def embedded(self, embedded: "PipelineExecutionEmbedded") -> None:
    """Sets the embedded of this PipelineExecution.

    :param embedded: The embedded of this PipelineExecution.  # noqa: E501
    :type: PipelineExecutionEmbedded
    """
    # Plain pass-through setter; no client-side validation for this field.
    self._embedded = embedded
@property
def links(self) -> "PipelineExecutionLinks":
    """Gets the links of this PipelineExecution.  # noqa: E501

    :return: The links of this PipelineExecution.  # noqa: E501
    :rtype: PipelineExecutionLinks
    """
    return self._links

@links.setter
def links(self, links: "PipelineExecutionLinks") -> None:
    """Sets the links of this PipelineExecution.

    :param links: The links of this PipelineExecution.  # noqa: E501
    :type: PipelineExecutionLinks
    """
    # Plain pass-through setter; no client-side validation for this field.
    self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] | |
# LinkGeoML/LGM-Classification
import numpy as np
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point
from shapely.wkt import loads
import itertools
import os
from collections import Counter
import pickle
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from sklearn.feature_selection import SelectPercentile, chi2
import adjacency_features as af
import textual_features as tf
# import geometric_features as gf
# import matching as m
import osm_utilities as osm_ut
import writers as wrtrs
from config import config
# Maps each feature name to the module (adjacency or textual) that implements it.
feature_module_map = {
    'classes_in_radius_bln': af,
    'classes_in_radius_cnt': af,
    'classes_in_street_and_radius_bln': af,
    'classes_in_street_and_radius_cnt': af,
    'classes_in_neighbors_bln': af,
    'classes_in_neighbors_cnt': af,
    'classes_in_street_radius_bln': af,
    'classes_in_street_radius_cnt': af,
    'similarity_per_class': tf,
    'top_k_terms': tf,
    'top_k_trigrams': tf,
    'top_k_fourgrams': tf
}

# Maps each feature name to the extractor function name inside its module.
features_getter_map = {
    'classes_in_radius_bln': 'get_classes_in_radius_bln',
    'classes_in_radius_cnt': 'get_classes_in_radius_cnt',
    'classes_in_street_and_radius_bln': 'get_classes_in_street_and_radius_bln',
    'classes_in_street_and_radius_cnt': 'get_classes_in_street_and_radius_cnt',
    'classes_in_neighbors_bln': 'get_classes_in_neighbors_bln',
    'classes_in_neighbors_cnt': 'get_classes_in_neighbors_cnt',
    'classes_in_street_radius_bln': 'get_classes_in_street_radius_bln',
    'classes_in_street_radius_cnt': 'get_classes_in_street_radius_cnt',
    'similarity_per_class': 'get_similarity_per_class',
    'top_k_terms': 'get_top_k_terms',
    'top_k_trigrams': 'get_top_k_trigrams',
    'top_k_fourgrams': 'get_top_k_fourgrams'
}

# Maps parameterized features to the config attribute listing their candidate
# parameter values. Features not present here take no tunable parameter.
features_params_map = {
    'classes_in_radius_bln': 'classes_in_radius_thr',
    'classes_in_radius_cnt': 'classes_in_radius_thr',
    'classes_in_street_and_radius_bln': 'classes_in_street_and_radius_thr',
    'classes_in_street_and_radius_cnt': 'classes_in_street_and_radius_thr',
    'classes_in_neighbors_bln': 'classes_in_neighbors_thr',
    'classes_in_neighbors_cnt': 'classes_in_neighbors_thr',
    'classes_in_street_radius_bln': 'classes_in_street_radius_thr',
    'classes_in_street_radius_cnt': 'classes_in_street_radius_thr',
    'top_k_terms': 'top_k_terms_pct',
    'top_k_trigrams': 'top_k_trigrams_pct',
    'top_k_fourgrams': 'top_k_fourgrams_pct'
}

# Maps each feature name to the argument names (keys of the dict built by
# create_args_dict) its extractor is called with, in positional order.
features_getter_args_map = {
    'classes_in_radius_bln': ('poi_gdf', 'poi_index_path', 'nlabels', 'label_map', 'param'),
    'classes_in_radius_cnt': ('poi_gdf', 'poi_index_path', 'nlabels', 'label_map', 'param'),
    'classes_in_street_and_radius_bln': ('poi_gdf', 'street_gdf', 'pois_by_street', 'nlabels', 'label_map', 'geometry_map', 'param'),
    'classes_in_street_and_radius_cnt': ('poi_gdf', 'street_gdf', 'pois_by_street', 'nlabels', 'label_map', 'geometry_map', 'param'),
    'classes_in_neighbors_bln': ('poi_gdf', 'poi_index_path', 'nlabels', 'label_map', 'param'),
    'classes_in_neighbors_cnt': ('poi_gdf', 'poi_index_path', 'nlabels', 'label_map', 'param'),
    'classes_in_street_radius_bln': ('poi_gdf', 'street_gdf', 'nlabels', 'label_map', 'geometry_map', 'param'),
    'classes_in_street_radius_cnt': ('poi_gdf', 'street_gdf', 'nlabels', 'label_map', 'geometry_map', 'param'),
    'similarity_per_class': ('poi_gdf', 'textual_index_path', 'nlabels'),
    'top_k_terms': ('poi_gdf', 'names', 'param'),
    'top_k_trigrams': ('poi_gdf', 'names', 'param'),
    'top_k_fourgrams': ('poi_gdf', 'names', 'param')
}
def load_poi_gdf(poi_fpath):
    """
    Read pois from *poi_fpath* into a geopandas.GeoDataFrame projected to
    Web Mercator (EPSG:3857).

    Args:
        poi_fpath (str): Path to file containing the pois

    Returns:
        geopandas.GeoDataFrame
    """
    frame = pd.read_csv(poi_fpath)
    # Build one point geometry per row from the configured lon/lat columns.
    frame['geometry'] = [
        Point(lon, lat)
        for lon, lat in zip(frame[config.lon_col], frame[config.lat_col])
    ]
    poi_gdf = gpd.GeoDataFrame(frame, geometry='geometry')
    poi_gdf.crs = {'init': f'epsg:{config.poi_crs}'}
    poi_gdf = poi_gdf.to_crs({'init': 'epsg:3857'})
    # Cache projected coordinates as plain columns for fast access later.
    poi_gdf['lon'] = poi_gdf.geometry.apply(lambda g: g.coords[0][0])
    poi_gdf['lat'] = poi_gdf.geometry.apply(lambda g: g.coords[0][1])
    return poi_gdf
def encode_labels(poi_gdf, encoder=None):
    """
    Add an integer-encoded 'label' column for the configured target column.

    Args:
        poi_gdf (geopandas.GeoDataFrame): Frame holding the target column
        encoder (sklearn.preprocessing.LabelEncoder, optional): Pre-fitted
            encoder; when given, rows with unseen classes are dropped

    Returns:
        tuple:
            geopandas.GeoDataFrame: Frame with the encoded 'label' column
            sklearn.preprocessing.LabelEncoder: The encoder used
    """
    if encoder is not None:
        # Keep only rows whose class the encoder already knows about.
        known = poi_gdf[config.label_col].isin(encoder.classes_)
        poi_gdf = poi_gdf[known].reset_index(drop=True)
        poi_gdf['label'] = encoder.transform(poi_gdf[config.label_col])
    else:
        encoder = LabelEncoder()
        poi_gdf['label'] = encoder.fit_transform(poi_gdf[config.label_col])
    return poi_gdf, encoder
def load_street_gdf(street_fpath):
    """
    Read streets from *street_fpath* into a geopandas.GeoDataFrame projected
    to Web Mercator (EPSG:3857).

    Args:
        street_fpath (str): Path to file containing the streets

    Returns:
        geopandas.GeoDataFrame
    """
    streets = pd.read_csv(street_fpath)
    # The geometry column holds WKT strings; parse each into a shapely object.
    streets['geometry'] = [loads(wkt) for wkt in streets['geometry']]
    street_gdf = gpd.GeoDataFrame(streets, geometry='geometry')
    street_gdf.crs = {'init': f'epsg:{config.osm_crs}'}
    return street_gdf.to_crs({'init': 'epsg:3857'})
# def load_poly_gdf(poly_fpath):
# poly_df = pd.read_csv(poly_fpath)
# poly_df['geometry'] = poly_df['geometry'].apply(lambda x: loads(x))
# poly_gdf = gpd.GeoDataFrame(poly_df, geometry='geometry')
# poly_gdf.crs = {'init': f'epsg:{config.osm_crs}'}
# poly_gdf = poly_gdf.to_crs({'init': 'epsg:3857'})
# return poly_gdf
def get_bbox_coords(poi_gdf):
    """
    Compute the bounding box enclosing all pois of *poi_gdf*.

    Args:
        poi_gdf (geopandas.GeoDataFrame): Contains the pois

    Returns:
        tuple: Bounding box coords as (south, west, north, east)
    """
    # Reproject to the OSM CRS so the bounds are in lon/lat order.
    reprojected = poi_gdf.to_crs({'init': f'epsg:{config.osm_crs}'})
    west, south, east, north = reprojected.geometry.total_bounds
    return (south, west, north, east)
def get_required_external_files(poi_gdf, feature_sets_path):
    """
    Download external OSM data (via the Overpass API) when any configured
    feature needs it.

    Args:
        poi_gdf (geopandas.GeoDataFrame): Pois defining the query area
        feature_sets_path (str): Path to store the downloaded elements

    Returns:
        None
    """
    # Street geometries are only needed by the street-based adjacency features.
    street_features = (
        'classes_in_street_and_radius_bln',
        'classes_in_street_and_radius_cnt',
        'classes_in_street_radius_bln',
        'classes_in_street_radius_cnt',
    )
    if any(name in config.included_adjacency_features for name in street_features):
        osm_ut.download_osm_streets(get_bbox_coords(poi_gdf), feature_sets_path)
    # NOTE: geometric features (osm polygons) are currently disabled.
    return
def ngrams(n, word):
    """
    Generator of all character *n*-grams of *word*.

    Args:
        n (int): The length of character ngrams to be extracted
        word (str): The word of which the ngrams are to be extracted

    Yields:
        str: ngram
    """
    # The stop bound is len(word) - n + 1 so the final ngram (the one ending
    # at the last character) is produced. The previous bound, len(word)-n-1,
    # silently dropped the last two ngrams of every word.
    for i in range(len(word) - n + 1):
        yield word[i:i+n]
def get_top_k(names, k, mode='term'):
    """
    Return the top *k* fraction of terms or character ngrams found in *names*.

    Args:
        names (list): Names to be considered
        k (float): Fraction of most common terms/ngrams to keep
        mode (str, optional): One of 'term', 'trigram' or 'fourgram'

    Returns:
        list: The top k terms or ngrams, most common first
    """
    if mode == 'trigram':
        counts = Counter(g for name in names for g in ngrams(3, name))
    elif mode == 'fourgram':
        counts = Counter(g for name in names for g in ngrams(4, name))
    else:
        counts = Counter(names)
    keep = int(len(counts) * k)
    return [term for term, _ in counts.most_common(keep)]
def normalize_features(X, train_idxs, scaler=None):
    """
    Normalize features to [0, 1].

    Args:
        X (numpy.ndarray): Features array to be normalized
        train_idxs (numpy.ndarray): Contains the train indexes
        scaler (sklearn.preprocessing.MinMaxScaler, optional): Scaler to be \
            utilized; when given, train_idxs is ignored and the whole array \
            is transformed
    Returns:
        tuple:
            numpy.ndarray: The normalized features array
            sklearn.preprocessing.MinMaxScaler: The scaler utilized
    """
    if scaler is None:
        scaler = MinMaxScaler()
        # Fit on train rows only, then write the scaled rows back in place.
        X_ = scaler.fit_transform(X[train_idxs])
        for idx, i in enumerate(train_idxs):
            X[i] = X_[idx]
        # Every row not in the train set is treated as a test row and is
        # transformed with the train-fitted scaler (no re-fitting).
        test_idxs = [r for r in range(len(X)) if r not in train_idxs]
        if test_idxs:
            X_ = scaler.transform(X[test_idxs])
            for idx, i in enumerate(test_idxs):
                X[i] = X_[idx]
    else:
        # Pre-fitted scaler supplied: transform the full array directly.
        X = scaler.transform(X)
    return X, scaler
def get_pois_by_street(poi_gdf, street_gdf):
    """
    Match each poi in *poi_gdf* to its nearest street.

    Args:
        poi_gdf (geopandas.GeoDataFrame): Pois to match
        street_gdf (geopandas.GeoDataFrame): Candidate streets

    Returns:
        dict: Street index -> list of poi indexes assigned to that street
    """
    street_index = street_gdf.sindex
    assignment = {s: [] for s in range(len(street_gdf))}
    for poi in poi_gdf.itertuples():
        poi_coords = (poi.lon, poi.lat)
        poi_point = Point(poi_coords)
        # The spatial index returns a shortlist; pick the truly closest one.
        candidates = list(street_index.nearest(poi_coords))
        dists = [
            poi_point.distance(street_gdf.iloc[c]['geometry'])
            for c in candidates
        ]
        assignment[candidates[int(np.argmin(dists))]].append(poi.Index)
    return assignment
def create_args_dict(poi_gdf, train_idxs, required_args, read_path, write_path):
    """
    Initializes and prepares structures required during features extraction.

    Args:
        poi_gdf (geopandas.GeoDataFrame): Contains the pois for which \
            features will be created
        train_idxs (numpy.ndarray): Contains the train indexes
        required_args (set): Contains the names of the required args
        read_path (str): Path to read from
        write_path (str): Path to write to

    Returns:
        dict: Containing arguments names as keys and their corresponding \
            structures as values
    """
    args = {'poi_gdf': poi_gdf, 'nlabels': poi_gdf['label'].nunique()}
    if 'label_map' in required_args:
        # Labels of the train pois only — test pois must not leak into them.
        args['label_map'] = poi_gdf.iloc[train_idxs]['label'].values.tolist()
    if 'geometry_map' in required_args:
        # Geometries aligned with label_map (both built from train_idxs).
        args['geometry_map'] = list(poi_gdf.iloc[train_idxs]['geometry'].values)
    if 'poi_index_path' in required_args:
        # Spatial index over the train pois, persisted for the getters.
        args['poi_index_path'] = write_path + '/poi_index.pkl'
        af.create_poi_index(poi_gdf.iloc[train_idxs].reset_index(), args['poi_index_path'])
    if 'street_gdf' in required_args:
        street_csv_path = read_path + '/osm_streets.csv'
        args['street_gdf'] = load_street_gdf(street_csv_path)
        args['pois_by_street'] = get_pois_by_street(poi_gdf.iloc[train_idxs].reset_index(), args['street_gdf'])
    if 'textual_index_path' in required_args:
        args['textual_index_path'] = write_path + '/textual_index'
        tf.create_textual_index(poi_gdf.iloc[train_idxs].reset_index(), args['textual_index_path'])
    if 'names' in required_args:
        # Flatten train poi names into a bag of individual terms.
        args['names'] = ' '.join(list(poi_gdf.iloc[train_idxs][config.name_col])).split()
    return args
def create_single_feature(f, args, train_idxs, norm, scaler):
    """
    Build the features array for the feature named *f*.

    Args:
        f (str): Feature name to be created
        args (dict): Required arguments for feature *f*
        train_idxs (numpy.ndarray): Contains the train indexes
        norm (boolean): Whether the feature should be normalized
        scaler (sklearn.preprocessing.MinMaxScaler): Pre-fitted scaler, or None

    Returns:
        tuple:
            numpy.ndarray: The features array of feature *f*
            sklearn.preprocessing.MinMaxScaler: The scaler utilized (or None)
    """
    # Resolve the extractor from the lookup tables and call it with the
    # argument values it declares, in order.
    module = feature_module_map[f]
    getter = getattr(module, features_getter_map[f])
    getter_args = [args[name] for name in features_getter_args_map[f]]
    X = getter(*getter_args)
    if scaler is not None:
        return normalize_features(X, None, scaler)
    if norm:
        return normalize_features(X, train_idxs)
    return X, None
def create_single_features(poi_gdf, train_idxs, fold_path):
    """
    Creates all the included features arrays and saves them in *fold_path*.

    Args:
        poi_gdf (geopandas.GeoDataFrame): Contains the pois for which the \
            features will be created
        train_idxs (numpy.ndarray): Contains the train indexes
        fold_path (str): Path to save features arrays

    Returns:
        None
    """
    os.makedirs(fold_path + '/tmp')
    included_features = config.included_adjacency_features + config.included_textual_features
    # Union of every argument any included feature getter needs.
    required_args = set([arg for f in included_features for arg in features_getter_args_map[f]])
    args = create_args_dict(poi_gdf, train_idxs, required_args, os.path.dirname(fold_path), fold_path)
    for f in included_features:
        norm = True if f in config.normalized_features else False
        if f not in features_params_map:
            # Parameterless feature: a single array per feature name.
            X, _ = create_single_feature(f, args, train_idxs, norm, None)
            np.save(fold_path + f'/tmp/{f}.npy', X)
        else:
            # Parameterized feature: one array per candidate parameter value.
            for p in getattr(config, features_params_map[f]):
                args['param'] = p
                X, _ = create_single_feature(f, args, train_idxs, norm, None)
                np.save(fold_path + f'/tmp/{f}_{p}.npy', X)
    return
def create_concatenated_features(poi_gdf, train_idxs, test_idxs, fold_path):
"""
Loads a list of included features arrays in order to concatenate them \
into the final X_train and X_test arrays. Then saves these arrays as well \
as the corresponding y_train and y_test arrays. Finally, writes the \
included features configuration into a file.
Args:
poi_gdf (geopandas.GeoDataFrame): Contains the pois for which the \
features will be created
train_idxs (numpy.ndarray): Contains the train indexes
test_idxs | |
##########################################################################
#
# Copyright (c) 2019, Hypothetical Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of Hypothetical Inc. nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import IECore
import Gaffer
import GafferDispatch
import GafferDeadline
class DeadlineDispatcher(GafferDispatch.Dispatcher):
def __init__(self, name="DeadlineDispatcher"):
    """Create the dispatcher and start with an empty Deadline job list."""
    GafferDispatch.Dispatcher.__init__(self, name)
    # GafferDeadlineJob objects accumulated during a single dispatch.
    self._deadline_jobs = []
# Emitted prior to submitting the Deadline job, to allow
# custom modifications to be applied.
#
# Slots should have the signature `slot( dispatcher, job )`,
# where dispatcher is the DeadlineDispatcher and job will
# be the instance of GafferDeadlineJob that is about
# to be spooled.
@classmethod
def preSpoolSignal(cls):
    """Return the class-level signal emitted just before a job is spooled."""
    return cls.__preSpoolSignal

# Shared across all dispatcher instances; accessed via preSpoolSignal().
__preSpoolSignal = Gaffer.Signal2()
def _doDispatch(self, root_batch):
    '''
    _doDispatch is called by Gaffer, the others (prefixed with __) are just helpers for Deadline

    Note that Gaffer and Deadline use some terms differently
    Gaffer Batch =~ Deadline Task, which could be multiple frames in a single task. Depending on batch layout
        multiple Deadline Tasks may be needed to fullfill a single Gaffer Batch. For example, a Deadline
        Task can only handle sequential frames.
    Gaffer TaskNode =~ Deadline Job. A Gaffer Task can have multiple Deadline Jobs to complete it depending on batch and context layout.
        A DeadlineJob is defined by the combination of Gaffer TaskNode and Context.
    Gaffer Job = set of Deadline Jobs (could be considered a Deadline Batch)

    Use DeadlineJob, DeadlineTask, etc. to denote Deadline terminology and plain Batch, Job, etc. to denote Gaffer terminology.

    Batches can have dependencies completely independent of frame numbers. First
    walk through the batch tree to build up a set of GafferDeadlineJob objects with GafferDeadlineTask objects corresponding
    to the batches.

    When all tasks are created, go back through the tree to setup dependencies between tasks. Task dependencies may be different
    from Batch dependencies because batches may have been split to accommodate Deadline's sequential frame task requirement.

    With dependencies set, start at the leaf nodes of the task tree (no upstream DeadlineJobs) and submit those first. That way
    the Deadline Job ID can be stored and used by dependent jobs to set their dependencies correctly.

    To be compatible with Deadline's ExtraInfoKeyValue system, dependencies are reformatted at submission as
    task:job_dependency_id=task_dependency_number
    '''
    # Fresh job list per dispatch — a dispatcher instance may be reused.
    self._deadline_jobs = []
    IECore.Log.info("Beginning Deadline submission")
    dispatch_data = {}
    dispatch_data["scriptNode"] = root_batch.preTasks()[0].node().scriptNode()
    # Serialise the script into the job directory so Deadline workers can load it.
    dispatch_data["scriptFile"] = os.path.join(self.jobDirectory(), os.path.basename(dispatch_data["scriptNode"]["fileName"].getValue()) or "untitled.gfr")
    dispatch_data["scriptFile"] = dispatch_data["scriptFile"].replace("\\", os.sep).replace("/", os.sep)
    dispatch_data["scriptNode"].serialiseToFile(dispatch_data["scriptFile"])
    context = Gaffer.Context.current()
    dispatch_data["deadlineBatch"] = context.substitute(self["jobName"].getValue()) or "untitled"
    # Root job anchors the walk; the serialised script rides along as an aux file.
    root_deadline_job = GafferDeadline.GafferDeadlineJob()
    root_deadline_job.setGafferNode(root_batch.node())
    root_deadline_job.setAuxFiles([dispatch_data["scriptFile"]])
    self.__addGafferDeadlineJob(root_deadline_job)
    root_jobs = []
    for upstream_batch in root_batch.preTasks():
        root_job = self.__buildDeadlineJobWalk(upstream_batch, dispatch_data)
        if root_job is not None:
            root_jobs.append(root_job)
    root_jobs = list(set(root_jobs))
    for root_job in root_jobs:
        self.__buildDeadlineDependencyWalk(root_job)
        # Control jobs with nothing to control should be removed after the dependencies are set up.
        # This mostly applies to FrameMask nodes where downstream nodes need to see tasks on the FrameMask
        # to trigger Job-Job dependency mode, but those tasks should not be submitted to Deadline.
        self.__removeOrphanTasksWalk(root_job)
        self.__submitDeadlineJob(root_job, dispatch_data)
def __buildDeadlineJobWalk(self, batch, dispatch_data):
    # Walk the batch tree depth-first, creating (or reusing) one
    # GafferDeadlineJob per unique (node, context) pair and recording
    # parent/child relationships between jobs.
    #
    # A visited batch has already contributed its frames; just return the
    # job it maps to so the caller can register it as a parent.
    if batch.blindData().get("deadlineDispatcher:visited"):
        return self.__getGafferDeadlineJob(batch.node(), batch.context())
    deadline_job = self.__getGafferDeadlineJob(batch.node(), batch.context())
    if not deadline_job:
        deadline_job = GafferDeadline.GafferDeadlineJob()
        deadline_job.setGafferNode(batch.node())
        deadline_job.setContext(batch.context())
        deadline_job.setAuxFiles([dispatch_data["scriptFile"]])
        self.__addGafferDeadlineJob(deadline_job)
    # Each batch contributes its frame range as (at least) one Deadline task.
    deadline_job.addBatch(batch, batch.frames())
    for upstream_batch in batch.preTasks():
        parent_deadline_job = self.__buildDeadlineJobWalk(upstream_batch, dispatch_data)
        if parent_deadline_job is not None:
            deadline_job.addParentJob(parent_deadline_job)
    # Mark so repeated visits (diamond dependencies) don't re-add frames.
    batch.blindData()["deadlineDispatcher:visited"] = IECore.BoolData(True)
    return deadline_job
def __buildDeadlineDependencyWalk(self, job):
    """Resolve task dependencies bottom-up: parents before *job* itself."""
    for upstream in job.getParentJobs():
        self.__buildDeadlineDependencyWalk(upstream)
    job.buildTaskDependencies()
def __removeOrphanTasksWalk(self, job):
    """Prune orphan tasks bottom-up: parents are cleaned before *job*."""
    for upstream in job.getParentJobs():
        self.__removeOrphanTasksWalk(upstream)
    job.removeOrphanTasks()
def __getGafferDeadlineJob(self, node, context):
    """Return the tracked job for (node, context), or None if untracked."""
    matches = (
        job for job in self._deadline_jobs
        if job.getGafferNode() == node and job.getContext() == context
    )
    return next(matches, None)
def __addGafferDeadlineJob(self, new_deadline_job):
    """Track *new_deadline_job*, ignoring duplicates.

    The previous implementation appended unconditionally and then
    deduplicated with ``list(set(...))``, which scrambles the job list
    into an arbitrary, run-to-run nondeterministic order. A membership
    check keeps the list unique while preserving insertion order, so
    lookups and submission iterate deterministically.
    """
    if new_deadline_job not in self._deadline_jobs:
        self._deadline_jobs.append(new_deadline_job)
def __submitDeadlineJob(self, deadline_job, dispatch_data):
# submit jobs depth first so parent job IDs will be populated
for parent_job in deadline_job.getParentJobs():
self.__submitDeadlineJob(parent_job, dispatch_data)
gaffer_node = deadline_job.getGafferNode()
if gaffer_node is None or len(deadline_job.getTasks()) == 0 or len(deadline_job.getTasks()) == 0:
return None
# this job is already submitted if it has an ID
if deadline_job.getJobID() is not None:
return deadline_job.getJobID()
self.preSpoolSignal()(self, deadline_job)
deadline_plug = gaffer_node["dispatcher"].getChild("deadline")
if deadline_plug is not None:
initial_status = "Suspended" if deadline_plug["submitSuspended"].getValue() else "Active"
machine_list_type = "Blacklist" if deadline_plug["isBlackList"].getValue() else "Whitelist"
# to prevent Deadline from splitting up our tasks (since we've already done that based on batches), set the chunk size to the largest frame range
chunk_size = deadline_job.getTasks()[0].getEndFrame() - deadline_job.getTasks()[0].getStartFrame() + 1
frame_string = ""
for t in deadline_job.getTasks():
chunk_size = max(t.getEndFrame() - t.getStartFrame() + 1, chunk_size)
if t.getStartFrame() == t.getEndFrame():
frame_string += ",{}".format(t.getStartFrame())
else:
frame_string += ",{}-{}".format(t.getStartFrame(), t.getEndFrame())
context = deadline_job.getContext()
job_info = {"Name": gaffer_node.relativeName(dispatch_data["scriptNode"]),
"Frames": frame_string,
"ChunkSize": chunk_size,
"Plugin": "Gaffer",
"BatchName": dispatch_data["deadlineBatch"],
"Comment": context.substitute(deadline_plug["comment"].getValue()),
"Department": context.substitute(deadline_plug["department"].getValue()),
"Pool": context.substitute(deadline_plug["pool"].getValue()),
"SecondaryPool": context.substitute(deadline_plug["secondaryPool"].getValue()),
"Group": context.substitute(deadline_plug["group"].getValue()),
"Priority": deadline_plug["priority"].getValue(),
"TaskTimeoutMinutes": int(deadline_plug["taskTimeout"].getValue()),
"EnableAutoTimeout": deadline_plug["enableAutoTimeout"].getValue(),
"ConcurrentTasks": deadline_plug["concurrentTasks"].getValue(),
"MachineLimit": deadline_plug["machineLimit"].getValue(),
machine_list_type: context.substitute(deadline_plug["machineList"].getValue()),
"LimitGroups": context.substitute(deadline_plug["limits"].getValue()),
"OnJobComplete": deadline_plug["onJobComplete"].getValue(),
"InitialStatus": initial_status,
}
auxFiles = deadline_job.getAuxFiles() # this will already have substitutions included
auxFiles += [context.substitute(f) for f in deadline_plug["auxFiles"].getValue()]
deadline_job.setAuxFiles(auxFiles)
environmentVariables = IECore.CompoundData()
deadline_plug["environmentVariables"].fillCompoundData(environmentVariables)
for name, value in environmentVariables.items():
deadline_job.appendEnvironmentVariable(name, context.substitute(str(value)))
deadlineSettings = IECore.CompoundData()
deadline_plug["deadlineSettings"].fillCompoundData(deadlineSettings)
for name, value in deadlineSettings.items():
deadline_job.appendDeadlineSetting(name, context.substitute(str(value)))
""" Dependencies are stored with a reference to the Deadline job since job IDs weren't assigned
when the task tree was walked. Now that parent jobs have been submitted and have IDs,
we can substitute that in for the dependency script to pick up.
We also want to dependencies to be as native to Deadline as possible, resorting to the dependency
script only in cases where it is needed (Deadline's dependency script triggering seems to be slower
than native task dependencies)
There are three possible dependency types allowed by Deadline:
1) Job-Job: All of the tasks for job A wait for all of the tasks for job B to finish before job A runs.
This is relatively rare when coming from Deadline and mostly is used by nodes upstream from
a FrameMask node. In that case releasing tasks per-frame would trigger downstream jobs sooner
than they should.
2) Frame-Frame: This is somewhat misleadingly named because Deadline only checks for frame dependency release
after each task completes, so this is very similar to task-task dependencies. Deadline can
only handle a start and end frame offset when comparing to the parent job so the task
offsets must match across all parent jobs to enable this mode.
3) Task-Task: A task for job A waits for a task for job B to finish before the task for job A runs.
If the dependency start and end frame offsets don't match, this has to be handled by a
dependency script.
"""
dep_list = deadline_job.getDependencies()
if len(dep_list) > 0 and deadline_plug["dependencyMode"].getValue() != "None":
job_dependent = False
frame_dependent = False
simple_frame_offset = True
if deadline_plug["dependencyMode"].getValue() == "Job":
job_dependent = True
elif deadline_plug["dependencyMode"].getValue() == "Frame":
frame_dependent = True
elif deadline_plug["dependencyMode"].getValue() == "Auto":
job_dependent = False
dep_jobs = [j["dependency_job"] for j in dep_list]
dep_jobs = list(set(dep_jobs))
for | |
"""
# Determine new ant locations
for player in range(self.num_players):
player_hills = sorted(self.player_hills(player),
key=lambda hill: (hill.last_touched, random()))
for hill in player_hills:
# hill must not be razed or occupied to be used
# player must have food in hive to spawn
if (self.hive_food[player] > 0 and
hill.loc not in self.current_ants):
self.hive_food[player] -= 1
self.add_ant(hill)
def add_food(self, loc):
    """ Place a new food item at *loc* and register it.

    Raises if the square is not empty land.
    """
    row, col = loc
    if self.map[row][col] != LAND:
        raise Exception("Add food error",
                        "Food already found at %s" %(loc,))
    self.map[row][col] = FOOD
    new_food = Food(loc, self.turn)
    self.current_food[loc] = new_food
    self.all_food.append(new_food)
    return new_food
def remove_food(self, loc, owner=None):
    """ Remove food from a location and return it

    If *owner* is given, the food is credited to that player's hive.
    An error is raised if no food exists there.
    """
    # Pop first: the previous code set the map square to LAND *before*
    # verifying food existed there, so a failed removal clobbered the map.
    try:
        food = self.current_food.pop(loc)
    except KeyError:
        raise Exception("Remove food error",
                        "Food not found at %s" %(loc,))
    self.map[loc[0]][loc[1]] = LAND
    food.end_turn = self.turn
    if owner is not None:
        food.owner = owner
        self.hive_food[owner] += 1
    return food
def add_hill(self, loc, owner):
    """Create a hill at *loc* for *owner*, register and return it."""
    new_hill = Hill(loc, owner)
    self.hills[loc] = new_hill
    return new_hill
def raze_hill(self, hill, killed_by):
    """Mark *hill* as razed by player *killed_by* and update scores."""
    hill.end_turn = self.turn
    hill.killed_by = killed_by
    # Razing rewards the attacker and penalizes the hill's owner.
    self.score[killed_by] += HILL_POINTS
    self.score[hill.owner] += RAZE_POINTS
    # reset cutoff_turns — a razed hill counts as game progress
    self.cutoff_turns = 0
def player_hills(self, player):
    """Return *player*'s hills that have not been razed."""
    return [hill for hill in self.hills.values()
            if hill.owner == player and hill.killed_by is None]
def add_ant(self, hill):
    """ Spawn an ant on a hill

    The new ant belongs to the hill's owner and refreshes the hill's
    last_touched turn.
    """
    spawned = Ant(hill.loc, hill.owner, self.turn)
    row, col = hill.loc
    self.map[row][col] = hill.owner
    self.all_ants.append(spawned)
    self.current_ants[hill.loc] = spawned
    hill.last_touched = self.turn
    return spawned
def add_initial_ant(self, loc, owner):
    """Place a starting ant for *owner* at *loc* (no hill bookkeeping)."""
    row, col = loc
    starter = Ant(loc, owner, self.turn)
    self.map[row][col] = owner
    self.all_ants.append(starter)
    self.current_ants[loc] = starter
    return starter
def kill_ant(self, ant, ignore_error=False):
    """ Kill the ant at the given location

    Raises an error if no ant is found at the location
    (if ignore error is set to False)
    """
    loc = ant.loc
    try:
        removed = self.current_ants.pop(loc)
    except KeyError:
        if not ignore_error:
            raise Exception("Kill ant error",
                            "Ant not found at %s" %(loc,))
        return None
    # Only mutate game state after the ant is confirmed present. The old
    # code cleared the map square, appended to killed_ants and flagged the
    # ant dead *before* the lookup, leaving inconsistent state whenever
    # ignore_error swallowed the KeyError.
    self.map[loc[0]][loc[1]] = LAND
    self.killed_ants.append(ant)
    ant.killed = True
    ant.die_turn = self.turn
    return removed
def player_ants(self, player):
    """Return the living ants owned by *player*."""
    return [a for a in self.current_ants.values() if a.owner == player]
def do_raze_hills(self):
    """Refresh or raze each hill based on the ant (if any) standing on it."""
    for loc, hill in self.hills.items():
        occupant = self.current_ants.get(loc)
        if occupant is None:
            continue
        if occupant.owner == hill.owner:
            # The owner touched the hill, keeping it active.
            hill.last_touched = self.turn
        elif hill.killed_by is None:
            # An enemy stands on a live hill: raze it.
            self.raze_hill(hill, occupant.owner)
def do_attack_damage(self):
    """ Kill ants which take at least 1 damage in a turn

    Each ant deals 1/#nearby_enemy damage to each nearby enemy.
    (nearby enemies are those within the attackradius)
    Any ant with at least 1 damage dies.
    Damage does not accumulate over turns
    (ie, ants heal at the end of the battle).
    """
    damage = defaultdict(Fraction)
    nearby_enemies = {}
    # each ant damages nearby enemies
    for ant in self.current_ants.values():
        enemies = self.nearby_ants(ant.loc, self.attackradius, ant.owner)
        if enemies:
            nearby_enemies[ant] = enemies
            # One point of damage split evenly among the enemies. The old
            # code computed Fraction(strenth, len(enemies) * 10) where a
            # dead if/else fixed strenth at 10 in every branch (and risked
            # an IndexError reading ant.orders[-1]); that reduces exactly
            # to Fraction(1, len(enemies)).
            damage_per_enemy = Fraction(1, len(enemies))
            for enemy in enemies:
                damage[enemy] += damage_per_enemy
    # kill ants with at least 1 damage
    for ant in damage:
        if damage[ant] >= 1:
            self.kill_ant(ant)
def do_attack_support(self):
    """ Kill ants which have more enemies nearby than friendly ants

    An ant dies if the number of enemy ants within the attackradius
    is greater than the number of friendly ants within the attackradius
    (the ant itself is not counted among its friends).
    """
    # map each doomed ant to the enemies responsible for its death
    doomed = {}
    for ant in self.current_ants.values():
        friends, enemies = [], []
        # split the neighborhood into friends and enemies
        for other in self.nearby_ants(ant.loc, self.attackradius, ant.owner):
            (friends if other.owner == ant.owner else enemies).append(other)
        # outnumbered ants are marked for death
        if len(friends) < len(enemies):
            doomed[ant] = enemies
    # perform the killing only after every ant has been evaluated
    for ant, enemies in doomed.items():
        self.kill_ant(ant)
def do_attack_focus(self):
    """ Kill ants which are the most surrounded by enemies

    For a given ant define: Focus = 1/NumOpponents, where its Opponents
    are the enemy ants within the attackradius. An ant survives only if
    its Focus is greater than the Focus of every one of its Opponents.
    """
    # map each ant to its nearby enemies
    opponents = {
        ant: self.nearby_ants(ant.loc, self.attackradius, ant.owner)
        for ant in self.current_ants.values()
    }
    doomed = []
    for ant, enemies in opponents.items():
        # weakness is the inverse of focus: the number of opponents
        weakness = len(enemies)
        if not weakness:
            # unattacked ants cannot die
            continue
        # the most focused (= least weak) opponent decides survival
        min_enemy_weakness = min(len(opponents[enemy]) for enemy in enemies)
        if min_enemy_weakness <= weakness:
            doomed.append(ant)
    # kill after evaluation so the outcome is order-independent
    for ant in doomed:
        self.kill_ant(ant)
def do_attack_closest(self):
    """ Iteratively kill neighboring groups of ants """
    # maps ants to nearby enemies by distance
    ants_by_distance = {}
    for ant in self.current_ants.values():
        # pre-compute distance to each enemy in range
        dist_map = defaultdict(list)
        for enemy in self.nearby_ants(ant.loc, self.attackradius, ant.owner):
            dist_map[self.distance(ant.loc, enemy.loc)].append(enemy)
        ants_by_distance[ant] = dist_map
    # create helper method to find ant groups
    # (ant_group is rebound per starting ant below; the closure always
    # reads the current binding from this enclosing scope)
    ant_group = set()
    def find_enemy(ant, distance):
        """ Recursively finds a group of ants to eliminate each other """
        # we only need to check ants at the given distance, because closer
        # ants would have been eliminated already
        for enemy in ants_by_distance[ant][distance]:
            if not enemy.killed and enemy not in ant_group:
                ant_group.add(enemy)
                find_enemy(enemy, distance)
    # setup done - start the killing, nearest groups first
    # NOTE(review): range excludes self.attackradius itself — confirm the
    # distance metric makes this cover the full attack range.
    for distance in range(1, self.attackradius):
        for ant in self.current_ants.values():
            if not ants_by_distance[ant] or ant.killed:
                continue
            ant_group = set([ant])
            find_enemy(ant, distance)
            # kill all ants in groups with more than 1 ant
            # this way of killing is order-independent because the
            # the ant group is the same regardless of which ant
            # you start looking at
            if len(ant_group) > 1:
                for ant in ant_group:
                    self.kill_ant(ant)
def destination(self, loc, d):
    """Return *loc* offset by vector *d*, wrapping around the toroidal map."""
    row, col = loc
    d_row, d_col = d
    return ((row + d_row) % self.height, (col + d_col) % self.width)
def access_map(self):
""" Determine the list of locations that each player is closest to """
distances = {}
players = defaultdict(set)
square_queue = deque()
# determine the starting squares and valid squares
# (where food can be placed)
for row, squares in enumerate(self.map):
for col, square in enumerate(squares):
loc = (row, col)
if square >= 0:
distances[loc] = 0
players[loc].add(square)
square_queue.append(loc)
elif square != WATER:
distances[loc] = None
# use bfs to determine who can reach each square first
while square_queue:
c_loc = square_queue.popleft()
for d in AIM.values():
n_loc = self.destination(c_loc, d)
if n_loc not in distances: continue # wall
if distances[n_loc] is None:
# first visit to this square
distances[n_loc] = distances[c_loc] + 1
players[n_loc].update(players[c_loc])
square_queue.append(n_loc)
elif distances[n_loc] == distances[c_loc] + 1:
# we've seen this square before, but the distance is
# the same - therefore combine the players that can
# reach this square
players[n_loc].update(players[c_loc])
# summarise the final results of the squares that are closest
# to a single unique player
access_map = defaultdict(list)
for coord, player_set in players.items():
if len(player_set) != 1: continue
access_map[player_set.pop()].append(coord)
return access_map
def find_closest_land(self, coord):
""" Find the closest square to coord which is a land square using BFS
Return None if no square is found
"""
if self.map[coord[0]][coord[1]] == LAND:
return coord
visited = set()
square_queue = deque([coord])
while square_queue:
c_loc = square_queue.popleft()
for d in AIM.values():
n_loc = self.destination(c_loc, d)
if n_loc in visited: continue
if self.map[n_loc[0]][n_loc[1]] == LAND:
return n_loc
visited.add(n_loc)
square_queue.append(n_loc)
return None
def do_food_none(self, amount=0):
""" Place no food """
return amount
def do_food_random(self, amount=1):
""" | |
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: <NAME>
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# ----------------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#
# Miscellaneous Tests for SFT
#
# Author: cheatwood
# ----------------------------------------------------------------------------
SFT_createGrids = [
("Fcst", "Wx", "WEATHER", 0, 24, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 0, 24, "Wide:R:--:<NoVis>:^Wide:S:-:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 0, 24, "Wide:S:+:1/4SM:", ["area2"]),
("Fcst", "Wx", "WEATHER", 0, 24, "Wide:RW:--:<NoVis>:^Wide:T:<NoInten>:<NoVis>:", ["area3"]),
("Fcst", "Wx", "WEATHER", 24, 48, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 24, 48, "Chc:T:<NoInten>:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 24, 48, "SChc:R:--:<NoVis>:", ["area2"]),
("Fcst", "Wx", "WEATHER", 24, 48, "SChc:ZR:--:<NoVis>:", ["area3"]),
("Fcst", "Wx", "WEATHER", 48, 72, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 48, 72, "Iso:RW:--:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 48, 72, "Areas:ZL:--:<NoVis>:", ["area2"]),
("Fcst", "Wx", "WEATHER", 48, 72, "Sct:SW:--:<NoVis>:", ["area3"]),
("Fcst", "Wx", "WEATHER", 72, 96, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 72, 96, "Def:BS:<NoInten>:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 72, 96, "SChc:S:-:<NoVis>:", ["area2"]),
("Fcst", "Wx", "WEATHER", 72, 96, "Def:BS:<NoInten>:<NoVis>:", ["area3"]),
("Fcst", "Wx", "WEATHER", 96, 120, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 96, 120, "Iso:SW:-:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 96, 120, "Areas:F:+:<NoVis>:", ["area2"]),
("Fcst", "Wx", "WEATHER", 96, 120, "Wide:R:--:<NoVis>:", ["area3"]),
("Fcst", "Wx", "WEATHER", 120, 144, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 120, 144, "Wide:L:--:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 120, 144, "Patchy:L:--:<NoVis>:", ["area2"]),
("Fcst", "Wx", "WEATHER", 120, 144, "Patchy:BD:<NoInten>:<NoVis>:", ["area3"]),
("Fcst", "Wx", "WEATHER", 144, 168, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 144, 168, "Wide:IP:--:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 144, 168, "Def:H:<NoInten>:<NoVis>:", ["area2"]),
("Fcst", "Wx", "WEATHER", 144, 168, "Patchy:K:<NoInten>:<NoVis>:", ["area3"]),
("Fcst", "Wx", "WEATHER", 168, 192, "Wide:R:--:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 168, 192, "Sct:T:<NoInten>:<NoVis>:", ["area2"]),
("Fcst", "Wx", "WEATHER", 168, 192, "Patchy:ZL:--:<NoVis>:", ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 70, ["area1"]),
("Fcst", "MinT", "SCALAR", "MinTBegin", "MinTEnd", 43, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 80, ["area2"]),
("Fcst", "MinT", "SCALAR", "MinTBegin", "MinTEnd", 47, ["area2"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 90, ["area3"]),
("Fcst", "MinT", "SCALAR", "MinTBegin", "MinTEnd", 49, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 24", "MaxTEnd + 24", 73, ["area1"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 24", "MinTEnd + 24", 45, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 24", "MaxTEnd + 24", 81, ["area2"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 24", "MinTEnd + 24", 48, ["area2"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 24", "MaxTEnd + 24", 92, ["area3"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 24", "MinTEnd + 24", 50, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 48", "MaxTEnd + 48", 75, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 48", "MaxTEnd + 48", 82, ["area2"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 48", "MaxTEnd + 48", 95, ["area3"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 48", "MinTEnd + 48", 47, ["area1"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 48", "MinTEnd + 48", 50, ["area2"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 48", "MinTEnd + 48", 52, ["area3"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 72", "MinTEnd + 72", 47, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 72", "MaxTEnd + 72", 77, ["area1"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 72", "MinTEnd + 72", 50, ["area2"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 72", "MaxTEnd + 72", 85, ["area2"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 72", "MinTEnd + 72", 52, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 72", "MaxTEnd + 72", 96, ["area3"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 96", "MinTEnd + 96", 49, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 96", "MaxTEnd + 96", 79, ["area1"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 96", "MinTEnd + 96", 51, ["area2"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 96", "MaxTEnd + 96", 86, ["area2"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 96", "MinTEnd + 96", 54, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 96", "MaxTEnd + 96", 100, ["area3"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 120", "MinTEnd + 120", 49, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 120", "MaxTEnd + 120", 81, ["area1"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 120", "MinTEnd + 120", 53, ["area2"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 120", "MaxTEnd + 120", 60, ["area2"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 120", "MinTEnd + 120", 55, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 120", "MaxTEnd + 120", 103, ["area3"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 144", "MinTEnd + 144", 49, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 144", "MaxTEnd + 144", 81, ["area1"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 144", "MinTEnd + 144", 50, ["area2"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 144", "MaxTEnd + 144", 80, ["area2"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 144", "MinTEnd + 144", 55, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 144", "MaxTEnd + 144", 96, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 168", "MaxTEnd + 168", 83, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 168", "MaxTEnd + 168", 85, ["area2"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 168", "MaxTEnd + 168", 100, ["area3"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 168", "MinTEnd + 168", 52, ["area1"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 168", "MinTEnd + 168", 54, ["area2"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 168", "MinTEnd + 168", 58, ["area3"]),
("Fcst", "PoP", "SCALAR", 0, 12, 45, ["area1"]),
("Fcst", "PoP", "SCALAR", 0, 12, 45, ["area2"]),
("Fcst", "PoP", "SCALAR", 0, 12, 45, ["area3"]),
("Fcst", "PoP", "SCALAR", 12, 24, 0, ["area1"]),
("Fcst", "PoP", "SCALAR", 12, 24, 40, ["area2"]),
("Fcst", "PoP", "SCALAR", 12, 24, 40, ["area3"]),
("Fcst", "PoP", "SCALAR", 24, 36, 50, ["area1"]),
("Fcst", "PoP", "SCALAR", 24, 36, 60, ["area2"]),
("Fcst", "PoP", "SCALAR", 24, 36, 70, ["area3"]),
("Fcst", "PoP", "SCALAR", 36, 48, 60, ["area1"]),
("Fcst", "PoP", "SCALAR", 36, 48, 70, ["area2"]),
("Fcst", "PoP", "SCALAR", 36, 48, 80, ["area3"]),
("Fcst", "PoP", "SCALAR", 48, 60, 55, ["area1"]),
("Fcst", "PoP", "SCALAR", 48, 60, 55, ["area2"]),
("Fcst", "PoP", "SCALAR", 48, 60, 60, ["area3"]),
("Fcst", "PoP", "SCALAR", 60, 72, 100, ["area1"]),
("Fcst", "PoP", "SCALAR", 60, 72, 85, ["area2"]),
("Fcst", "PoP", "SCALAR", 60, 72, 72, ["area3"]),
("Fcst", "PoP", "SCALAR", 72, 84, 62, ["area1"]),
("Fcst", "PoP", "SCALAR", 72, 84, 100, ["area2"]),
("Fcst", "PoP", "SCALAR", 72, 84, 60, ["area3"]),
("Fcst", "PoP", "SCALAR", 84, 96, 70, ["area1"]),
("Fcst", "PoP", "SCALAR", 84, 96, 80, ["area2"]),
("Fcst", "PoP", "SCALAR", 84, 96, 100, ["area3"]),
("Fcst", "PoP", "SCALAR", 96, 108, 45, ["area1"]),
("Fcst", "PoP", "SCALAR", 96, 108, 50, ["area2"]),
("Fcst", "PoP", "SCALAR", 96, 108, 45, ["area3"]),
("Fcst", "PoP", "SCALAR", 108, 120, 40, ["area1"]),
("Fcst", "PoP", "SCALAR", 108, 120, 75, ["area2"]),
("Fcst", "PoP", "SCALAR", 108, 120, 60, ["area3"]),
("Fcst", "PoP", "SCALAR", 120, 132, 55, ["area1"]),
("Fcst", "PoP", "SCALAR", 120, 132, 60, ["area2"]),
("Fcst", "PoP", "SCALAR", 120, 132, 55, ["area3"]),
("Fcst", "PoP", "SCALAR", 132, 144, 45, ["area1"]),
("Fcst", "PoP", "SCALAR", 132, 144, 25, ["area2"]),
("Fcst", "PoP", "SCALAR", 132, 144, 55, ["area3"]),
("Fcst", "PoP", "SCALAR", 144, 156, 90, ["area1"]),
("Fcst", "PoP", "SCALAR", 144, 156, 85, ["area2"]),
("Fcst", "PoP", "SCALAR", 144, 156, 70, ["area3"]),
("Fcst", "PoP", "SCALAR", 156, 168, 60, ["area1"]),
("Fcst", "PoP", "SCALAR", 156, 168, 0, ["area2"]),
("Fcst", "PoP", "SCALAR", 156, 168, 67, ["area3"]),
("Fcst", "PoP", "SCALAR", 168, 180, 50, ["area1"]),
("Fcst", "PoP", "SCALAR", 168, 180, 55, ["area2"]),
("Fcst", "PoP", "SCALAR", 168, 180, 60, ["area3"]),
("Fcst", "PoP", "SCALAR", 180, 192, 50, ["area1"]),
("Fcst", "PoP", "SCALAR", 180, 192, 55, ["area2"]),
("Fcst", "PoP", "SCALAR", 180, 192, 60, ["area3"]),
("Fcst", "Wind", "VECTOR", 0, 24, (40, "SW"), "all"),
("Fcst", "Wind", "VECTOR", 24, 48, (35, "NW"), "all"),
("Fcst", "Wind", "VECTOR", 48, 72, (45, "W"), "all"),
("Fcst", "Wind", "VECTOR", 72, 96, (50, "NW"), "all"),
("Fcst", "Wind", "VECTOR", 96, 120, (45, "N"), "all"),
("Fcst", "Wind", "VECTOR", 120, 144, (40, "NW"), "all"),
("Fcst", "Wind", "VECTOR", 144, 168, (30, "W"), "all"),
("Fcst", "Wind", "VECTOR", 168, 192, (20, "E"), "all"),
("Fcst", "QPF", "SCALAR", 0, 12, 0, ["area1"]),
("Fcst", "QPF", "SCALAR", 0, 12, 0.01, ["area2"]),
("Fcst", "QPF", "SCALAR", 0, 12, 0.05, ["area3"]),
("Fcst", "QPF", "SCALAR", 12, 24, 0, ["area1"]),
("Fcst", "QPF", "SCALAR", | |
# coding: utf-8
# public items
__all__ = ["loaddfits", "savefits", "loadnetcdf", "savenetcdf"]
# standard library
from datetime import datetime
from pytz import timezone
from logging import getLogger
from uuid import uuid4
from pathlib import Path
from pkgutil import get_data
# dependent packages
import tomli
import decode as dc
import numpy as np
import xarray as xr
from astropy.io import fits
from scipy.interpolate import interp1d
# module logger
logger = getLogger(__name__)
def loaddfits(
    fitsname,
    coordtype="azel",
    loadtype="temperature",
    starttime=None,
    endtime=None,
    pixelids=None,
    scantypes=None,
    mode=0,
    **kwargs
):
    """Load a decode array from a DFITS file.

    Args:
        fitsname (str): Name of DFITS file.
        coordtype (str): Coordinate type included into a decode array.
            'azel': Azimuth / elevation.
            'radec': Right ascension / declination.
        loadtype (str): Data unit of xarray.
            'temperature': Temperature [K] (the 'Tsignal' column).
            'power': Power [W] (the 'Psignal' column).
            'amplitude': Amplitude.
            'phase': Phase.
            'linphase': Linear phase.
        starttime (int, str or numpy.datetime64): Start time of loaded data.
            It can be specified by the start index (int),
            the time compatible with numpy.datetime64 (str),
            or numpy.datetime64 (numpy.datetime64).
            Default is None and it means the data will be loaded from the first record.
        endtime (int, str or numpy.datetime64): End time of loaded data.
            It can be specified by the end index (int),
            the time compatible with numpy.datetime64 (str),
            or numpy.datetime64 (numpy.datetime64).
            Default is None and it means the data will be loaded until the last record.
        pixelids (int or list): Under development.
        scantypes (list(str)): Scan types, such as 'GRAD', 'SCAN', 'OFF', 'R'.
        mode (int): Loading mode.
            0: Relative coordinates with cosine projection (RECOMMENDED).
            1: Relative coordinates without cosine projection.
            2: Absolute coordinates.
        kwargs (optional):
            findR (bool): Automatically find R positions.
            ch (int): Representative channel id used for finding R.
            Rth (float): Threshold of R.
            skyth (float): Threshold of sky.
            cutnum (int): The number of points of unused data at the edge.
            still (bool): When it is true, scantypes of on/off are manually assigned.
            period (float): On/off period in second for still data.
            shuttle (bool): For shuttle observations.
            xmin_off (float): Minimum x of off-point data.
            xmax_off (float): Maximum x of off-point data.
            xmin_on (float): Minimum x of on-point data.
            xmax_on (float): Maximum x of on-point data.

    Returns:
        decode array (decode.array): Loaded decode array.

    Raises:
        KeyError: If mode, loadtype or coordtype is not a supported value.
        ValueError: If starttime or endtime has an unsupported type.
    """
    if mode not in [0, 1, 2]:
        raise KeyError(mode)
    logger.info("coordtype starttime endtime mode loadtype")
    logger.info("{} {} {} {} {}".format(coordtype, starttime, endtime, mode, loadtype))
    # pick up kwargs
    # for findR
    findR = kwargs.pop("findR", False)
    ch = kwargs.pop("ch", 0)
    Rth = kwargs.pop("Rth", 280)
    skyth = kwargs.pop("skyth", 150)
    cutnum = kwargs.pop("cutnum", 1)
    # for still
    still = kwargs.pop("still", False)
    period = kwargs.pop("period", 2)
    # for shuttle
    shuttle = kwargs.pop("shuttle", False)
    xmin_off = kwargs.pop("xmin_off", 0)
    xmax_off = kwargs.pop("xmax_off", 0)
    xmin_on = kwargs.pop("xmin_on", 0)
    xmax_on = kwargs.pop("xmax_on", 0)
    # load data
    fitsname = str(Path(fitsname).expanduser())
    with fits.open(fitsname) as hdulist:
        obsinfo = hdulist["OBSINFO"].data
        obshdr = hdulist["OBSINFO"].header
        antlog = hdulist["ANTENNA"].data
        readout = hdulist["READOUT"].data
        wealog = hdulist["WEATHER"].data
    # obsinfo
    masterids = obsinfo["masterids"][0].astype(np.int64)
    kidids = obsinfo["kidids"][0].astype(np.int64)
    kidfreqs = obsinfo["kidfreqs"][0].astype(np.float64)
    kidtypes = obsinfo["kidtypes"][0].astype(np.int64)
    # parse start/end time
    t_ant = np.array(antlog["time"]).astype(np.datetime64)
    t_out = np.array(readout["starttime"]).astype(np.datetime64)
    t_wea = np.array(wealog["time"]).astype(np.datetime64)
    if starttime is None:
        startindex = 0
    elif isinstance(starttime, int):
        startindex = starttime
    elif isinstance(starttime, str):
        startindex = np.searchsorted(t_out, np.datetime64(starttime))
    elif isinstance(starttime, np.datetime64):
        startindex = np.searchsorted(t_out, starttime)
    else:
        raise ValueError(starttime)
    if endtime is None:
        endindex = t_out.shape[0]
    elif isinstance(endtime, int):
        endindex = endtime
    elif isinstance(endtime, str):
        endindex = np.searchsorted(t_out, np.datetime64(endtime), "right")
    elif isinstance(endtime, np.datetime64):
        endindex = np.searchsorted(t_out, endtime, "right")
    else:
        # fix: this branch previously raised ValueError(starttime), hiding
        # the actual offending argument
        raise ValueError(endtime)
    # clip the readout to the time span covered by the antenna log
    if t_out[endindex - 1] > t_ant[-1]:
        logger.warning("Endtime of readout is adjusted to that of ANTENNA HDU.")
        endindex = np.searchsorted(t_out, t_ant[-1], "right")
    t_out = t_out[startindex:endindex]
    # readout
    if loadtype == "temperature":
        response = readout["Tsignal"][startindex:endindex].astype(np.float64)
    elif loadtype == "power":
        response = readout["Psignal"][startindex:endindex].astype(np.float64)
    elif loadtype == "amplitude":
        response = readout["amplitude"][startindex:endindex].astype(np.float64)
    elif loadtype == "phase":
        response = readout["phase"][startindex:endindex].astype(np.float64)
    elif loadtype == "linphase":
        response = readout["line_phase"][startindex:endindex].astype(np.float64)
    else:
        raise KeyError(loadtype)
    # antenna
    if coordtype == "azel":
        x = antlog["az"].copy()
        y = antlog["el"].copy()
        xref = np.median(antlog["az_center"])
        yref = np.median(antlog["el_center"])
        if mode in [0, 1]:
            x -= antlog["az_center"]
            y -= antlog["el_center"]
            if mode == 0:
                # cosine projection: compress azimuth offsets at high elevation
                x *= np.cos(np.deg2rad(antlog["el"]))
    elif coordtype == "radec":
        x = antlog["ra"].copy()
        y = antlog["dec"].copy()
        xref = obshdr["RA"]
        yref = obshdr["DEC"]
        if mode in [0, 1]:
            x -= xref
            y -= yref
            if mode == 0:
                x *= np.cos(np.deg2rad(antlog["dec"]))
    else:
        raise KeyError(coordtype)
    scantype = antlog["scantype"]
    # weatherlog
    temp = wealog["temperature"]
    pressure = wealog["pressure"]
    vpressure = wealog["vapor-pressure"]
    windspd = wealog["windspd"]
    winddir = wealog["winddir"]
    # interpolate antenna/weather logs onto the readout time axis
    dt_out = (t_out - t_out[0]) / np.timedelta64(1, "s")
    dt_ant = (t_ant - t_out[0]) / np.timedelta64(1, "s")
    dt_wea = (t_wea - t_out[0]) / np.timedelta64(1, "s")
    x_i = np.interp(dt_out, dt_ant, x)
    y_i = np.interp(dt_out, dt_ant, y)
    temp_i = np.interp(dt_out, dt_wea, temp)
    pressure_i = np.interp(dt_out, dt_wea, pressure)
    vpressure_i = np.interp(dt_out, dt_wea, vpressure)
    windspd_i = np.interp(dt_out, dt_wea, windspd)
    winddir_i = np.interp(dt_out, dt_wea, winddir)
    # scantype is a string array; map it to integers so it can be
    # nearest-neighbor interpolated, then map back
    scandict = {t: n for n, t in enumerate(np.unique(scantype))}
    scantype_v = np.zeros(scantype.shape[0], dtype=int)
    for k, v in scandict.items():
        scantype_v[scantype == k] = v
    scantype_vi = interp1d(
        dt_ant,
        scantype_v,
        kind="nearest",
        bounds_error=False,
        fill_value=(scantype_v[0], scantype_v[-1]),
    )(dt_out)
    scantype_i = np.full_like(scantype_vi, "GRAD", dtype="<U8")
    for k, v in scandict.items():
        scantype_i[scantype_vi == v] = k
    # for still data: alternate OFF/SCAN blocks of fixed period
    if still:
        for n in range(int(dt_out[-1]) // period + 1):
            offmask = (period * 2 * n <= dt_out) & (dt_out < period * (2 * n + 1))
            onmask = (period * (2 * n + 1) <= dt_out) & (dt_out < period * (2 * n + 2))
            scantype_i[offmask] = "OFF"
            scantype_i[onmask] = "SCAN"
    if shuttle:
        # classify by x position; everything outside both windows is junk
        offmask = (xmin_off < x_i) & (x_i < xmax_off)
        onmask = (xmin_on < x_i) & (x_i < xmax_on)
        scantype_i[offmask] = "OFF"
        scantype_i[onmask] = "SCAN"
        scantype_i[(~offmask) & (~onmask)] = "JUNK"
    if findR:
        # R (hot load) samples are those above Rth in the reference channel
        Rindex = np.where(response[:, ch] >= Rth)
        scantype_i[Rindex] = "R"
        # NOTE(review): `&` binds tighter than `|`, so this evaluates as
        # A | (B & (scantype_i == "R")).  If the intent was (A | B) & (...),
        # parentheses are missing -- confirm before changing.
        movemask = np.hstack(
            [[False] * cutnum, scantype_i[cutnum:] != scantype_i[:-cutnum]]
        ) | np.hstack(
            [scantype_i[:-cutnum] != scantype_i[cutnum:], [False] * cutnum]
        ) & (
            scantype_i == "R"
        )
        scantype_i[movemask] = "JUNK"
        scantype_i[(response[:, ch] > skyth) & (scantype_i != "R")] = "JUNK"
        scantype_i[(response[:, ch] <= skyth) & (scantype_i == "R")] = "JUNK"
        skyindex = np.where(response[:, ch] <= skyth)
        scantype_i_temp = scantype_i.copy()
        scantype_i_temp[skyindex] = "SKY"
        movemask = np.hstack(
            [[False] * cutnum, scantype_i_temp[cutnum:] != scantype_i_temp[:-cutnum]]
        ) | np.hstack(
            [scantype_i_temp[:-cutnum] != scantype_i_temp[cutnum:], [False] * cutnum]
        ) & (
            scantype_i_temp == "SKY"
        )
        scantype_i[movemask] = "JUNK"
    # scanid: increases by one at every scantype transition
    scanid_i = np.cumsum(np.hstack([False, scantype_i[1:] != scantype_i[:-1]]))
    # coordinates
    tcoords = {
        "x": x_i,
        "y": y_i,
        "time": t_out,
        "temp": temp_i,
        "pressure": pressure_i,
        "vapor-pressure": vpressure_i,
        "windspd": windspd_i,
        "winddir": winddir_i,
        "scantype": scantype_i,
        "scanid": scanid_i,
    }
    chcoords = {
        "masterid": masterids,
        "kidid": kidids,
        "kidfq": kidfreqs,
        "kidtp": kidtypes,
    }
    scalarcoords = {
        "coordsys": coordtype.upper(),
        "datatype": loadtype,
        "xref": xref,
        "yref": yref,
    }
    # make array
    array = dc.array(
        response, tcoords=tcoords, chcoords=chcoords, scalarcoords=scalarcoords
    )
    if scantypes is not None:
        # keep only the requested scan types
        mask = np.full(array.shape[0], False)
        for scantype in scantypes:
            mask |= array.scantype == scantype
        array = array[mask]
    return array
def savefits(cube, fitsname, **kwargs):
    """Save a cube to a 3D-cube FITS file.

    Args:
        cube (xarray.DataArray): Cube to be saved.
        fitsname (str): Name of output FITS file.
        kwargs (optional): Other arguments common with astropy.io.fits.writeto().
            dropdeg (bool): If True, save a 3D cube as a 2D image by
                dropping the (degenerate) channel axis.
    """
    dropdeg = kwargs.pop("dropdeg", False)
    ndim = len(cube.dims)
    # default headers are bundled with the package as a TOML file
    hdrdata = tomli.loads(get_data("decode", "data/fitsinfo.toml").decode("utf-8"))
    if ndim == 2:
        header = fits.Header(hdrdata["dcube_2d"])
        data = cube.values.T
    elif ndim == 3:
        if dropdeg:
            header = fits.Header(hdrdata["dcube_2d"])
            data = cube.values[:, :, 0].T
        else:
            header = fits.Header(hdrdata["dcube_3d"])
            kidfq = cube.kidfq.values
            # NOTE(review): the argsort is computed over the NaN-filtered
            # frequencies but applied to the *unfiltered* cube; if any kidfq
            # is NaN the resulting channel order looks wrong -- confirm the
            # intended behaviour before changing.
            freqrange = ~np.isnan(kidfq)
            orderedfq = np.argsort(kidfq[freqrange])
            ordered = cube[:, :, orderedfq]
            data = ordered.values.T
    else:
        raise TypeError(ndim)
    # fill in coordinate-system keywords
    if cube.coordsys == "AZEL":
        header.update({"CTYPE1": "dAZ", "CTYPE2": "dEL"})
    elif cube.coordsys == "RADEC":
        header.update({"OBSRA": float(cube.xref), "OBSDEC": float(cube.yref)})
    # spatial axes: reference values and pixel increments from the cube grid
    header.update(
        {
            "CRVAL1": float(cube.x[0]),
            "CDELT1": float(cube.x[1] - cube.x[0]),
            "CRVAL2": float(cube.y[0]),
            "CDELT2": float(cube.y[1] - cube.y[0]),
            "DATE": datetime.now(timezone("UTC")).isoformat(),
        }
    )
    # frequency axis only exists for a full 3D cube
    if (ndim == 3) and (not dropdeg):
        header.update(
            {
                "CRVAL3": float(ordered.kidfq[0]),
                "CDELT3": float(ordered.kidfq[1] - ordered.kidfq[0]),
            }
        )
    fitsname = str(Path(fitsname).expanduser())
    fits.writeto(fitsname, data, header, **kwargs)
    logger.info("{} has been created.".format(fitsname))
def loadnetcdf(filename, copy=True):
"""Load a dataarray from a NetCDF file.
Args:
filename (str): Filename (*.nc).
copy (bool): If True, dataarray is copied in memory. Default is | |
<filename>DIP/fluo_beads_sim/fluorescent_beads.py
# -*- coding: utf-8 -*-
"""
Experiments with simulation of images of fluorescent beads for evaluation of precision of their localization.
In other words, for generating ground truth data for tracking / segmentation evaluations.
@author: ssklykov
"""
# %% General imports
import numpy as np
import matplotlib.pyplot as plt
import scipy.ndimage.filters as filters # includes many filters
# import cv2 # importing of OpenCV module (not used so far for simplicity)
# print(cv2.__version__) # checking the version of installed OpenCV
import os
from skimage.io import imsave
import math
# %% class definition
class image_beads():
    """Collection of all methods related to generation of fluorescent beads in the microscope images.

    NOTE(review): the list-valued attributes below (kernel_PSF, offsets,
    debug_offsets, ...) are class-level mutable defaults and are shared by
    all instances unless an instance rebinds them.
    """
    # default values (overridden per instance by __init__ when non-default
    # arguments are supplied)
    width = 11  # bead image width in pixels
    height = 11  # bead image height in pixels
    possible_img_types = ['uint8', 'uint16', 'float']  # supported gray-image dtypes
    character_size = 5  # characteristic bead size in pixels (radius for 'even round')
    image_type = 'uint8'  # active image dtype, one of possible_img_types
    bead_types = ["even round", "gaussian round", "uneven round", "even improved round"]
    bead_type = "even round"  # active profile type, one of bead_types
    bead_img = np.zeros((height, width), dtype=image_type)  # intensity profile of a bead
    bead_border = np.zeros((height, width), dtype=image_type)  # edge or border of a bead
    maxPixelValue = 255  # default for a 8bit gray image (65535 for uint16, 1.0 for float)
    kernel_PSF = []  # for storing the kernel for convolution ("diffraction blurring of sharp edges")
    bead_conv_border = []  # for storing blurred border
    offsets = [0, 0]  # for storing global offset of a bead image (matrix with its profile)
    # Since the PSF kernel is saved as the class attribute, the collection of following parameters also useful:
    NA = 1.25  # numerical aperture (PSF kernel parameter)
    wavelength = 532  # in nanometers
    calibration = 110  # in nanometer/pixel
    max_pixel_value_bead = 255  # for 8bit image
    debug_offsets = []  # diagnostic record of offsets applied during generation
    debug_centers = []  # diagnostic record of computed bead centers
    shifts = [0.0, 0.0, 1.0, 1.0]  # for fixing bug of wrong placing of images
# %% Constructor
def __init__(self, image_type: str = 'uint8', character_size: int = 5, bead_type: str = "even round"):
"""
Generate basis class with the image (representation) of fluorescent bead with various intensity profile.
Parameters
----------
image_type : str, optional
Image type: 8bit, 16bit or float gray image. The default is 'uint8'.
character_size : int, optional
Characteristic size of a bead. For 'even round' bead - radius. The default is 5.
bead_type : str, optional
Type of fluorescent bead profile: 'even round', 'gaussian round', 'uneven round'.
The default is "even round".
Returns
-------
None.
"""
if image_type in self.possible_img_types:
self.image_type = image_type
else:
self.image_type = 'uint8'
print("Image type hasn't been recognized, initialized default 8bit gray image")
if bead_type in self.bead_types:
self.bead_type = bead_type
else:
self.bead_type = "even round"
print("Bead type hasn't been recognized, initialized default 'even round' type")
if (character_size != 5) or (image_type != 'uint8'):
self.character_size = character_size
self.width = int(character_size*2) + 1
self.height = int(character_size*2) + 1
self.bead_img = np.zeros((self.height, self.width), dtype=self.image_type)
self.bead_border = np.zeros((self.height, self.width), dtype=self.image_type)
if image_type == 'uint16':
self.maxPixelValue = 65535
elif image_type == 'float':
self.maxPixelValue = 1.0 # According to the specification of scikit-image
# %% Generate centralized profile
def get_centralized_bead(self, max_pixel_val):
"""
Generate centared image of a bead with specified type.
Parameters
----------
max_pixel_val : int(uint8 or uint16) or float
Maximum intensity value in the center of a bead (or just through a bead intensity profile).
Returns
-------
None. Calculated 2D image and the border stored in class' attributes self.bead_img and self.bead_border.
"""
# Below - for repeating generation of initial sized bead after trimming of the previously generated image
if self.bead_type == "even round":
self.height = int(self.character_size*2) + 1
self.width = int(self.character_size*2) + 1
self.bead_img = np.zeros((self.height, self.width), dtype=self.image_type)
i_center = ((self.height - 1) // 2)
j_center = ((self.width - 1) // 2)
if self.bead_type == "even round":
radius = np.round(self.character_size*0.5, 3)
for i in range(self.width):
for j in range(self.height):
distance = np.round((np.sqrt(np.power((i - i_center), 2) + np.power((j - j_center), 2))), 3)
position_diff = np.round((distance - radius), 3)
# print(i, j, ":", distance - radius)
# HINT: Simple even round bead - all pixels laying in radius of a bead have the same intensity
if (position_diff < 1.0):
self.bead_img[i, j] = max_pixel_val
if ((distance - radius) < 1) and ((distance - radius) >= 0):
self.bead_border[i, j] = max_pixel_val
# HINT: attempt to calculate more real even round bead profile with some smoothing of edge pixels
if self.bead_type == "even improved round":
self.height = int(self.character_size*2) + 1
self.width = int(self.character_size*2) + 1
self.bead_img = np.zeros((self.height, self.width), dtype=self.image_type)
radius = np.round(self.character_size*0.5, 3)
round_precision = 3
for i in range(self.width):
for j in range(self.height):
distance = np.round((np.sqrt(np.power((i - i_center), 2) + np.power((j - j_center), 2))),
round_precision)
position_diff = np.round((distance - radius), round_precision)
# HINT: some model with approximation on the difference between distance to the pixel and radius
# This model implies reduction to the half of the maximum on the edges
# ???: artificats on the edges remaining, but the integration should give the same result, isn't it?
# ???: visually - also need to be inspected
# TODO: check dependency on the even and odd radius on the simulation
if (position_diff < 0.0): # innert part of a bead
self.bead_img[i, j] = max_pixel_val # all pixels completely lays within the round profile
if (position_diff == 0.0): # special, 'border' pixels - there distance exactly the same as radius
# integral estimation or the square of occupied pixel there distance equal to radius (even pixels)
self.bead_img[i, j] = np.uint8(max_pixel_val*0.5)
print(np.uint8(max_pixel_val*0.5))
if (position_diff > 0.0) and (position_diff < 1.0): # pixels on the edges of circle
# print(1 - position_diff)
# (1 - position_diff == remaining part of the pixel occupied by point-sourse fluorophores)
# the square of this pixel estimated as the portion of 'border' pixel
# maybe, this approximation is wrong, I just tried to base it on the geometrical drawings
self.bead_img[i, j] = np.uint8(max_pixel_val*0.5*(1 - position_diff))
# !!!: calculation of the border in general redundant
if ((distance - radius) < 1) and ((distance - radius) >= 0):
# print(distance-radius)
self.bead_border[i, j] = max_pixel_val
# %% Generate centralized with shift less 1 pixel profile
def get_bead_img_arbit_center(self, i_offset: float, j_offset: float, max_pixel_val, round_precision: int = 3,
debug: bool = False):
"""
Generate shifted for less than 1 pixel centared before image of a bead with specified type.
Such splitting on even pixel shift and less than 1 pixel is performed because of even offset
could be applied only for integration the bead image on the larger scene there even shifts are
simply drawn by shifting entire bead image.
Parameters
----------
i_offset : float
Arbitrary shift that will splitted on even pixel shift stored in self.offsets and less than 1 pixel
offset applied for an image calculation.
j_offset : float
Same as i_offset but in other direction.
max_pixel_val : int(uint8 or uint16) or float
Maximum intensity value in the center of a bead (or just through a bead intensity profile).
round_precision: int, optional
Restrict the precision of calculation of float differences to control (estimate) possible rounding errors.
The default is 3.
debug: bool, optional
Flag for saving some internal statistical values for checking of possible bugs during calculations.
The default is False.
Returns
-------
None. Calculated 2D image and the border stored in class' attributes self.bead_img and self.bead_border
"""
# Below - for repeating generation of initial sized bead after trimming of the previously generated image
if self.bead_type == "even round":
self.height = int(self.character_size*2) + 1
self.width = int(self.character_size*2) + 1
self.bead_img = np.zeros((self.height, self.width), dtype=self.image_type)
# The self.offsets will store even pixel offsets for bead for its further introducing to the scene (background)
self.offsets[0] = 0 # reinitilization for repeating of generation
self.offsets[1] = 0 # reinitilization for repeating of generation
# Attempt to avoid rounding errors - introducing delta_precision (difference between 0.0 - 0.0 != 0.0 always)
if round_precision > 1:
delta_precision = 1/(pow(10, round_precision + 3))
delta_precision = np.round(delta_precision, round_precision + 4)
# print(delta_precision)
# print(round_precision)
# Using the math module modf() instead of manually defining float and integer part of input float values
(i_float_part, i_integer_part) = math.modf(i_offset)
(j_float_part, j_integer_part) = math.modf(j_offset)
| |
<reponame>xylar/e3sm_to_cmip<gh_stars>1-10
'''
Utilities related to converting MPAS-Ocean and MPAS-Seaice files to CMOR
'''
from __future__ import absolute_import, division, print_function
import re
import numpy as np
import netCDF4
from datetime import datetime
import sys
import xarray
import os
import cmor
import subprocess
import tempfile
import logging
import argparse
from dask.diagnostics import ProgressBar
import dask
import multiprocessing
from multiprocessing.pool import ThreadPool
def remap(ds, mappingFileName, threshold=0.05):
    '''
    Use ncremap to remap the xarray Dataset to a new target grid.

    Parameters
    ----------
    ds : xarray.Dataset
        The data set to remap (on the MPAS cell mesh).
    mappingFileName : str
        Path to an ncremap mapping file from the source to the target grid.
    threshold : float, optional
        Cells where the remapped ``cellMask`` falls below this fraction are
        masked out; above it, values are renormalized by the mask fraction.

    Returns
    -------
    xarray.Dataset
        The remapped data set on the target lat/lon grid.
    '''
    # write the dataset to a temp file for ncremap to consume
    inFileName = _get_temp_path()
    outFileName = _get_temp_path()
    if 'depth' in ds.dims:
        ds = ds.transpose('time', 'depth', 'nCells', 'nbnd')
    write_netcdf(ds, inFileName)
    # set an environment variable to make sure we're not using czender's
    # local version of NCO instead of one we have intentionally loaded
    env = os.environ.copy()
    env['NCO_PATH_OVERRIDE'] = 'no'
    args = ['ncremap', '-7', '--dfl_lvl=1', '--no_stdin',
            '--no_cll_msr', '--no_frm_trm', '--no_stg_grd', '--msk_src=none',
            '--mask_dst=none', '--map={}'.format(mappingFileName), inFileName,
            outFileName]
    try:
        proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, env=env)
        (out, err) = proc.communicate()
        logging.info(out)
        if proc.returncode:
            print("Error running ncremap command: {}".format(" ".join(args)))
            print(err.decode('utf-8'))
            # CalledProcessError takes (returncode, cmd); the original passed a
            # single message string, which raised TypeError instead of the
            # intended exception
            raise subprocess.CalledProcessError(proc.returncode, args,
                                                output=out, stderr=err)
        ds = xarray.open_dataset(outFileName, decode_times=False)
        if 'depth' in ds.dims:
            ds = ds.transpose('time', 'depth', 'lat', 'lon', 'nbnd')
        # load into memory so the temp file can be removed afterwards
        ds.load()
    finally:
        # remove the temporary files even if ncremap or the read failed
        for fileName in (inFileName, outFileName):
            if os.path.exists(fileName):
                os.remove(fileName)
    if 'cellMask' in ds:
        # mask out mostly-land cells and renormalize partially covered ones
        mask = ds['cellMask'] > threshold
        norm = 1./ds['cellMask'].where(mask)
        ds = ds.drop('cellMask')
        for varName in ds.data_vars:
            var = ds[varName]
            # make sure all of the mask dimensions are in the variable
            if all([dim in var.dims for dim in mask.dims]):
                ds[varName] = ds[varName].where(mask)*norm
    return ds
def avg_to_mid_level(ds):
    '''Average fields on layer interfaces (nVertLevelsP1) to layer centers.

    Variables without the nVertLevelsP1 dimension are copied through
    unchanged.  Returns a new Dataset; the input is not modified.
    '''
    result = xarray.Dataset()
    for name in ds.data_vars:
        field = ds[name]
        if 'nVertLevelsP1' not in field.dims:
            result[name] = ds[name]
            continue
        count = field.sizes['nVertLevelsP1']
        # mean of each pair of adjacent interfaces gives the mid-level value
        upper = field.isel(nVertLevelsP1=slice(0, count - 1))
        lower = field.isel(nVertLevelsP1=slice(1, count))
        result[name] = 0.5*(upper + lower)
    return result
def add_time(ds, dsIn, referenceDate='0001-01-01', offsetYears=0):
    '''Parse the MPAS xtime variable into CF-compliant time.

    Builds a 'time' coordinate (midpoint of each averaging interval) and a
    'time_bnds' variable from the xtime_startMonthly/xtime_endMonthly string
    arrays in dsIn, expressed as days since referenceDate.

    Parameters
    ----------
    ds : xarray.Dataset
        The data set to which the time coordinate is added (has a 'Time' dim).
    dsIn : xarray.Dataset
        The data set containing the xtime_startMonthly/xtime_endMonthly
        character arrays.
    referenceDate : str, optional
        The date that day 0 refers to, in 'YYYY-MM-DD' form.
    offsetYears : int, optional
        Years to add to the computed times (365 days per year — assumes a
        no-leap calendar; TODO confirm against the MPAS calendar in use).

    Returns
    -------
    xarray.Dataset
        ds with 'time' coordinate and 'time_bnds' added.
    '''
    ds = ds.rename({'Time': 'time'})
    dsIn = dsIn.rename({'Time': 'time'})
    xtimeStart = dsIn.xtime_startMonthly
    xtimeEnd = dsIn.xtime_endMonthly
    # collapse the per-character arrays into plain python strings
    xtimeStart = [''.join(x.astype('U')).strip()
                  for x in xtimeStart.values]
    xtimeEnd = [''.join(x.astype('U')).strip()
                for x in xtimeEnd.values]
    # fix xtimeStart, which has an offset by a time step (or so):
    # keep only the date part and force the time of day to midnight
    xtimeStart = ['{}_00:00:00'.format(xtime[0:10]) for xtime in xtimeStart]
    daysStart = offsetYears*365 + \
        _string_to_days_since_date(dateStrings=xtimeStart,
                                   referenceDate=referenceDate)
    daysEnd = offsetYears*365 + \
        _string_to_days_since_date(dateStrings=xtimeEnd,
                                   referenceDate=referenceDate)
    time_bnds = np.zeros((len(daysStart), 2))
    time_bnds[:, 0] = daysStart
    time_bnds[:, 1] = daysEnd
    # the time coordinate is the midpoint of each bounds interval
    days = 0.5*(daysStart + daysEnd)
    ds.coords['time'] = ('time', days)
    ds.time.attrs['units'] = 'days since {}'.format(referenceDate)
    ds.time.attrs['bounds'] = 'time_bnds'
    ds['time_bnds'] = (('time', 'nbnd'), time_bnds)
    ds.time_bnds.attrs['units'] = 'days since {}'.format(referenceDate)
    return ds
def add_depth(ds, dsCoord):
    '''Attach a 1D 'depth' coordinate (with bounds) to the data set.

    If ds has no nVertLevels dimension, it is returned unchanged.  The depth
    values come from refBottomDepth in dsCoord via _compute_depth.
    '''
    if 'nVertLevels' not in ds.dims:
        return ds
    ds = ds.rename({'nVertLevels': 'depth'})
    dsCoord = dsCoord.rename({'nVertLevels': 'depth'})
    depth, depth_bnds = _compute_depth(dsCoord.refBottomDepth)
    ds.coords['depth'] = ('depth', depth)
    # CF metadata for the depth coordinate
    ds.depth.attrs.update({
        'long_name': 'reference depth of the center of each vertical level',
        'standard_name': 'depth',
        'units': 'meters',
        'axis': 'Z',
        'positive': 'down',
        'valid_min': depth_bnds[0, 0],
        'valid_max': depth_bnds[-1, 1],
        'bounds': 'depth_bnds',
    })
    ds.coords['depth_bnds'] = (('depth', 'nbnd'), depth_bnds)
    ds.depth_bnds.attrs['long_name'] = 'Gridcell depth interfaces'
    # make sure every variable with a depth dimension carries the coordinate
    for name in ds.data_vars:
        if 'depth' in ds[name].dims:
            ds[name] = ds[name].assign_coords(depth=ds.depth)
    return ds
def add_mask(ds, mask):
    '''
    Apply a 2D or 3D cell mask to every compatible variable in the data set.

    Masked-out entries are set to 0.; the mask itself is stored (as floats)
    in a new 'cellMask' variable.  Returns a copy; the input is unchanged.
    '''
    masked = ds.copy()
    for name in masked.data_vars:
        field = masked[name]
        # only variables that contain every dimension of the mask
        if set(mask.dims).issubset(field.dims):
            masked[name] = field.where(mask, 0.)
    masked['cellMask'] = 1.0*mask
    return masked
def add_si_mask(ds, mask, siconc, threshold=0.05):
    '''
    Apply a 2D sea-ice mask to the data set.

    The cell mask is restricted to cells whose sea-ice concentration exceeds
    ``threshold``, then applied exactly as in ``add_mask`` (variables zeroed
    where masked, the mask stored in 'cellMask').

    Parameters
    ----------
    ds : xarray.Dataset
        The data set to mask.
    mask : xarray.DataArray
        The 2D valid-cell mask.
    siconc : xarray.DataArray
        Sea-ice concentration used to further restrict the mask.
    threshold : float, optional
        Minimum sea-ice concentration for a cell to remain unmasked.

    Returns
    -------
    xarray.Dataset
        A masked copy of ds.
    '''
    combinedMask = np.logical_and(mask, siconc > threshold)
    # delegate to add_mask rather than duplicating its masking loop
    return add_mask(ds, combinedMask)
def get_cell_masks(dsMesh):
    '''Get 2D and 3D masks of valid MPAS cells from the mesh Dataset.

    A cell is valid in 2D if maxLevelCell > 0; in 3D, a (level, cell) entry
    is valid if the level index is below the cell's maxLevelCell.
    Returns the pair (cellMask2D, cellMask3D).
    '''
    levelIndex = xarray.DataArray.from_dict(
        {'dims': ('nVertLevels',),
         'data': np.arange(dsMesh.sizes['nVertLevels'])})
    mask2D = dsMesh.maxLevelCell > 0
    mask3D = levelIndex < dsMesh.maxLevelCell
    return mask2D, mask3D
def get_sea_floor_values(ds, dsMesh):
    '''Sample fields in the data set at the sea floor.

    For each variable with both nVertLevels and nCells dimensions, keep only
    the value in the deepest valid layer of each cell (given by maxLevelCell
    in dsMesh) and collapse the vertical dimension.  Cells with no valid
    layers (maxLevelCell <= 0) become NaN.  Returns a copy of ds.
    '''
    ds = ds.copy()
    cellMask2D = dsMesh.maxLevelCell > 0
    nVertLevels = dsMesh.sizes['nVertLevels']
    # zero-based indexing in python
    maxLevelCell = dsMesh.maxLevelCell - 1
    # per-level index used to select the deepest valid layer of each cell
    vertIndex = \
        xarray.DataArray.from_dict({'dims': ('nVertLevels',),
                                    'data': np.arange(nVertLevels)})
    for varName in ds.data_vars:
        # skip variables without a (vertical, cell) structure
        if 'nVertLevels' not in ds[varName].dims or \
                'nCells' not in ds[varName].dims:
            continue
        # mask only the values with the right vertical index
        ds[varName] = ds[varName].where(maxLevelCell == vertIndex)
        # Each vertical layer has at most one non-NaN value so the "sum"
        # over the vertical is used to collapse the array in the vertical
        # dimension
        ds[varName] = ds[varName].sum(dim='nVertLevels').where(cellMask2D)
    return ds
def open_mfdataset(fileNames, variableList=None,
                   chunks={'nCells': 32768, 'Time': 6}, daskThreads=6):
    '''Open a multi-file xarray Dataset, retaining only the listed variables.

    Parameters
    ----------
    fileNames : str or list
        File name(s) or glob pattern(s) to open (concatenated along 'Time').
    variableList : list of str, optional
        Variables to keep; all others (and their unused coordinates) are
        dropped.  If None, everything is kept.
    chunks : dict, optional
        Dask chunk sizes.  (Note: a shared mutable default — it is never
        mutated here, and the signature is kept for backward compatibility.)
    daskThreads : int, optional
        Upper bound on the size of the dask thread pool.

    Returns
    -------
    xarray.Dataset
    '''
    # 'scheduler' is the key dask.config recognizes; the original misspelled
    # it as 'schedular', so the setting was silently ignored
    dask.config.set(scheduler='threads',
                    pool=ThreadPool(min(multiprocessing.cpu_count(),
                                        daskThreads)))
    ds = xarray.open_mfdataset(fileNames, combine='nested', decode_cf=False,
                               decode_times=False, concat_dim='Time',
                               mask_and_scale=False, chunks=chunks)
    if variableList is not None:
        allvars = ds.data_vars.keys()
        # get set of variables to drop (all ds variables not in vlist)
        dropvars = set(allvars) - set(variableList)
        # drop spurious variables
        ds = ds.drop(dropvars)
        # must also drop all coordinates that are not associated with the
        # variables
        coords = set()
        for avar in ds.data_vars.keys():
            coords |= set(ds[avar].coords.keys())
        dropcoords = set(ds.coords.keys()) - coords
        # drop spurious coordinates
        ds = ds.drop(dropcoords)
    return ds
def write_netcdf(ds, fileName, fillValues=netCDF4.default_fillvals, unlimited=None):
    '''Write an xarray Dataset, assigning NetCDF4 fill values to numeric fields.

    Numeric variables/coordinates get the default fill value matching their
    dtype; non-numeric ones get no fill value.  The dataset's history
    attribute is updated (via update_history) before writing.
    '''
    encoding = {}
    for name in list(ds.data_vars.keys()) + list(ds.coords.keys()):
        dtype = ds[name].dtype
        if np.issubdtype(dtype, np.number):
            # pick the fill value whose type key matches this dtype
            for fillType, fillValue in fillValues.items():
                if dtype == np.dtype(fillType):
                    encoding[name] = {'_FillValue': fillValue}
                    break
        else:
            encoding[name] = {'_FillValue': None}
    update_history(ds)
    kwargs = {'encoding': encoding}
    if unlimited:
        kwargs['unlimited_dims'] = unlimited
    ds.to_netcdf(fileName, **kwargs)
def update_history(ds):
    '''Prepend the current command line (with a timestamp) to ds's history attribute.'''
    stamp = datetime.now().strftime("%a %b %d %H:%M:%S %Y") + ": " + \
        " ".join(sys.argv[:])
    if 'history' in ds.attrs:
        # newest entry first, existing history preserved below it
        ds.attrs['history'] = '\n'.join([stamp, ds.attrs['history']])
    else:
        ds.attrs['history'] = stamp
def convert_namelist_to_dict(fileName):
    '''Convert an MPAS namelist file to a python dictionary.

    Each ``key = value`` line becomes an entry with the key lower-cased and
    any surrounding quotes stripped from the value.  Lines without '=' (e.g.
    section headers) are ignored.
    '''
    nml = {}
    # anchor on end-of-line ($) rather than a literal newline so the last
    # line of a file without a trailing newline is still parsed (the original
    # pattern required '\n' and silently dropped such a line)
    regex = re.compile(r"^\s*(.*?)\s*=\s*['\"]*(.*?)['\"]*\s*$")
    with open(fileName) as f:
        for line in f:
            match = regex.findall(line)
            if len(match) > 0:
                nml[match[0][0].lower()] = match[0][1]
    return nml
def setup_cmor(varname, tables, user_input_path, component='ocean', table=None):
    '''Set up CMOR for MPAS-Ocean or MPAS-Seaice.

    Parameters
    ----------
    varname : str
        Name of the variable being written (used for the per-variable log).
    tables : str
        Path to the directory of CMOR tables.
    user_input_path : str
        Path to the CMOR user-input JSON file.
    component : {'ocean', 'seaice'}, optional
        Which component's default table to load when ``table`` is None.
    table : str, optional
        Explicit table file name, overriding the component default.

    Raises
    ------
    ValueError
        If the component is unknown or the table cannot be loaded.
    '''
    logdir = os.path.join(os.getcwd(), 'cmor_logs')
    # exist_ok avoids a race if another process creates the directory first
    os.makedirs(logdir, exist_ok=True)
    logfile = os.path.join(logdir, varname + '.log')
    cmor.setup(
        inpath=tables,
        netcdf_file_action=cmor.CMOR_REPLACE,
        logfile=logfile)
    cmor.dataset_json(str(user_input_path))
    if table is None:
        if component == 'ocean':
            table = 'CMIP6_Omon.json'
        elif component == 'seaice':
            table = 'CMIP6_SImon.json'
        else:
            raise ValueError('Unexpected component {}'.format(component))
    try:
        cmor.load_table(table)
    except Exception:
        # report the table that failed to load (the original message
        # incorrectly reported the variable name instead)
        raise ValueError('Unable to load table {}'.format(table))
def write_cmor(axes, ds, varname, varunits, d2f=True, **kwargs):
    '''Write a time series of a variable in the format expected by CMOR.

    Parameters
    ----------
    axes : list of dict
        Keyword arguments for cmor.axis(), one dict per axis.
    ds : xarray.Dataset
        The data set containing ``varname`` (and time/time_bnds if present).
    varname : str
        The variable to write.
    varunits : str
        The variable's units.
    d2f : bool, optional
        If True, convert float64 data to float32 before writing.
    **kwargs
        Passed through to cmor.variable().
    '''
    axis_ids = [cmor.axis(**axis) for axis in axes]
    # Default missing value.  Previously fillValue was assigned only inside
    # the float64-conversion branch below, so cmor.variable() raised
    # NameError for data that was already float32 or when d2f=False.
    fillValue = netCDF4.default_fillvals['f4']
    if d2f and ds[varname].dtype == np.float64:
        print('Converting {} to float32'.format(varname))
        ds[varname] = ds[varname].astype(np.float32)
        if np.any(np.isnan(ds[varname])):
            # replace NaNs with the fill value so CMOR treats them as missing
            mask = np.isfinite(ds[varname])
            ds[varname] = ds[varname].where(mask, fillValue)
    # create the cmor variable
    varid = cmor.variable(str(varname), str(varunits), axis_ids,
                          missing_value=fillValue, **kwargs)
    # write out the data
    try:
        if 'time' not in ds.dims:
            cmor.write(
                varid,
                ds[varname].values)
        else:
            cmor.write(
                varid,
                ds[varname].values,
                time_vals=ds.time.values,
                time_bnds=ds.time_bnds.values)
    except Exception:
        logging.exception('Error in cmor.write for {}'.format(varname))
        raise
    finally:
        cmor.close(varid)
def compute_moc_streamfunction(dsIn=None, dsMesh=None, dsMasks=None,
showProgress=True):
'''
An entry point to compute the MOC streamfunction (including Bolus velocity)
and write it to a new file.
'''
useCommandLine = dsIn is None and dsMesh is None and dsMasks is None
if useCommandLine:
# must be running from the command line
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-m", "--meshFileName", dest="meshFileName",
type=str, required=True,
help="An MPAS file with mesh data (edgesOnCell, "
"etc.)")
parser.add_argument("-r", "--regionMasksFileName",
dest="regionMasksFileName", type=str,
required=True,
help="An MPAS file with MOC region masks")
parser.add_argument("-i", "--inFileNames", dest="inFileNames",
type=str, required=True,
help="An MPAS monthly mean files from which to "
"compute transport.")
parser.add_argument("-o", "--outFileName", dest="outFileName",
type=str, required=True,
help="An output MPAS file with transport time "
"series")
args = parser.parse_args()
dsMesh = xarray.open_dataset(args.meshFileName)
dsMesh = dsMesh.isel(Time=0, drop=True)
dsMasks = xarray.open_dataset(args.regionMasksFileName)
variableList = ['timeMonthly_avg_normalVelocity',
'timeMonthly_avg_normalGMBolusVelocity',
'timeMonthly_avg_vertVelocityTop',
'timeMonthly_avg_vertGMBolusVelocityTop',
'timeMonthly_avg_layerThickness',
'xtime_startMonthly', 'xtime_endMonthly']
dsIn = open_mfdataset(args.inFileNames, variableList)
dsOut = xarray.Dataset()
dsIn = dsIn.chunk(chunks={'nCells': None, 'nVertLevels': None,
'Time': 6})
cellsOnEdge = dsMesh.cellsOnEdge - 1
totalNormalVelocity = \
(dsIn.timeMonthly_avg_normalVelocity +
dsIn.timeMonthly_avg_normalGMBolusVelocity)
layerThickness = dsIn.timeMonthly_avg_layerThickness
layerThicknessEdge = 0.5*(layerThickness[:, cellsOnEdge[:, 0], :] +
layerThickness[:, cellsOnEdge[:, 1], :])
totalVertVelocityTop = \
| |
<filename>python/paddle/distributed/auto_parallel/completion.py
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from copy import deepcopy
import time
from paddle.fluid import core
from paddle.fluid import framework
from .utils import print_program_with_dist_attr
from .operators import find_best_compatible_distributed_operator_impl
from .dist_context import get_default_distributed_context
from .dist_tensor import DistributedTensor
from .dist_op import DistributedOperator
from .dist_attribute import TensorDistributedAttribute
from .dist_attribute import OperatorDistributedAttribute
from paddle.distributed.fleet.meta_optimizers.common import OpRole
def compute_compatible_process_mesh(process_mesh_list):
    """Compute the compatible process mesh given a list of process meshes.

    Returns a deep copy of the merged mesh, or None if the input is empty
    or any pair of meshes is incompatible.
    """
    if not process_mesh_list:
        return None

    def _merge(first, second):
        # None acts as "no constraint": the other mesh wins outright.
        if first is None:
            return True, second
        if second is None:
            return True, first
        if first == second:
            return True, first
        if first.processes == second.processes:
            # Same process list: prefer the higher-dimensional topology.
            if len(first.topology) >= len(second.topology):
                return True, first
            return True, second
        # A mesh whose process set contains the other's is compatible with it.
        if set(first.processes).issubset(set(second.processes)):
            return True, second
        if set(second.processes).issubset(set(first.processes)):
            return True, first
        return False, None

    merged = None
    for mesh in process_mesh_list:
        ok, merged = _merge(merged, mesh)
        if not ok:
            return None
    return copy.deepcopy(merged)
def compute_compatible_dim_mapping(dim_mapping_list):
    """Compute the compatible dim mapping given a list of dim mappings.

    -1 means "replicated" and is compatible with anything; two different
    non-negative mappings are incompatible (returns None).
    """
    if not dim_mapping_list:
        return None
    merged = -1
    for mapping in dim_mapping_list:
        if merged == -1:
            # no constraint yet: adopt this mapping
            merged = mapping
        elif mapping != -1 and mapping != merged:
            # two conflicting shard dimensions
            return None
    return merged
def compute_compatible_dims_mapping(dims_mapping_list):
    """Compute the compatible dims mapping given a list of dims mappings.

    Each dims mapping is itself a list; all must be non-None and of equal
    length, and each position is merged with compute_compatible_dim_mapping.
    Returns None on any incompatibility.
    """
    if not dims_mapping_list:
        return None
    expected_length = len(dims_mapping_list[0])
    # every mapping must exist and have the same rank
    if any(dm is None or len(dm) != expected_length
           for dm in dims_mapping_list):
        return None
    merged = []
    for per_dim in zip(*dims_mapping_list):
        dim_mapping = compute_compatible_dim_mapping(list(per_dim))
        if dim_mapping is None:
            return None
        merged.append(dim_mapping)
    return merged
class Completer:
    def __init__(self, dist_context):
        # dist_context: the distributed context that stores and serves the
        # tensor/op distributed attributes this completer reads and updates.
        assert dist_context is not None
        self._dist_context = dist_context
    def _update_tensor_node_dims_mapping(self, tensor_node, fwd=True):
        """Merge a tensor node's dims_mapping with its producer (fwd=True) or
        consumer (fwd=False) ops' mappings.

        Only ops on the same process mesh as the tensor contribute.  Returns
        True if the tensor's dims_mapping was changed, False otherwise
        (including when the node is not a var, is a reader tensor, or its
        dims_mapping was explicitly annotated by the user).
        """
        changed = False
        if (not tensor_node.is_var()) or (tensor_node.var() is None):
            return False
        tensor_desc = tensor_node.var()
        # Skip reader tensor
        if tensor_desc.type() == core.VarDesc.VarType.READER:
            return False
        tensor_dist_attr = self._dist_context.get_tensor_dist_attr_for_graph(
            tensor_node)
        assert tensor_dist_attr is not None
        # user annotations take precedence and are never overwritten
        if tensor_dist_attr.is_annotated("dims_mapping"):
            return False
        tensor_dims_mapping = tensor_dist_attr.dims_mapping
        if fwd:
            # forward pass: merge with the output mappings of producer ops
            dims_mapping_list = []
            for pred_op_node in tensor_node.inputs:
                if pred_op_node.op() is not None:
                    # reader ops do not carry meaningful dims mappings
                    if pred_op_node.op().type() == "create_py_reader" \
                        or pred_op_node.op().type() == "create_double_buffer_reader" \
                        or pred_op_node.op().type() == "read":
                        continue
                    op_dist_attr = self._dist_context.get_op_dist_attr_for_graph(
                        pred_op_node)
                    # only merge across ops that share the tensor's mesh
                    if op_dist_attr.process_mesh == tensor_dist_attr.process_mesh:
                        op_dims_mapping = op_dist_attr.get_output_dims_mapping(
                            tensor_desc.name())
                        dims_mapping_list.append(op_dims_mapping)
            dims_mapping_list.append(tensor_dims_mapping)
            compatible_dims_mapping = compute_compatible_dims_mapping(
                dims_mapping_list)
            if (compatible_dims_mapping is not None) and \
                    (compatible_dims_mapping != tensor_dims_mapping):
                tensor_dist_attr.dims_mapping = compatible_dims_mapping
                changed = True
        else:
            # backward pass: merge with the input mappings of consumer ops
            dims_mapping_list = []
            for succ_op_node in tensor_node.outputs:
                if succ_op_node.op() is not None:
                    # reader ops do not carry meaningful dims mappings
                    if succ_op_node.op().type() == "create_py_reader" \
                        or succ_op_node.op().type() == "create_double_buffer_reader" \
                        or succ_op_node.op().type() == "read":
                        continue
                    op_dist_attr = self._dist_context.get_op_dist_attr_for_graph(
                        succ_op_node)
                    # only merge across ops that share the tensor's mesh
                    if op_dist_attr.process_mesh == tensor_dist_attr.process_mesh:
                        op_dims_mapping = op_dist_attr.get_input_dims_mapping(
                            tensor_desc.name())
                        dims_mapping_list.append(op_dims_mapping)
            dims_mapping_list.append(tensor_dims_mapping)
            compatible_dims_mapping = compute_compatible_dims_mapping(
                dims_mapping_list)
            if (compatible_dims_mapping is not None) and \
                    (compatible_dims_mapping != tensor_dims_mapping):
                tensor_dist_attr.dims_mapping = compatible_dims_mapping
                changed = True
        return changed
    def _update_op_node_dims_mapping(self, op_node, fwd=True):
        """Merge an op node's input (fwd=True) or output (fwd=False) dims
        mappings with the corresponding tensors' mappings, then let the best
        compatible distributed-operator implementation further update them.

        Returns True if any dims mapping was changed.  Reader ops are skipped.
        """
        changed = False
        if (not op_node.is_op()) or (op_node.op() is None):
            return False
        # Skip reader op
        op_desc = op_node.op()
        if op_desc.type() == "create_py_reader" \
            or op_desc.type() == "create_double_buffer_reader" \
            or op_desc.type() == "read":
            return False
        dist_op = self._dist_context.get_dist_op_for_graph(op_node)
        op_dist_attr = dist_op.dist_attr
        if fwd:
            # forward: pull each input tensor's mapping into the op
            for tensor_node in op_node.inputs:
                if tensor_node.var() is not None:
                    if tensor_node.var().type() == core.VarDesc.VarType.READER:
                        continue
                    tensor_desc = tensor_node.var()
                    # user-annotated input mappings are never overwritten
                    if op_dist_attr.is_annotated_input_dims_mapping(
                            tensor_desc.name()):
                        continue
                    tensor_dist_attr = self._dist_context.get_tensor_dist_attr_for_graph(
                        tensor_node)
                    # only merge when op and tensor share a process mesh
                    if op_dist_attr.process_mesh == tensor_dist_attr.process_mesh:
                        tensor_dims_mapping = tensor_dist_attr.dims_mapping
                        op_dims_mapping = op_dist_attr.get_input_dims_mapping(
                            tensor_desc.name())
                        compatible_dims_mapping = compute_compatible_dims_mapping(
                            [op_dims_mapping, tensor_dims_mapping])
                        if (compatible_dims_mapping is not None) and \
                                (compatible_dims_mapping != op_dims_mapping):
                            op_dist_attr.set_input_dims_mapping(
                                tensor_desc.name(), compatible_dims_mapping)
                            changed = True
            # Find the most compatible implementations from the distributed operator
            op_dist_impl = find_best_compatible_distributed_operator_impl(
                dist_op, fwd=True)
            assert op_dist_impl is not None, "Cannot find the dist op implementation."
            dim_changed = op_dist_impl.update_dims_mapping(dist_op)
            if dim_changed:
                changed = True
            # only record the impl when it is fully compatible with the op
            if op_dist_impl.is_auto_compatible(dist_op):
                if op_dist_impl.type == "elementwise":
                    op_dist_attr.impl_type = "default"
                else:
                    op_dist_attr.impl_type = op_dist_impl.type
                op_dist_attr.impl_idx = op_dist_impl.idx
        else:
            # backward: pull each output tensor's mapping into the op
            for tensor_node in op_node.outputs:
                if tensor_node.var() is not None:
                    if tensor_node.var().type() == core.VarDesc.VarType.READER:
                        continue
                    tensor_desc = tensor_node.var()
                    # user-annotated output mappings are never overwritten
                    if op_dist_attr.is_annotated_output_dims_mapping(
                            tensor_desc.name()):
                        continue
                    tensor_dist_attr = self._dist_context.get_tensor_dist_attr_for_graph(
                        tensor_node)
                    # only merge when op and tensor share a process mesh
                    if op_dist_attr.process_mesh == tensor_dist_attr.process_mesh:
                        tensor_dims_mapping = tensor_dist_attr.dims_mapping
                        op_dims_mapping = op_dist_attr.get_output_dims_mapping(
                            tensor_desc.name())
                        compatible_dims_mapping = compute_compatible_dims_mapping(
                            [op_dims_mapping, tensor_dims_mapping])
                        if (compatible_dims_mapping is not None) and \
                                (compatible_dims_mapping != op_dims_mapping):
                            op_dist_attr.set_output_dims_mapping(
                                tensor_desc.name(), compatible_dims_mapping)
                            changed = True
            # Find the most compatible implementations from the distributed operator
            op_dist_impl = find_best_compatible_distributed_operator_impl(
                dist_op, fwd=False)
            assert op_dist_impl is not None, "Cannot find the dist op implementation."
            dim_changed = op_dist_impl.update_dims_mapping(dist_op)
            if dim_changed:
                changed = True
            # only record the impl when it is fully compatible with the op
            if op_dist_impl.is_auto_compatible(dist_op):
                if op_dist_impl.type == "elementwise":
                    op_dist_attr.impl_type = "default"
                else:
                    op_dist_attr.impl_type = op_dist_impl.type
                op_dist_attr.impl_idx = op_dist_impl.idx
        return changed
def _update_process_mesh(self):
def _find_nearset_node(nodes, idx):
for node in reversed(nodes[:idx]):
node_dist_attr = self._dist_context.get_dist_attr_for_graph(
node)
if node_dist_attr.process_mesh is not None:
return node
total_reach_fix_point = False
while not total_reach_fix_point:
total_changed = False
for is_fwd in [True, False]:
all_nodes = self._dist_context.serial_ordered_nodes \
if is_fwd else reversed(self._dist_context.serial_ordered_nodes)
reach_fix_point = False
while not reach_fix_point:
changed = False
for idx, node in enumerate(all_nodes):
nearest_node = _find_nearset_node(
self._dist_context.serial_ordered_nodes, idx)
if nearest_node is None:
continue
nearest_node_dis_attr = self._dist_context.get_dist_attr_for_graph(
nearest_node)
nearest_process_mesh = nearest_node_dis_attr.process_mesh
cur_node_dist_attr = self._dist_context.get_dist_attr_for_graph(
node)
cur_process_mesh = cur_node_dist_attr.process_mesh
compatible_process_mesh = compute_compatible_process_mesh(
[cur_process_mesh, nearest_process_mesh])
if compatible_process_mesh is not None \
and cur_process_mesh != compatible_process_mesh:
cur_node_dist_attr.process_mesh = compatible_process_mesh
changed = True
if changed:
reach_fix_point = False
total_changed = True
else:
reach_fix_point = True
if total_changed:
total_reach_fix_point = False
else:
total_reach_fix_point = True
def _update_dims_mapping(self):
# Complete dims_mapping for each node
reach_fix_point = False
while not reach_fix_point:
changed = False
for is_fwd in [True, False]:
all_nodes = self._dist_context.serial_ordered_nodes \
if is_fwd else reversed(self._dist_context.serial_ordered_nodes)
for node in all_nodes:
if node.is_var() and node.var() is not None:
tensor_changed = self._update_tensor_node_dims_mapping(
node, fwd=is_fwd)
if tensor_changed:
changed = True
if node.is_op() and node.op() is not None:
op_changed = self._update_op_node_dims_mapping(
node, fwd=is_fwd)
if op_changed:
changed = True
if changed:
reach_fix_point = False
else:
reach_fix_point = True
    def complete_forward_annotation(self, serial_main_program):
        """ Complete annotation for the partial annotated serial_main_program.
        Arguments:
            serial_main_program: partial annotated serial_main_program.
        Returns:
            serial_main_program: completed annotated serial_main_program.
        """
        # Use the default distributed context for completion if there is no one
        self._dist_context.serial_program = serial_main_program
        # Initialize distributed attributes for all var and op node in serial_main_program
        self._dist_context.init_dist_attr_for_program()
        # Initialize distributed attributes for all var and op node in graph
        self._dist_context.init_dist_attr_for_graph()
        # Propagate process meshes to a fixed point first, then dims mappings
        self._update_process_mesh()
        # Complete dims_mapping for each node
        self._update_dims_mapping()
        # Copy the corresponding distributed attribute from graph to serial_main_program
        self._dist_context.copy_dist_attr_from_graph_to_program()
        self._dist_context.clear_dist_info_for_graph()
        # print_serial_main_program_with_dist_attr(serial_main_program, self._dist_context)
        # Do the validation check and amend some completion
        self._dist_context.amend_dist_attr_for_program()
        # print_serial_main_program_with_dist_attr(serial_main_program, self._dist_context)
        self._dist_context.validate_dist_attr_for_program()
        return serial_main_program
def complete_backward_annotation(self, serial_main_program):
"""Complete the annotation of vars and ops in the backward phase for parallel program."""
def _is_grad_var_name(name):
if "@GRAD" in name:
return True
return False
def _get_forward_varname_from_grad_varname(grad_var_name):
assert _is_grad_var_name(
grad_var_name), "[{}] is not a grad varnme.".format(
grad_var_name)
return grad_var_name[:grad_var_name.find("@GRAD")]
def _get_op_by_id(ops, id):
for op in ops:
if op.desc.id() == id:
return op
return None
first_backward_op_idx = -1
for idx, op in enumerate(serial_main_program.global_block().ops):
if int(op.attr('op_role')) == int(
int(core.op_proto_and_checker_maker.OpRole.Backward) | int(
core.op_proto_and_checker_maker.OpRole.Loss)):
assert op.type == "fill_constant"
first_backward_op_idx = idx
break
assert first_backward_op_idx >= 0, "No backward procedure found in this program."
ops = list(serial_main_program.global_block().ops)
vars = serial_main_program.global_block().vars
dist_op_context = self._dist_context.dist_op_context
for idx in range(first_backward_op_idx, len(ops)):
# complete the initial grad loss op
if idx == first_backward_op_idx:
assert ops[idx].type == "fill_constant"
assert len(
ops[idx].input_arg_names
) == 0, "first backward op should has only ONE output, but got [{}]".format(
len(ops[idx].input_arg_names))
assert len(
ops[idx].output_arg_names
) == 1, "first backward op should has only ONE output, but got [{}]".format(
len(ops[idx].output_arg_names))
grad_var = vars[ops[idx].output_arg_names[0]]
forward_var_name = _get_forward_varname_from_grad_varname(
grad_var.name)
forward_var = vars[forward_var_name]
# TODO complete other attribte for grad var
tensor_dist_attr = TensorDistributedAttribute()
process_mesh = self._dist_context.get_tensor_dist_attr_for_program(
forward_var).process_mesh
dims_mapping = self._dist_context.get_tensor_dist_attr_for_program(
| |
<reponame>chncwang/mindspore<filename>model_zoo/official/nlp/tinybert/src/tinybert_for_gd_td.py<gh_stars>1-10
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tinybert model"""
import re
import mindspore.nn as nn
from mindspore import context
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops import composite as C
from mindspore.common.tensor import Tensor
from mindspore.common import dtype as mstype
from mindspore.common.parameter import Parameter
from mindspore.communication.management import get_group_size
from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
from mindspore.context import ParallelMode
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from .tinybert_model import BertModel, TinyBertModel, BertModelCLS
# Gradient-clipping configuration used by the training wrappers in this
# module: type 1 (clip by norm) with a bound of 1.0.
GRADIENT_CLIP_TYPE = 1
GRADIENT_CLIP_VALUE = 1.0
# Graph-composable multi-type function; the overload below is attached via
# the register decorator.
clip_grad = C.MultitypeFuncGraph("clip_grad")
@clip_grad.register("Number", "Number", "Tensor")
def _clip_grad(clip_type, clip_value, grad):
    """
    Clip a single gradient tensor.
    Inputs:
        clip_type (int): The way to clip, 0 for 'value', 1 for 'norm'.
        clip_value (float): Specifies how much to clip.
        grad (Tensor): The gradient to clip.
    Outputs:
        Tensor, the clipped gradient.
    """
    # any other clip_type is a no-op: pass the gradient through unchanged
    if clip_type not in (0, 1):
        return grad
    dt = F.dtype(grad)
    if clip_type == 0:
        # element-wise clamp into [-clip_value, clip_value] (cast to grad dtype)
        new_grad = C.clip_by_value(grad, F.cast(F.tuple_to_array((-clip_value,)), dt),
                                   F.cast(F.tuple_to_array((clip_value,)), dt))
    else:
        # rescale so the tensor's L2 norm is at most clip_value
        new_grad = nn.ClipByNorm()(grad, F.cast(F.tuple_to_array((clip_value,)), dt))
    return new_grad
# Multi-type graph function that rescales gradients by the inverse loss scale.
grad_scale = C.MultitypeFuncGraph("grad_scale")
reciprocal = P.Reciprocal()
@grad_scale.register("Tensor", "Tensor")
def tensor_grad_scale(scale, grad):
    # Undo loss scaling: grad / scale, expressed as grad * (1 / scale).
    return grad * reciprocal(scale)
class ClipGradients(nn.Cell):
    """
    Clip gradients.

    Cell form of the gradient clipping in ``_clip_grad`` above, applied to a
    whole tuple of gradients at once.

    Args:
        grads (tuple): Tuple of gradient tensors.
        clip_type (int): The way to clip, 0 for 'value', 1 for 'norm'.
        clip_value (float): Specifies how much to clip.
    Returns:
        tuple, the clipped gradients.
    """
    def __init__(self):
        super(ClipGradients, self).__init__()
        self.clip_by_norm = nn.ClipByNorm()
        self.cast = P.Cast()
        self.dtype = P.DType()
    def construct(self,
                  grads,
                  clip_type,
                  clip_value):
        """clip gradients"""
        # unknown clip_type: pass the gradients through unchanged
        if clip_type not in (0, 1):
            return grads
        new_grads = ()
        for grad in grads:
            dt = self.dtype(grad)
            if clip_type == 0:
                # element-wise clamp into [-clip_value, clip_value]
                t = C.clip_by_value(grad, self.cast(F.tuple_to_array((-clip_value,)), dt),
                                    self.cast(F.tuple_to_array((clip_value,)), dt))
            else:
                # rescale so each gradient's L2 norm is at most clip_value
                t = self.clip_by_norm(grad, self.cast(F.tuple_to_array((clip_value,)), dt))
            new_grads = new_grads + (t,)
        return new_grads
class SoftCrossEntropy(nn.Cell):
    """SoftCrossEntropy loss.

    Cross entropy between the softmax of ``targets`` (soft teacher labels)
    and the log-softmax of ``predicts``, averaged over all elements and
    returned as float32.
    """
    def __init__(self):
        super(SoftCrossEntropy, self).__init__()
        self.log_softmax = P.LogSoftmax(axis=-1)
        self.softmax = P.Softmax(axis=-1)
        self.reduce_mean = P.ReduceMean()
        self.cast = P.Cast()
    def construct(self, predicts, targets):
        # student log-probabilities and teacher probabilities over the last axis
        likelihood = self.log_softmax(predicts)
        target_prob = self.softmax(targets)
        loss = self.reduce_mean(-target_prob * likelihood)
        return self.cast(loss, mstype.float32)
class BertNetworkWithLoss_gd(nn.Cell):
    """
    General-distillation network: TinyBERT student trained to match a frozen
    BERT teacher's attention maps and hidden representations.

    Args:
        teacher_config (BertConfig): Config of the teacher BertModel.
        teacher_ckpt (str): Path to the teacher checkpoint to load.
        student_config (BertConfig): Config of the student TinyBertModel.
        is_training (bool): Specifies whether the student uses training mode.
        use_one_hot_embeddings (bool): Specifies whether to use one-hot for
            embeddings. Default: False.
        is_att_fit (bool): Whether to include the attention-matching loss.
        is_rep_fit (bool): Whether to include the representation-matching loss.
    Returns:
        Tensor, the total distillation loss (float32).
    """
    def __init__(self, teacher_config, teacher_ckpt, student_config, is_training, use_one_hot_embeddings=False,
                 is_att_fit=True, is_rep_fit=True):
        super(BertNetworkWithLoss_gd, self).__init__()
        # load teacher model
        self.teacher = BertModel(teacher_config, False, use_one_hot_embeddings)
        param_dict = load_checkpoint(teacher_ckpt)
        new_param_dict = {}
        # checkpoint keys are prefixed 'bert.bert.'; remap them to 'teacher.'
        for key, value in param_dict.items():
            new_key = re.sub('^bert.bert.', 'teacher.', key)
            new_param_dict[new_key] = value
        load_param_into_net(self.teacher, new_param_dict)
        # no_grad: freeze the teacher entirely
        self.teacher.set_train(False)
        params = self.teacher.trainable_params()
        for param in params:
            param.requires_grad = False
        # student model
        self.bert = TinyBertModel(student_config, is_training, use_one_hot_embeddings)
        self.cast = P.Cast()
        # projects student hidden states up to the teacher's hidden size
        self.fit_dense = nn.Dense(student_config.hidden_size,
                                  teacher_config.hidden_size).to_float(teacher_config.compute_type)
        self.teacher_layers_num = teacher_config.num_hidden_layers
        self.student_layers_num = student_config.num_hidden_layers
        # each student layer distills from every layers_per_block-th teacher layer
        self.layers_per_block = int(self.teacher_layers_num / self.student_layers_num)
        self.is_att_fit = is_att_fit
        self.is_rep_fit = is_rep_fit
        self.loss_mse = nn.MSELoss()
        self.select = P.Select()
        self.zeroslike = P.ZerosLike()
        self.dtype = teacher_config.dtype
    def construct(self,
                  input_ids,
                  input_mask,
                  token_type_id):
        """general distill network with loss"""
        # teacher model
        _, _, _, teacher_seq_output, teacher_att_output = self.teacher(input_ids, token_type_id, input_mask)
        # student model
        _, _, _, student_seq_output, student_att_output = self.bert(input_ids, token_type_id, input_mask)
        total_loss = 0
        if self.is_att_fit:
            # pair each student attention map with the last teacher map of its block
            selected_teacher_att_output = ()
            selected_student_att_output = ()
            for i in range(self.student_layers_num):
                selected_teacher_att_output += (teacher_att_output[(i + 1) * self.layers_per_block - 1],)
                selected_student_att_output += (student_att_output[i],)
            att_loss = 0
            for i in range(self.student_layers_num):
                student_att = selected_student_att_output[i]
                teacher_att = selected_teacher_att_output[i]
                # zero out large-negative masked attention scores before MSE
                student_att = self.select(student_att <= self.cast(-100.0, mstype.float32), self.zeroslike(student_att),
                                          student_att)
                teacher_att = self.select(teacher_att <= self.cast(-100.0, mstype.float32), self.zeroslike(teacher_att),
                                          teacher_att)
                att_loss += self.loss_mse(student_att, teacher_att)
            total_loss += att_loss
        if self.is_rep_fit:
            # match hidden states (including embeddings: student_layers_num + 1)
            selected_teacher_seq_output = ()
            selected_student_seq_output = ()
            for i in range(self.student_layers_num + 1):
                selected_teacher_seq_output += (teacher_seq_output[i * self.layers_per_block],)
                # project the student representation to the teacher's width
                fit_dense_out = self.fit_dense(student_seq_output[i])
                fit_dense_out = self.cast(fit_dense_out, self.dtype)
                selected_student_seq_output += (fit_dense_out,)
            rep_loss = 0
            for i in range(self.student_layers_num + 1):
                teacher_rep = selected_teacher_seq_output[i]
                student_rep = selected_student_seq_output[i]
                rep_loss += self.loss_mse(student_rep, teacher_rep)
            total_loss += rep_loss
        return self.cast(total_loss, mstype.float32)
class BertTrainWithLossScaleCell(nn.Cell):
    """
    Encapsulation class of bert network training.

    Append an optimizer to the training network after that the construct
    function can be called to create the backward graph.

    Adds dynamic loss scaling with NPU float-status overflow detection:
    the optimizer step is skipped whenever an inf/nan is detected in the
    scaled gradients.

    Args:
        network (Cell): The training network. Note that loss function should have been added.
        optimizer (Optimizer): Optimizer for updating the weights.
        scale_update_cell (Cell): Cell to do the loss scale. Default: None.
    """

    def __init__(self, network, optimizer, scale_update_cell=None):
        super(BertTrainWithLossScaleCell, self).__init__(auto_prefix=False)
        self.network = network
        self.network.set_grad()
        self.weights = optimizer.parameters
        self.optimizer = optimizer
        # sens_param=True lets us feed the loss-scale value as the initial
        # gradient (sensitivity) of the backward pass.
        self.grad = C.GradOperation(get_by_list=True,
                                    sens_param=True)
        self.reducer_flag = False
        self.allreduce = P.AllReduce()
        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
        if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
            self.reducer_flag = True
        self.grad_reducer = F.identity
        self.degree = 1
        if self.reducer_flag:
            self.degree = get_group_size()
            self.grad_reducer = DistributedGradReducer(optimizer.parameters, False, self.degree)
        self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
        self.cast = P.Cast()
        # NPU float-status ops used to detect inf/nan during backprop.
        self.alloc_status = P.NPUAllocFloatStatus()
        self.get_status = P.NPUGetFloatStatus()
        self.clear_status = P.NPUClearFloatStatus()
        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.base = Tensor(1, mstype.float32)
        self.less_equal = P.LessEqual()
        self.hyper_map = C.HyperMap()
        self.loss_scale = None
        self.loss_scaling_manager = scale_update_cell
        if scale_update_cell:
            self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32))

    def construct(self,
                  input_ids,
                  input_mask,
                  token_type_id,
                  sens=None):
        """Defines the computation performed."""
        weights = self.weights
        loss = self.network(input_ids,
                            input_mask,
                            token_type_id)
        # Use the managed loss scale unless an explicit sens override is given.
        if sens is None:
            scaling_sens = self.loss_scale
        else:
            scaling_sens = sens
        # alloc status and clear should be right before gradoperation
        init = self.alloc_status()
        init = F.depend(init, loss)
        clear_status = self.clear_status(init)
        scaling_sens = F.depend(scaling_sens, clear_status)
        grads = self.grad(self.network, weights)(input_ids,
                                                 input_mask,
                                                 token_type_id,
                                                 self.cast(scaling_sens,
                                                           mstype.float32))
        # apply grad reducer on grads
        grads = self.grad_reducer(grads)
        # Undo the loss scaling (and data-parallel degree) before clipping.
        grads = self.hyper_map(F.partial(grad_scale, scaling_sens * self.degree), grads)
        grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads)
        # Read back the float status; a non-zero sum signals an overflow.
        init = F.depend(init, grads)
        get_status = self.get_status(init)
        init = F.depend(init, get_status)
        flag_sum = self.reduce_sum(init, (0,))
        if self.is_distributed:
            # sum overflow flag over devices
            flag_reduce = self.allreduce(flag_sum)
            cond = self.less_equal(self.base, flag_reduce)
        else:
            cond = self.less_equal(self.base, flag_sum)
        overflow = cond
        if sens is None:
            # Let the loss-scale manager shrink/grow the scale on overflow.
            overflow = self.loss_scaling_manager(self.loss_scale, cond)
        if overflow:
            # Skip the parameter update entirely when gradients overflowed.
            succ = False
        else:
            succ = self.optimizer(grads)
        ret = (loss, cond, scaling_sens)
        return F.depend(ret, succ)
class BertTrainCell(nn.Cell):
    """
    Encapsulation class of bert network training.

    Append an optimizer to the training network after that the construct
    function can be called to create the backward graph.

    Unlike BertTrainWithLossScaleCell this variant uses a fixed sens value
    and performs no overflow detection.

    Args:
        network (Cell): The training network. Note that loss function should have been added.
        optimizer (Optimizer): Optimizer for updating the weights.
        sens (Number): The adjust parameter. Default: 1.0.
    """

    def __init__(self, network, optimizer, sens=1.0):
        super(BertTrainCell, self).__init__(auto_prefix=False)
        self.network = network
        self.network.set_grad()
        self.weights = optimizer.parameters
        self.optimizer = optimizer
        self.sens = sens
        # sens_param=True lets us feed `sens` as the initial gradient.
        self.grad = C.GradOperation(get_by_list=True,
                                    sens_param=True)
        self.reducer_flag = False
        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
        if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
            self.reducer_flag = True
        self.grad_reducer = F.identity
        self.degree = 1
        if self.reducer_flag:
            mean = context.get_auto_parallel_context("gradients_mean")
            self.degree = get_group_size()
            self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, self.degree)
        self.cast = P.Cast()
        self.hyper_map = C.HyperMap()

    def construct(self,
                  input_ids,
                  input_mask,
                  token_type_id):
        """Defines the computation performed."""
        weights = self.weights
        loss = self.network(input_ids,
                            input_mask,
                            token_type_id)
        # Backward pass with a constant sensitivity (initial gradient).
        grads = self.grad(self.network, weights)(input_ids,
                                                 input_mask,
                                                 token_type_id,
                                                 self.cast(F.tuple_to_array((self.sens,)),
                                                           mstype.float32))
        # apply grad reducer on grads
        grads = self.grad_reducer(grads)
        grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads)
        succ = self.optimizer(grads)
        # Tie the optimizer update into the graph so it runs with the loss.
        return F.depend(loss, succ)
class BertNetworkWithLoss_td(nn.Cell):
"""
Provide bert pre-training loss through network.
Args:
config (BertConfig): The config of BertModel.
is_training (bool): Specifies whether to use the training mode.
use_one_hot_embeddings (bool): Specifies whether to use one-hot for embeddings. Default: False.
Returns:
Tensor, the loss of the network.
"""
def __init__(self, teacher_config, teacher_ckpt, student_config, student_ckpt,
is_training, task_type, num_labels, use_one_hot_embeddings=False,
is_predistill=True, is_att_fit=True, is_rep_fit=True,
temperature=1.0, dropout_prob=0.1):
super(BertNetworkWithLoss_td, self).__init__()
# load teacher model
self.teacher = BertModelCLS(teacher_config, False, num_labels, dropout_prob,
use_one_hot_embeddings, "teacher")
param_dict = load_checkpoint(teacher_ckpt)
new_param_dict = {}
for key, value in param_dict.items():
new_key = re.sub('^bert.', 'teacher.', key)
new_param_dict[new_key] = value
load_param_into_net(self.teacher, new_param_dict)
# no_grad
self.teacher.set_train(False)
params = self.teacher.trainable_params()
for param in params:
param.requires_grad = False
# load student model
self.bert = BertModelCLS(student_config, is_training, num_labels, dropout_prob,
use_one_hot_embeddings, "student")
param_dict = load_checkpoint(student_ckpt)
if is_predistill:
new_param_dict = {}
for key, value in param_dict.items():
new_key = re.sub('tinybert_', 'bert_', 'bert.' + key)
new_param_dict[new_key] = value
load_param_into_net(self.bert, new_param_dict)
else:
new_param_dict = {}
for key, value in param_dict.items():
new_key = re.sub('tinybert_', 'bert_', key)
new_param_dict[new_key] = value
load_param_into_net(self.bert, new_param_dict)
self.cast = P.Cast()
| |
contabiliza todas as jogadas,
# isto é, numa mesa de 4 jogadores essa lista tem tamanho 4.
# Como só me interesso pela jogada dos outros jogadores,
# só preciso das n-1 primeiras entradas dessa lista.
if carta_ja_jogada != '':
# Se tem carta jogada, adiciona na lista
lista_de_entrada_do_decisor.append(self._ID_de_carta[
carta_ja_jogada.numero
])
lista_de_entrada_do_decisor.append(self._ID_de_naipe[
carta_ja_jogada.naipe
])
else:
# Caso contrário, adiciona 0 para o número e o naipe
lista_de_entrada_do_decisor.append(0)
lista_de_entrada_do_decisor.append(0)
# A lista de cartas_jogadas_na_mesa pode estar incompleta.
# Neste caso, deve-se preencher lista_de_entrada_do_decisor com 0s.
while (len(lista_de_entrada_do_decisor) < 14):
lista_de_entrada_do_decisor.append(0)
# Fazendo uma checagem de segurança
if (len(lista_de_entrada_do_decisor) != 14):
# Pare tudo que deu algo errado
raise
# Com essa lista montada, podemos colocá-la na entrada do decisor,
# gerando uma saída com três elementos (um para cada carta)
# A carta a ser jogada vai depender de qual desses elementos é o maior
# decisao = self._tomador_de_decisao(tf.convert_to_tensor(np.array([[lista_de_entrada_do_decisor],], dtype=np.float32)))
decisao = self._tomador_de_decisao(np.array([lista_de_entrada_do_decisor,]))
#print(decisao[0][0])
# É possível que decisao seja um tensor, o que mudaria a forma de acesso.
# decisao[0] --> carta_1
# decisao[1] --> carta_2
# decisao[2] --> carta_2
# Procurar o índice do argumento máximo
escolha = np.argmax(decisao) + 1
# argmax retorna um valor entre 0 e 2. Por isso, adiciono 1 para ficar compatível
# com a lógica.
# Preciso fazer o tratamento caso a mão tenha menos de três cartas e o decisor
# acabe escolhendo uma posição que não tenha carta
if len(self._mao) < 3 and escolha == 3:
# Escolheu uma posição que não existe carta
if (len(self._mao) > 1 and (decisao[0][1] > decisao[0][0])):
# A segunda maior probabilidade é da carta 2
# e na mão há duas cartas
escolha = 2
else:
# A segunda maior probabilidade é da carta 1
# neste caso não importa a quantidade de cartas na mão
escolha = 1
if len(self._mao) < 2 and escolha == 2:
# Escolheu uma posição que não existe carta
# Neste caso, só podemos ter a opção 1
escolha = 1
return escolha
# Pegando os pesos da rede neural. Retorna um array do numpy.
def get_pesos_da_rede_neural(self):
return self._tomador_de_decisao.get_weights()
# Configurando os pesos da rede neural. Deve receber um array do numpy compatível com
# a arquitetura da rede
def set_pesos_da_rede_neural(self, novos_pesos):
self._tomador_de_decisao.set_weights(novos_pesos)
'''
class MesaDeBisca():
# Classe a ser usada como base ao jogo de bisca
def __init__(self, numero_de_jogadores):
self._baralho = None #BaralhoDeBisca()
self._numero_de_jogadores = numero_de_jogadores
self._jogadores = []
self._cartas_jogadas = ['' for p in range(numero_de_jogadores)]
self._quem_jogou_as_cartas = ['' for p in range(numero_de_jogadores)]
#A ideia é que o nome do jogador apareça em _quem_jogou_as_cartas
#A partir de sua posição, pegar a carta jogada
self._carta_da_mesa = None
# A ideia é usar o parâmetro do número de jogadores para o jogo saber quando pode começar
# _jogadores deve ser uma lista com _numero_de_jogadores JogadorDeBisca
self._equipe_A = []
self._pontos_A = 0
self._equipe_B = []
self._pontos_B = 0
# Após completar a lista _jogadores, as listas _equipe_? vão ser preenchidas
def adicionar_jogador(self, nome):
    """Interactively add a player named `nome` to the table.

    Prompts on stdin for the player type and retries until a valid
    option is entered. Does nothing (beyond a message) when the table
    is already full.
    """
    if (len(self._jogadores) < self._numero_de_jogadores):
        while(True):
            try:
                print('\n Escolha o tipo do jogador: ')
                print('(1) Jogador humano')
                print('(2) NPC Dummy')
                #print('(3) NPC Smart')
                tipo_do_jogador = input()
                if (int(tipo_do_jogador) == 1):
                    self._jogadores.append(JogadorDeBisca(nome))
                    print('Jogador humano adicionado!')
                    break
                elif (int(tipo_do_jogador) == 2):
                    self._jogadores.append(DummyPlayer(nome))
                    print('NPC Dummy adicionado!')
                    break
                #elif (int(tipo_do_jogador) == 3):
                #    self._jogadores.append(SmartPlayer(nome))
                #    print('NPC Smart adicionado!')
                #    break
                else:
                    # Out-of-range number: force a retry via the except below.
                    raise
            except:
                # NOTE(review): the bare except deliberately also catches the
                # ValueError raised by int() on non-numeric input, turning any
                # bad entry into a retry.
                print('Por favor, digite um numero entre as opcoes em parenteses!')
    else:
        print("Não é possível adicionar mais jogadores!")
def dividir_equipes(self):
# função a ser chamada para preencher as listas de equipes
if (len(self._jogadores) == self._numero_de_jogadores):
# adiciona na equipe A os jogadores que estiverem em uma ordem PAR na lista de jogadores
self._equipe_A = \
[player for player in self._jogadores if ((self._jogadores.index(player)% 2) == 0)]
# adiciona na equipe B os jogadores que estiverem em uma ordem ÍMPAR na lista de jogadores
self._equipe_B = \
[player for player in self._jogadores if ((self._jogadores.index(player)% 2) != 0)]
else:
print("A lista de jogadores precisa estar completa!")
@property
def equipe_A(self):
    # Players at even seats (filled by dividir_equipes).
    return self._equipe_A

@property
def equipe_B(self):
    # Players at odd seats (filled by dividir_equipes).
    return self._equipe_B

@property
def jogadores(self):
    # All seated players, in join order.
    return self._jogadores

def __len__(self):
    # Table length == number of cards still left in the deck.
    return self._baralho.__len__()

@property
def cartas_jogadas(self):
    # Cards played in the current trick ('' marks an empty slot).
    return self._cartas_jogadas

@property
def carta_da_mesa(self):
    # The face-up trump card.
    return self._carta_da_mesa
def puxar_uma_carta(self, index_do_jogador):
    """Deal one card to the player at `index_do_jogador` in self._jogadores.

    Draws from the deck while it is non-empty; once exhausted, hands out a
    copy of the table (trump) card instead — by the game's flow this
    branch should only ever run once per game.
    """
    if (len(self._baralho) > 0):
        # Deck still has cards: draw the top one.
        carta = self._baralho.tirar_uma_carta()
        self._jogadores[index_do_jogador].adicionar_carta_na_mao(carta)
    else:
        # Deck empty: give the player a fresh copy of the table card.
        carta = Carta(self._carta_da_mesa.naipe, self._carta_da_mesa.numero)
        self._jogadores[index_do_jogador].adicionar_carta_na_mao(carta)
# def mostrar_carta_do_jogador(self, index_do_jogador):
# # mostra as cartas do jogador identificado por index_do_jogador (posição da lista de jogadores)
# print(self._jogadores[index_do_jogador].nome)
# print(self._jogadores[index_do_jogador])
def prepara_a_mesa(self):
    """Create and shuffle the deck, reveal the trump card and deal the
    starting hand (tamanho_max_mao cards) to every player."""
    # The deck layout depends on whether the player count is even
    # (flag forwarded to BaralhoDeBisca).
    self._baralho = BaralhoDeBisca(self._numero_de_jogadores % 2 == 0)
    self._baralho.embaralhar()
    self._carta_da_mesa = self._baralho.tirar_uma_carta()
    # enumerate() replaces the original self._jogadores.index(player)
    # lookup, which was O(n^2) and wrong for players comparing equal.
    for posicao, jogador in enumerate(self._jogadores):
        while (len(jogador) < JogadorDeBisca.tamanho_max_mao):
            # Keep dealing until the player's hand is full.
            self.puxar_uma_carta(posicao)
def atualiza_cartas_jogadas(self, player, carta_jogada, vez):
# adiciona na lista de jogadas um elemento do tipo [jogador, carta_jogada]
# vez indica a posição nas listas. Vai de 0 a numero_de_jogadores-1
if (vez < self._numero_de_jogadores):
self._quem_jogou_as_cartas[vez] = player
self._cartas_jogadas[vez] = carta_jogada
else:
print("Erro! O número excedeu a quantidade de jogadores!")
def encerra_rodada (self):
    """Close the current trick.

    Finds the strongest played card, awards every card of the trick to
    the winning team's point pile, resets the played-card bookkeeping and
    returns the winning player so the game knows who leads next.
    """
    pos = 0  # current index into _cartas_jogadas / _quem_jogou_as_cartas
    jogador_mais_forte = None
    carta_mais_forte = self._cartas_jogadas[pos]  # comparison baseline (lead card)
    for jogada in self._cartas_jogadas:
        if jogada.naipe == carta_mais_forte.naipe:
            # Same suit: points decide the stronger card.
            # NOTE(review): `>=` lets a LATER card of equal points steal the
            # trick, and zero-point cards ignore number ranking — confirm
            # against the intended bisca rules.
            if jogada.pontos >= carta_mais_forte.pontos:
                carta_mais_forte = jogada
                jogador_mais_forte = self._quem_jogou_as_cartas[pos]  # player
        else:
            # Different suit: it can only win if it is a trump
            # (same suit as the table card).
            if jogada.naipe == self._carta_da_mesa.naipe:
                carta_mais_forte = jogada
                jogador_mais_forte = self._quem_jogou_as_cartas[pos]  # player
        pos += 1
    if jogador_mais_forte in self._equipe_A:
        # Winner is in team A: all cards of the trick go to team A.
        # NOTE(review): the cards are added to EVERY team member's pile —
        # presumably scoring reads a single pile per team; verify.
        for carta in self._cartas_jogadas:
            for jogador in self._equipe_A:
                jogador.adicionar_carta_na_pilha(carta)
    else:
        # Otherwise the trick's cards go to team B.
        for carta in self._cartas_jogadas:
            for jogador in self._equipe_B:
                jogador.adicionar_carta_na_pilha(carta)
    # Reset the played-cards bookkeeping for the next trick.
    self._cartas_jogadas = ['' for p in range(self._numero_de_jogadores)]
    self._quem_jogou_as_cartas = ['' for p in range(self._numero_de_jogadores)]
    return jogador_mais_forte
def acabou_jogo(self):
# verifica se a mão de todos os jogadores esvaziou. Se sim, retorna True.
# Caso contrário, retorna False.
acabou_jogo = True
for jogador in self._jogadores:
if (len(jogador) != 0):
acabou_jogo = False
break
return acabou_jogo
def imprimir_mesa(self):
# método para imprimir o conteúdo da mesa
if self._numero_de_jogadores == 2:
print (self._jogadores[0].nome) # nome do jogador
print (self._cartas_jogadas\
[self._quem_jogou_as_cartas.index(self._jogadores[0].nome)]\
.__str__()) # tipo Carta !!!!
print ('\n\n')
print('Mesa: {}'.format(self._carta_da_mesa.__str__()))
print ('\n\n')
print (self._jogadores[1].nome) # nome do jogador
print (self._cartas_jogadas\
[self._quem_jogou_as_cartas.index(self._jogadores[1].nome)]\
.__str__()) # tipo Carta
elif self._numero_de_jogadores == 3:
print (self._jogadores[0].nome) # nome do jogador
print (self._cartas_jogadas\
[self._quem_jogou_as_cartas.index(self._jogadores[0].nome)]\
.__str__()) # tipo Carta
print ('\n\n')
print('Mesa: {}'.format(self._carta_da_mesa.__str__()))
print ('\n\n')
print ('{} || {}'.format
(self._jogadores[1].nome, self._jogadores[2].nome))
# nome do jogador
print ('{} || {}'.format
(self._cartas_jogadas[self._quem_jogou_as_cartas.index(self._jogadores[1].nome)]\
.__str__(), self._cartas_jogadas\
[self._quem_jogou_as_cartas.index(self._jogadores[2].nome)].__str__()))
# tipo Carta
elif self._numero_de_jogadores == 4:
print('\n')
print(40*'-')
print ('\n{} || {}'.format
(self._jogadores[0].nome, self._jogadores[1].nome))
# nome do jogador fixado
try:
| |
\n in_assignment will return True if the Sim is on any type of \n asignment for its current career.\n \n in_specific_assignment will return True only if the current\n active assignment matches the assignment specified.\n ',
in_assignment=AssignmentActiveFactory(),
in_specific_assignment=AssignmentSpecificFactory(),
default='in_assignment'),
'negate':Tunable(description='\n If checked, test will pass if the Sim is not on an assignment.\n ',
tunable_type=bool,
default=False)}
def get_expected_args(self):
    # The resolver hands us the sims matching the tuned participant.
    return {'test_targets': self.participant}

@cached_test
def __call__(self, test_targets, **kwargs):
    """Pass when some target sim is on a matching assignment (or, when
    negated, when none is)."""
    # Find the first target sim currently on an assignment career.
    for sim in test_targets:
        career = sim.career_tracker.get_on_assignment_career()
        if career is not None:
            break
    else:
        # No break taken: also covers an empty test_targets iterable.
        career = None
    if career is not None:
        if self.test_type is not None:
            if self.test_type(career):
                if self.negate:
                    return TestResult(False, 'Sim has an assignment', tooltip=(self.tooltip))
                return TestResult.TRUE
    # No matching assignment found.
    if self.negate:
        return TestResult.TRUE
    return TestResult(False, 'Sim has no assignment', tooltip=(self.tooltip))
class GigActiveFactory(HasTunableSingletonFactory, AutoFactoryInit):
    """Variant that matches when ANY gig is scheduled on the career."""

    def test(self, career):
        # Missing career, or a career without a scheduled gig, fails.
        return career is not None and career.get_current_gig() is not None
class GigSpecificFactory(HasTunableSingletonFactory, AutoFactoryInit):
    """Variant that matches only specific tuned gigs, optionally requiring
    a particular objective to be active on the current gig."""
    FACTORY_TUNABLES = {'gigs':TunableList(description="\n A list of gigs. If any tuned gig is the sim's current gig, this test\n will return True.\n ",
      tunable=sims4.tuning.tunable.TunableReference(description='\n Aspiration that needs to be completed for satisfying the\n daily assignment.\n ',
      manager=(services.get_instance_manager(sims4.resources.Types.CAREER_GIG)),
      pack_safe=True),
      minlength=1),
     'active_objective':OptionalTunable(description='\n If enabled, this objective is also required to be active on any of\n the tuned Gigs.\n ',
      tunable=sims4.tuning.tunable.TunableReference(description='\n The objective that needs to be active on the gig.\n ',
      manager=(services.get_instance_manager(sims4.resources.Types.OBJECTIVE)),
      pack_safe=True))}

    def test(self, career):
        # No career means no gig — fail immediately.
        if career is None:
            return False
        else:
            current_gig = career.get_current_gig()
            if current_gig is None:
                return False
            # NOTE(review): when active_objective is tuned, the tuned gig
            # list is NOT consulted (decompiled control flow) — confirm that
            # this matches the tuning description above.
            if self.active_objective is not None:
                return current_gig.is_objective_active(self.active_objective) or False
            # Otherwise pass when the current gig is one of the tuned gigs.
            current_gig_id = current_gig.guid64
            return any((gig.guid64 == current_gig_id for gig in self.gigs))
class CareerGigTest(HasTunableSingletonFactory, AutoFactoryInit, event_testing.test_base.BaseTest):
    """Test whether a sim has (or, negated, lacks) a gig on a tuned career."""
    FACTORY_TUNABLES = {'participant':TunableEnumEntry(description='\n Who or what to apply this test to.\n ',
      tunable_type=ParticipantTypeSim,
      default=ParticipantTypeSim.Actor),
     'test_type':TunableVariant(description='\n The test to perform. Can check either a specific list of gigs or\n if any gig is currently scheduled.\n ',
      any_gig=GigActiveFactory.TunableFactory(description='\n Return True if any gig is scheduled for the career.\n '),
      specific_gigs=GigSpecificFactory.TunableFactory(description='\n Return True if any of the tuned gigs is scheduled for the\n career.\n '),
      default='any_gig'),
     'career':TunablePackSafeReference(description='\n The career to test for gigs\n ',
      manager=services.get_instance_manager(sims4.resources.Types.CAREER)),
     'negate':Tunable(description='\n If checked, test will pass if the Sim does not have the gigs.\n ',
      tunable_type=bool,
      default=False)}

    def get_expected_args(self):
        return {'test_targets': self.participant}

    @cached_test
    def __call__(self, test_targets, **kwargs):
        # Without a tuned (pack-safe) career the test can only pass negated.
        if self.career is None:
            if self.negate:
                return TestResult.TRUE
            return TestResult(False, "No career is tuned or career isn't available ", tooltip=(self.tooltip))
        tested_career_uid = self.career.guid64
        has_career_gig = False
        # Stop at the first target sim whose matching career passes test_type.
        for sim in test_targets:
            career = sim.career_tracker.get_career_by_uid(tested_career_uid)
            if career is None:
                continue
            result = self.test_type.test(career)
            if result:
                has_career_gig = True
                break
        if self.negate:
            if has_career_gig:
                return TestResult(False, 'Sim has gig', tooltip=(self.tooltip))
            return TestResult.TRUE
        if has_career_gig:
            return TestResult.TRUE
        return TestResult(False, 'Sim does not have gig', tooltip=(self.tooltip))
        # NOTE(review): unreachable — likely a decompilation artifact of an
        # early-out for empty test_targets in the original source; confirm.
        return TestResult(False, 'No test targets', tooltip=(self.tooltip))
class SimoleonsTestEvents(enum.Int):
    """Events a SimoleonsTest can register against."""
    # 0 is a sentinel meaning "register for all simoleon-related events".
    AllSimoloenEvents = 0
    OnExitBuildBuy = TestEvent.OnExitBuildBuy
    SimoleonsEarned = TestEvent.SimoleonsEarned
class _SimoleonTestValueContextBase(HasTunableSingletonFactory, AutoFactoryInit):
    """Base for SimoleonsTest value contexts: resolves the tuned subject
    and delegates the actual amount lookup to _get_value()."""
    FACTORY_TUNABLES = {'subject': TunableEnumEntry(description='\n Who to examine for Simoleon values.\n ',
      tunable_type=ParticipantTypeSingleSim,
      default=(ParticipantType.Actor),
      invalid_enums=(
        ParticipantTypeSingleSim.Invalid,))}

    def test(self, resolver):
        # Subclasses may veto the whole test (e.g. retail funds);
        # the default context always allows it.
        return TestResult.TRUE

    def get_value(self, resolver):
        # Resolve the tuned participant, then ask the subclass for the amount.
        subject = resolver.get_participant(self.subject)
        return self._get_value(subject)

    def _get_value(self, subject):
        # Subclass responsibility.
        raise NotImplementedError
class _SimoleonTestValueContextNetWorth(_SimoleonTestValueContextBase):
    """Value = the subject household's total net worth."""

    def _get_value(self, subject):
        owning_household = services.household_manager().get_by_sim_id(subject.sim_id)
        return owning_household.household_net_worth()


class _SimoleonTestValueContextPropertyOnly(_SimoleonTestValueContextBase):
    """Value = the subject household's property value only."""

    def _get_value(self, subject):
        owning_household = services.household_manager().get_by_sim_id(subject.sim_id)
        return owning_household.get_property_value()


class _SimoleonTestValueContextTotalCash(_SimoleonTestValueContextBase):
    """Value = the subject household's liquid funds."""

    def _get_value(self, subject):
        owning_household = services.household_manager().get_by_sim_id(subject.sim_id)
        return owning_household.funds.money


class _SimoleonTestValueContextCurrentValue(_SimoleonTestValueContextBase):
    """Value = the current (catalog) value of the subject object."""
    FACTORY_TUNABLES = {'subject': TunableEnumEntry(description='\n Who to examine for Simoleon values.\n ',
      tunable_type=ParticipantTypeObject,
      default=(ParticipantType.Object))}

    def _get_value(self, subject):
        # Objects without a current_value attribute count as worthless.
        return getattr(subject, 'current_value', 0)
class _SimoleonTestValueContextRetailFunds(_SimoleonTestValueContextBase):
    """Value = the funds of the retail business on the current zone."""

    def test(self, resolver):
        # Only valid when the subject resolves and the current zone
        # hosts a business.
        sim = resolver.get_participant(self.subject)
        if sim is None:
            return TestResultNumeric(False, 'Subject {} could not be resolved in the SimoleonValueTest.', (self.subject), current_value=0, goal_value=0)
        if services.business_service().get_business_manager_for_zone():
            return TestResult.TRUE
        return TestResultNumeric(False, "Current lot is either not a business lot or the Sim {} doesn't own it.", sim, current_value=0, goal_value=0)

    def _get_value(self, _):
        # The subject is ignored: funds come from the zone's business.
        business_manager = services.business_service().get_business_manager_for_zone()
        if business_manager:
            return business_manager.funds.money
        # No business on this zone: report zero funds.
        return 0
class SimoleonsTest(event_testing.test_base.BaseTest):
    """Threshold test on a Simoleon amount (net worth, property value,
    cash, object value or retail funds), optionally gated on whether the
    current zone is an apartment.

    Fix: the decompiled body made the success return unreachable (the
    failing TestResultNumeric was returned unconditionally) and skipped
    the threshold comparison entirely whenever the apartment gate was
    tuned and passed; the comparison now always runs and the result
    follows value_threshold.
    """
    FACTORY_TUNABLES = {'value_context':TunableVariant(description='\n The context against which to test the value.\n ',
      net_worth=_SimoleonTestValueContextNetWorth.TunableFactory(),
      property_only=_SimoleonTestValueContextPropertyOnly.TunableFactory(),
      total_cash=_SimoleonTestValueContextTotalCash.TunableFactory(),
      current_value=_SimoleonTestValueContextCurrentValue.TunableFactory(),
      retail_funds=_SimoleonTestValueContextRetailFunds.TunableFactory(),
      default='net_worth'),
     'is_apartment':OptionalTunable(description='\n If checked, test will pass if the zone is an apartment. If\n unchecked, test passes if the zone is NOT an apartment. Useful\n in aspiration tuning, to discriminate between property\n types in tests of lot value. Allows "Own a House worth X" and\n "Own an Apartment worth X"\n ',
      disabled_name="Don't_Test",
      enabled_name='Is_or_is_not_apartment_zone',
      tunable=TunableTuple(description='\n Test whether the zone is an apartment or not.\n ',
      is_apartment=Tunable(description='\n If checked, test will pass if the zone is an apartment.\n If unchecked, test passes if the zone is NOT an\n apartment.\n ',
      tunable_type=bool,
      default=True),
      consider_penthouse_an_apartment=Tunable(description='\n If enabled, we will consider penthouses to be\n apartments when testing them against the apartment\n check.\n ',
      tunable_type=bool,
      default=True))),
     'value_threshold':TunableThreshold(description='\n Amounts in Simoleons required to pass\n '),
     'test_event':TunableEnumEntry(description='\n The event that we want to trigger this instance of the tuned test on. NOTE: OnClientConnect is\n still used as a trigger regardless of this choice in order to update the UI.\n ',
      tunable_type=SimoleonsTestEvents,
      default=SimoleonsTestEvents.AllSimoloenEvents)}

    def __init__(self, value_context, is_apartment, value_threshold, test_event, **kwargs):
        super().__init__(**kwargs)
        self.value_context = value_context
        self.is_apartment = is_apartment
        self.value_threshold = value_threshold
        # A specific event narrows registration; the sentinel registers both.
        if test_event == SimoleonsTestEvents.AllSimoloenEvents:
            self.test_events = (
             TestEvent.SimoleonsEarned, TestEvent.OnExitBuildBuy)
        else:
            self.test_events = (test_event,)

    def get_expected_args(self):
        return {'resolver': RESOLVER_PARTICIPANT}

    @cached_test
    def __call__(self, resolver):
        # Optional apartment gate.
        if self.is_apartment is not None:
            zone_id = services.current_zone_id()
            is_zone_apartment = services.get_plex_service().is_zone_an_apartment(zone_id, consider_penthouse_an_apartment=(self.is_apartment.consider_penthouse_an_apartment))
            if self.is_apartment.is_apartment != is_zone_apartment:
                return TestResult(False, 'Zone failed apartment test', tooltip=(self.tooltip))
        # Let the value context veto (e.g. no retail business on this zone).
        test_result = self.value_context.test(resolver)
        if not test_result:
            test_result.goal_value = self.value_threshold.value
            test_result.tooltip = self.tooltip
            return test_result
        value = self.value_context.get_value(resolver)
        if not self.value_threshold.compare(value):
            operator_symbol = Operator.from_function(self.value_threshold.comparison).symbol
            return TestResultNumeric(False,
              'Failed value check: {} {} {} (current value: {})',
              (self.value_context),
              operator_symbol,
              (self.value_threshold.value),
              value,
              current_value=value,
              goal_value=(self.value_threshold.value),
              is_money=True,
              tooltip=(self.tooltip))
        return TestResultNumeric(True, current_value=value, goal_value=(self.value_threshold.value), is_money=True)

    def goal_value(self):
        # NOTE(review): plain method (not a property) — callers invoke it.
        return self.value_threshold.value

    @property
    def is_goal_value_money(self):
        return True


TunableSimoleonsTest = TunableSingletonFactory.create_auto_factory(SimoleonsTest)
class PartySizeTest(event_testing.test_base.BaseTest):
    """Compare the size of the subject sim's main group with a threshold."""
    test_events = ()
    FACTORY_TUNABLES = {'description':'Require the party size of the subject sim to match a threshold.',
     'subject':TunableEnumEntry(ParticipantType, ParticipantType.Actor, description='The subject of this party size test.'),
     'threshold':TunableThreshold(description='The party size threshold for this test.')}

    def __init__(self, subject, threshold, **kwargs):
        (super().__init__)(safe_to_skip=True, **kwargs)
        self.subject = subject
        self.threshold = threshold

    def get_expected_args(self):
        return {'test_targets': self.subject}

    @cached_test
    def __call__(self, test_targets=None):
        # Only the first target is ever evaluated: every path below returns.
        for target in test_targets:
            if target is None:
                return TestResult(False, 'Party Size test failed because subject is not set.', tooltip=(self.tooltip))
            if target.is_sim:
                # Sim infos must be instantiated before they have a group.
                if target.get_sim_instance(allow_hidden_flags=ALL_HIDDEN_REASONS) is None:
                    return TestResult(False, '{} failed topic check: It is not an instantiated sim.', target, tooltip=(self.tooltip))
                target = target.get_sim_instance(allow_hidden_flags=ALL_HIDDEN_REASONS)
            main_group = target.get_main_group()
            if main_group is None:
                return TestResult(False, 'Party Size test failed because subject has no party attribute.', tooltip=(self.tooltip))
            group_size = len(main_group)
            # threshold.compare returns a truthy value when satisfied.
            return self.threshold.compare(group_size) or TestResult(False, 'Party Size Failed.', tooltip=(self.tooltip))
        # Empty target list: vacuously true.
        return TestResult.TRUE


TunablePartySizeTest = TunableSingletonFactory.create_auto_factory(PartySizeTest)
class PartyAgeTest(event_testing.test_base.BaseTest):
    """Require that enough members of the subject's party (or rally
    ensemble) fall within the allowed ages.

    Fix: in the decompiled body the party lookup was chained as an
    `elif` off the `target.is_sim` branch, so whenever the target was a
    sim `party` was never assigned and the test crashed with an
    UnboundLocalError; the party is now looked up for every target,
    mirroring PartySizeTest.
    """
    test_events = ()
    FACTORY_TUNABLES = {'description':'Require all sims in the party meet with the age requirement.',
     'subject':TunableEnumEntry(description='\n The subject of this party age test.',
      tunable_type=ParticipantType,
      default=ParticipantType.Actor),
     'ages_allowed':TunableEnumSet(description='\n All valid ages.',
      enum_type=sims.sim_info_types.Age,
      enum_default=sims.sim_info_types.Age.ADULT,
      default_enum_list=[
      sims.sim_info_types.Age.TEEN,
      sims.sim_info_types.Age.YOUNGADULT, sims.sim_info_types.Age.ADULT,
      sims.sim_info_types.Age.ELDER]),
     'check_ensemble':Tunable(description="\n If enabled then we will check against the subject's rally ensemble\n instead.\n ",
      tunable_type=bool,
      default=False),
     'threshold':TunableThreshold(description='\n The number of sims that must pass these tests per group to pass the\n test.\n ',
      default=sims4.math.Threshold(1, sims4.math.Operator.GREATER_OR_EQUAL.function))}

    def __init__(self, subject, ages_allowed, check_ensemble, threshold, **kwargs):
        super().__init__(safe_to_skip=True, **kwargs)
        self.subject = subject
        self.ages_allowed = ages_allowed
        self.check_ensemble = check_ensemble
        self.threshold = threshold

    def get_expected_args(self):
        return {'test_targets': self.subject}

    @cached_test
    def __call__(self, test_targets=None):
        # Only the first target is ever evaluated: every path below returns.
        for target in test_targets:
            if target is None:
                return TestResult(False, 'Party Age test failed because subject is not set.', tooltip=(self.tooltip))
            if target.is_sim:
                # Sim infos must be instantiated before they have a party.
                if target.get_sim_instance(allow_hidden_flags=ALL_HIDDEN_REASONS) is None:
                    return TestResult(False, '{} failed topic check: It is not an instantiated sim.', target, tooltip=(self.tooltip))
                target = target.get_sim_instance(allow_hidden_flags=ALL_HIDDEN_REASONS)
            # Party lookup now runs for sims as well (this was the bug).
            if self.check_ensemble:
                party = services.ensemble_service().get_ensemble_sims_for_rally(target)
            else:
                party = target.get_main_group()
            if not party:
                return TestResult(False, 'Party Age test failed because subject has no party attribute.', tooltip=(self.tooltip))
            passing_sims = sum((1 for sim in party if sim.age in self.ages_allowed))
            return self.threshold.compare(passing_sims) or TestResult(False, "Party has members that age doesn't meet with the requirement", tooltip=(self.tooltip))
        return TestResult.TRUE


TunablePartyAgeTest = TunableSingletonFactory.create_auto_factory(PartyAgeTest)
class TotalSimoleonsEarnedTest(event_testing.test_base.BaseTest):
test_events = (
TestEvent.SimoleonsEarned,)
USES_DATA_OBJECT = True
FACTORY_TUNABLES = {'description':'This test is specifically for account based Achievements, upon event/situation completion testing if the players account has earned enough Simoleons from event rewards to pass a threshold.',
'threshold':TunableThreshold(description='The simoleons threshold for this test.'),
'earned_source':TunableEnumEntry(event_testing.event_data_const.SimoleonData,
event_testing.event_data_const.SimoleonData.TotalMoneyEarned,
description='The individual source that we want to track the simoleons from.')}
def __init__(self, threshold, earned_source, **kwargs):
| |
<filename>wrapless.py
from protocol import Protocol
from typing import Optional
import logging
import struct
from dataclasses import dataclass
from enum import Enum
from usb.core import USBTimeoutError
from Crypto.Cipher import AES
from Crypto.Hash import HMAC, SHA256
from Crypto.Random import get_random_bytes
from crccheck.crc import Crc32Mpeg2
USB_CHUNK_SIZE = 0x40
@dataclass
class Message:
    """A wire message: 4-bit category, 3-bit command and a payload of at
    most 0xFFFF bytes.

    The property setters enforce the field limits; __post_init__ routes
    the constructor arguments through those same setters, so an invalid
    Message can no longer be built directly (previously the generated
    dataclass __init__ bypassed all validation).
    """
    _category: int
    _command: int
    _payload: bytes

    def __post_init__(self) -> None:
        # Re-assign through the properties to validate constructor input.
        self.category = self._category
        self.command = self._command
        self.payload = self._payload

    @property
    def category(self) -> int:
        return self._category

    @category.setter
    def category(self, category) -> None:
        # Category must fit in a nibble.
        assert category <= 0xF
        self._category = category

    @property
    def command(self) -> int:
        return self._command

    @command.setter
    def command(self, command) -> None:
        # Command must fit in 3 bits.
        assert command <= 0x7
        self._command = command

    @property
    def payload(self) -> bytes:
        return self._payload

    @payload.setter
    def payload(self, payload) -> None:
        # Payload length must fit in an unsigned 16-bit size field.
        assert len(payload) <= 0xFFFF
        self._payload = payload
class FingerDetectionOperation(Enum):
    """Finger-detection (FDT) operation selector.

    The member value is the command number sent on the wire (see
    Device.execute_fdt_operation, which maps DOWN to op code 0xC, UP to
    0xE, and takes MANUAL's op code from the first byte of fdt_base).
    """

    DOWN = 1
    UP = 2
    MANUAL = 3
class Device:
def __init__(self, product: int, protocol, timeout: Optional[float] = 5) -> None:
    """Open the device with USB product id `product`.

    `protocol` is a factory (e.g. the Protocol class) instantiated as
    protocol(0x27C6, product, timeout); the vendor id is fixed at 0x27C6.
    The GTLS context stays None until establish_gtls_connection() is called.
    """
    logging.debug(f"__init__({product}, {protocol}, {timeout})")
    self.protocol: Protocol = protocol(0x27C6, product, timeout)
    self.gtls_context: Optional[GTLSContext] = None
    # FIXME Empty device reply buffer
    # (Current patch while waiting for a fix)
    self._empty_buffer()
def _empty_buffer(self) -> None:
    """Drain any stale replies left in the device's buffer.

    Reads with a short timeout until a USB timeout occurs; backend error
    code -7 (presumably LIBUSB_ERROR_TIMEOUT — confirm against the libusb
    backend in use) is the expected terminator, anything else is re-raised.
    """
    logging.debug("_empty_buffer()")
    try:
        while True:
            self.protocol.read(timeout=0.1)
    except USBTimeoutError as error:
        if error.backend_error_code == -7:
            return
        raise error
def _recv_next_chunk(self, timeout: Optional[float]) -> bytes:
    """Read one non-empty USB chunk, giving up after 10 empty reads."""
    attempts_left = 10
    while attempts_left > 0:
        data = self.protocol.read(USB_CHUNK_SIZE, timeout=timeout)
        if data:
            return data
        attempts_left -= 1
    raise Exception("Too many empty reads")
def _recv_message_from_device(
    self,
    timeout: Optional[float],
) -> Message:
    """Receive one complete message, reassembling multi-chunk transfers.

    First-chunk layout: [command byte][u16le size][payload...][checksum],
    where `size` counts the payload plus the trailing checksum byte (see
    the matching encoder in _send_message_to_device).  Continuation chunks
    repeat the command byte with bit 0 set and contribute their remaining
    bytes to the body.
    """
    data = self._recv_next_chunk(timeout)
    logging.debug(f"Received chunk from device: {data.hex(' ')}")
    command_byte = data[0]
    message_size = struct.unpack("<H", data[1:3])[0]
    # NOTE(review): this stops once len(data) >= message_size + 1, which is
    # 2 bytes short of the nominal size + 3-byte header total; full USB
    # chunks may make this moot in practice — confirm against the protocol.
    while len(data) - 1 < message_size:
        chunk = self._recv_next_chunk(timeout)
        logging.debug(f"Received chunk from device: {chunk.hex(' ')}")
        contd_command_byte = chunk[0]
        if contd_command_byte & 1 == 0 or contd_command_byte & 0xFE != command_byte:
            raise Exception("Wrong continued chunk")
        data += chunk[1:]
    # Unpack the command byte back into its 4-bit category / 3-bit command.
    category = command_byte >> 4
    command = (command_byte & 0xF) >> 1
    data = data[: message_size + 3]
    msg_checksum = data[-1]
    data = data[:-1]
    # 0x88 is the sender's "no checksum" marker (see _send_message_to_device).
    if msg_checksum != 0x88:
        checksum = 0xAA - sum(data) & 0xFF
        if msg_checksum != checksum:
            raise Exception(
                f"Wrong checksum, "
                f"expected: {hex(checksum)}, received: {hex(msg_checksum)}"
            )
    payload = data[3:]
    message = Message(category, command, payload)
    logging.info(f"Received message from device: {message}")
    return message
def _check_ack(self, command_byte: int, timeout: float) -> None:
    """Wait for the device's ACK of the message whose command byte is given."""
    ack = self._recv_message_from_device(timeout)
    if ack.category != 0xB:
        raise Exception("Not an ACK message")
    if ack.command != 0:
        raise Exception("ACK should not have commands")
    if ack.payload[0] != command_byte:
        raise Exception("ACK wrong command")
    logging.info(f"Received ACK for {hex(command_byte)}")
def _send_message_to_device(
    self,
    message: Message,
    use_checksum: bool,
    ack_timeout: float,
) -> None:
    """Serialize, chunk and send `message`, then wait for its ACK.

    Wire layout: [category<<4 | command<<1][u16le payload_len + 1]
    [payload][checksum].  The checksum byte is 0xAA - sum(bytes) mod 256,
    or the literal 0x88 ("no checksum") when use_checksum is False.
    Chunks after the first are prefixed with the command byte with bit 0
    set (continuation marker), so they carry one fewer data byte.
    """
    command_byte = message.category << 4 | message.command << 1
    data = struct.pack("<B", command_byte)
    data += struct.pack("<H", len(message.payload) + 1)
    data += message.payload
    checksum = 0xAA - sum(data) & 0xFF if use_checksum else 0x88
    data += struct.pack("<B", checksum)
    logging.info(f"Sending message: {data.hex(' ')}")
    is_first = True
    while data:
        if is_first:
            chunk = data[:USB_CHUNK_SIZE]
            data = data[USB_CHUNK_SIZE:]
            is_first = False
        else:
            # Continuation chunk: command byte with bit 0 set, then data.
            chunk = struct.pack("<B", command_byte | 1)
            chunk += data[: USB_CHUNK_SIZE - 1]
            data = data[USB_CHUNK_SIZE - 1 :]
        assert len(chunk) <= USB_CHUNK_SIZE
        logging.debug(f"Sending chunk: {chunk.hex(' ')}")
        self.protocol.write(chunk)
    self._check_ack(command_byte, ack_timeout)
def ping(self) -> None:
    """Send a ping (category 0, command 0) and wait for its ACK."""
    logging.debug("ping()")
    ping_message = Message(0, 0, b"\x00\x00")
    self._send_message_to_device(ping_message, True, 0.5)
def read_firmware_version(self) -> str:
    """Query and return the firmware version string (category 0xA, command 4)."""
    logging.debug("firmware_version()")
    self._send_message_to_device(Message(0xA, 4, b"\x00\x00"), True, 0.5)
    reply = self._recv_message_from_device(2)
    if reply.category != 0xA or reply.command != 4:
        raise Exception("Not a firmware version reply")
    # The version is NUL-terminated inside the payload.
    version_bytes, _, _ = reply.payload.partition(b"\x00")
    return version_bytes.decode()
def reset(self, reset_type: int, irq_status: bool):
    """Reset the device (category 0xA, command 1).

    The 16-bit request packs a mode in the low 3 bits (type 0 -> 0b001,
    type 1 -> 0b010, type 2 -> 0b011), an IRQ-status-request flag in bit 2
    (type 0 only), and a per-type constant in the high byte (20 for type 0,
    50 for type 1 — units unconfirmed; possibly a delay).  Returns the
    IRQ status int only for reset_type 0 with irq_status=True, else None.
    """
    logging.debug("reset()")
    if reset_type == 0:
        msg = 0b001
        if irq_status:
            msg |= 0b100
        msg |= 20 << 8
    elif reset_type == 1:
        msg = 0b010
        msg |= 50 << 8
    elif reset_type == 2:
        msg = 0b011
    else:
        raise Exception(f"Invalid reset type: {reset_type}")
    request = Message(0xA, 1, msg.to_bytes(length=2, byteorder="little"))
    self._send_message_to_device(request, True, 0.5)
    # Only reset type 0 with irq_status produces a reply to read.
    if reset_type != 0 or not irq_status:
        return None
    reply = self._recv_message_from_device(1)
    if reply.category != 0xA or reply.command != 1:
        raise Exception("Not a reset reply")
    irq_status_val = int.from_bytes(reply.payload, byteorder="little")
    logging.debug(f"irq_status: {irq_status_val:#x}")
    return irq_status_val
def _production_read(self, read_type: int) -> bytes:
    """Production-data read (category 0xE, command 2) of blob `read_type`.

    Reply payload layout: [status byte][u32le read_type echo]
    [u32le data size][data].  Raises on non-zero status, echoed-type
    mismatch, or declared/actual size mismatch; returns the data bytes.
    """
    request = Message(0xE, 2, struct.pack("<L", read_type))
    self._send_message_to_device(request, True, 0.5)
    reply = self._recv_message_from_device(1)
    if reply.category != 0xE or reply.command != 2:
        raise Exception("Not a production read reply")
    payload = reply.payload
    if payload[0] != 0:
        raise Exception("Production read MCU failed")
    payload = payload[1:]
    msg_read_type = struct.unpack("<L", payload[:4])[0]
    payload = payload[4:]
    if read_type != msg_read_type:
        raise Exception(
            f"Wrong read type in reply, "
            f"expected: {hex(read_type)}, received: {hex(msg_read_type)}"
        )
    payload_size = struct.unpack("<L", payload[:4])[0]
    payload = payload[4:]
    if payload_size != len(payload):
        raise Exception(
            f"Payload does not match reported size: "
            f"{payload_size} != {len(payload)}"
        )
    return payload
def _production_write(self, data_type: int, data: bytes) -> None:
    """Production-data write (category 0xE, command 1) of `data` tagged `data_type`."""
    header = struct.pack("<LL", data_type, len(data))  # Header size excluded
    self._send_message_to_device(Message(0xE, 1, header + data), True, 0.5)
    reply = self._recv_message_from_device(1)
    if reply.category != 0xE or reply.command != 1:
        raise Exception("Not a production write reply")
    if reply.payload[0] != 0:
        raise Exception("Production write MCU failed")
def _recv_mcu(self, read_type) -> bytes:
    """Receive a GTLS handshake message (category 0xD, command 1).

    Payload layout: [u32le read_type echo][u32le size][data], where the
    declared size includes the 8-byte header (mirroring _send_mcu) and is
    therefore checked against the whole payload length.
    """
    logging.debug("recv_mcu()")
    message = self._recv_message_from_device(2)
    if message.category != 0xD or message.command != 1:
        raise Exception("Not a GTLS handshake message")
    payload = message.payload
    msg_read_type = struct.unpack("<L", payload[:4])[0]
    if read_type != msg_read_type:
        raise Exception(
            f"Wrong read type in reply, "
            f"expected: {hex(read_type)}, received: {hex(msg_read_type)}"
        )
    payload_size = struct.unpack("<L", payload[4:8])[0]
    if payload_size != len(payload):
        raise Exception(
            f"Payload does not match reported size: "
            f"{payload_size} != {len(payload)}"
        )
    return payload[8:]
def _send_mcu(self, data_type, data: bytes) -> None:
    """Send a GTLS handshake message (category 0xD, command 1)."""
    logging.debug("send_mcu()")
    header = struct.pack("<LL", data_type, len(data) + 8)  # Header size included
    self._send_message_to_device(Message(0xD, 1, header + data), True, 0.5)
def read_sealed_psk(self) -> bytes:
    """Read the sealed PSK blob (production data type 0xB001)."""
    logging.debug("read_sealed_psk()")
    return self._production_read(0xB001)
def write_sealed_psk(self, sealed_psk: bytes) -> None:
    """Write the sealed PSK blob (production data type 0xB001).

    Fix: the debug label previously read "writing_sealed_psk()", which did
    not match the method name (every sibling method logs its exact name).
    """
    logging.debug("write_sealed_psk()")
    return self._production_write(0xB001, sealed_psk)
def write_psk_white_box(self, psk_white_box: bytes) -> None:
    """Write the PSK white-box blob (production data type 0xB002)."""
    logging.debug("write_psk_white_box()")
    self._production_write(0xB002, psk_white_box)
def read_psk_hash(self) -> bytes:
    """Read the PSK hash (production data type 0xB003)."""
    logging.debug("read_psk_hash()")
    return self._production_read(0xB003)
def establish_gtls_connection(self, psk) -> None:
    """Create a GTLSContext from `psk` and run its connection handshake."""
    logging.debug("establish_gtls_connection()")
    self.gtls_context = GTLSContext(psk, self)
    self.gtls_context.establish_connection()
def read_data(self, addr: int, read_size: int, timeout: float) -> bytes:
    """Read `read_size` bytes at `addr` (category 0x8, command 1).

    Request layout: a zero byte, then u16le address and u16le size.
    """
    request = b"\x00" + struct.pack("<HH", addr, read_size)
    self._send_message_to_device(Message(0x8, 0x1, request), True, 0.5)
    reply = self._recv_message_from_device(timeout)
    if reply.category != 0x8 or reply.command != 0x1:
        raise Exception("Not a register read message")
    return reply.payload
def read_otp(self, timeout: float) -> bytes:
    """Read the sensor OTP blob (category 0xA, command 3).

    Fix: the failure message previously said "Not a register read message",
    copy-pasted from read_data; it now names the OTP reply.
    """
    self._send_message_to_device(Message(0xA, 0x3, b"\x00\x00"), True, 0.5)
    reply = self._recv_message_from_device(timeout)
    if reply.category != 0xA or reply.command != 0x3:
        raise Exception("Not an OTP read reply")
    return reply.payload
def upload_config(self, config: bytes, timeout: float):
    """Upload a configuration blob (category 0x9, command 0).

    Raises unless the device's little-endian reply value is 1 (success).
    """
    logging.debug("Uploading configuration")
    self._send_message_to_device(Message(0x9, 0, config), True, 0.5)
    reply = self._recv_message_from_device(timeout)
    if reply.category != 0x9 or reply.command != 0:
        raise Exception("Not a config message")
    result = int.from_bytes(reply.payload, byteorder="little")
    if result != 1:
        raise Exception("Upload configuration failed")
def execute_fdt_operation(
    self, fdt_op: FingerDetectionOperation, fdt_base: bytes, timeout: float = 0
):
    """Start a finger-detection (FDT) operation (category 0x3).

    DOWN/UP take a 24-byte base and use fixed op codes 0xC/0xE with the
    caller's timeout for the ACK; MANUAL takes a 25-byte base whose first
    byte is the op code and ACKs within 0.5s.  The payload sent is
    [op_code][0x01][base].  Only MANUAL reads and returns the FDT data
    here; for DOWN/UP, use wait_for_fdt_event() to collect the event.
    """
    if fdt_op == FingerDetectionOperation.DOWN:
        assert len(fdt_base) == 24
        op_code = 0xC
        ack_timeout = timeout
    elif fdt_op == FingerDetectionOperation.UP:
        assert len(fdt_base) == 24
        op_code = 0xE
        ack_timeout = timeout
    elif fdt_op == FingerDetectionOperation.MANUAL:
        assert len(fdt_base) == 25
        op_code = fdt_base[0]
        fdt_base = fdt_base[1:]
        ack_timeout = 0.5
    payload = op_code.to_bytes(length=1, byteorder="little")
    payload += int.to_bytes(1, length=1, byteorder="little")  # always 1
    payload += fdt_base
    self._send_message_to_device(
        Message(0x3, fdt_op.value, payload), True, ack_timeout
    )
    if fdt_op != FingerDetectionOperation.MANUAL:
        return None
    fdt_data, _ = self._get_finger_detection_data(fdt_op, timeout)
    return fdt_data
def wait_for_fdt_event(
    self, fdt_op: FingerDetectionOperation, timeout: Optional[float] = None
):
    """Block until the device reports the FDT event for `fdt_op`.

    Returns the (fdt_data, touch_flag) pair from _get_finger_detection_data.
    """
    return self._get_finger_detection_data(fdt_op, timeout)
def _get_finger_detection_data(
    self, fdt_op: FingerDetectionOperation, timeout: Optional[float]
):
    """Read one 28-byte FDT reply and return (fdt_data, touch_flag).

    Payload layout: [u16le irq_status][u16le touch_flag][24-byte FDT data].
    The reply's command must match the requested operation's value.
    """
    reply = self._recv_message_from_device(timeout)
    if reply.category != 0x3 or reply.command != fdt_op.value:
        raise Exception("Not a finger detection reply")
    payload = reply.payload
    if len(payload) != 28:
        raise Exception("Finger detection payload wrong length")
    irq_status = int.from_bytes(payload[:2], byteorder="little")
    payload = payload[2:]
    logging.debug(f"IRQ status: {irq_status:#x}")
    touch_flag = int.from_bytes(payload[:2], byteorder="little")
    payload = payload[2:]
    logging.debug(f"Touch flag: {touch_flag:#x}")
    return payload, touch_flag
def get_image(self, request: bytes, timeout: float) -> bytes:
    """Capture an image (category 0x2) and decrypt it via the GTLS context.

    `request` is a fixed 4-byte capture parameter blob.  Note the GTLS
    connection state is only validated after the reply arrives, so a
    capture is attempted even without an established context.
    """
    assert len(request) == 4
    self._send_message_to_device(Message(0x2, 0, request), True, 0.5)
    message = self._recv_message_from_device(timeout)
    if message.category != 0x2 or message.command != 0:
        raise Exception("Not an image message")
    if self.gtls_context is None or not self.gtls_context.is_connected():
        raise Exception("Invalid GTLS connection state")
    return self.gtls_context.decrypt_sensor_data(message.payload)
def set_sleep_mode(self, timeout: float):
    """Request sleep mode (category 0x6, command 0); only the ACK is awaited."""
    self._send_message_to_device(
        Message(0x6, 0, int.to_bytes(1, length=2, byteorder="little")),
        True,
        timeout,
    )
def ec_control(self, power: str, timeout: float):
if power == "on":
control_val = 1
elif power == "off":
control_val = 0
else:
raise ValueError
self._send_message_to_device(
Message(0xA, 7, control_val.to_bytes(1, byteorder="little") * 2 + b"\x00"),
True,
timeout,
)
reply = self._recv_message_from_device(500)
if reply.category != 0xA or reply.command != 7:
raise Exception("Not an EC control reply")
if int.from_bytes(reply.payload, | |
interested genes
### return data frame: Select only the genes interested in the data frame
result_df = df.loc[df.index.intersection(entrezIDs)].reindex(entrezIDs)
repo_genes, interested_genes = set(df.index), set(entrezIDs)
if not repo_genes.issuperset(interested_genes):
unfound = interested_genes - repo_genes
logger.debug("{!r} are not found!".format(unfound))
result_df.fillna(0, inplace=True)
return result_df
@classmethod
def __filter_celllines(cls, df, celllines):
    """Select the requested cell-line columns from `df`.

    Cell lines absent from `df` are fetched (recursively) from the
    lazily-loaded backup expression table and concatenated on; NaNs
    introduced by that concat are zero-filled.  Column order follows set
    order, not the order of `celllines`.
    """
    ### cell line: interested cell lines
    ### return data frame: select only the cell lines interested by user
    result_df = df.loc[:, list(set(celllines) & set(df.columns))]
    repo_celllines, interested_celllines, unfound = set(df.columns), set(celllines), {}
    if not repo_celllines.issuperset(interested_celllines):
        unfound = interested_celllines - repo_celllines
        logger.debug("{!r} are not found!".format(unfound))
    if len(unfound):
        ### use the back up expression dataframe data
        cls.__initialize_backup_expression()
        backup_celllines_repo = set(cls.backup_expression.columns)
        if len(unfound.intersection(backup_celllines_repo)):
            more_cellline_df = cls.__filter_celllines(cls.backup_expression, list(unfound))
            result_df = pd.concat([result_df, more_cellline_df], axis=1)
            result_df.fillna(0, inplace=True)
    return result_df
@classmethod
def prepare_expresstion_df(cls, entrezIDs, celllines):
    """Return gene-expression data restricted to `entrezIDs` x `celllines`.

    (The historical 'expresstion' spelling is kept: it is this method's
    public name and callers depend on it.)  When renewal is configured or
    no processed file exists yet, the result is persisted; note that
    index=False drops the entrez index from the saved CSV.
    """
    ### entrezIDs, celllines: selection criterials
    ### return data frame: data frame that have interested cell lines and genes
    ###              A375 ..... (celllines)
    ### 1003(entrez)
    ### ...
    cls.__initialize_gene_expression()
    result_df = cls.__filter_celllines(cls.gene_expression, celllines)
    result_df = cls.__filter_genes(result_df, entrezIDs)
    if setting.raw_expression_data_renew or not path.exists(setting.processed_expression_raw):
        logger.debug("Persist gene expression data frame")
        result_df.to_csv(setting.processed_expression_raw, index = False)
    return result_df
class NetExpressDataLoader(CustomDataLoader):
    """Loads the netexpress (network-weighted expression) gene table."""
    # Cached frame; read lazily, at most once per process.
    netexpress_df = None
    def __init__(self):
        super().__init__()
    @classmethod
    def __initialize_gene_expression(cls):
        ### make sure only one gene expression data frame is instantiated in this class
        ### return: gene expression data frame
        if cls.netexpress_df is None:
            cls.netexpress_df = pd.read_csv(setting.netexpress_df, sep='\t')
            logger.debug("Read in netexpress data successfully")
        return cls.netexpress_df
    @classmethod
    def __filter_genes(cls, df, entrezIDs):
        """Row-select `entrezIDs` from `df` (missing genes logged, NaNs zero-filled)."""
        ### genes: interested genes
        ### return data frame: Select only the genes interested in the data frame
        result_df = df.loc[df.index.intersection(entrezIDs)].reindex(entrezIDs)
        repo_genes, interested_genes = set(df.index), set(entrezIDs)
        if not repo_genes.issuperset(interested_genes):
            unfound = interested_genes - repo_genes
            logger.debug("{!r} are not found!".format(unfound))
        result_df.fillna(0, inplace=True)
        return result_df
    @classmethod
    def prepare_netexpress_df(cls, entrezIDs):
        """Return the netexpress frame restricted (and reindexed) to `entrezIDs`."""
        ### entrezIDs, celllines: selection criterials
        ### return data frame: data frame that have interested cell lines and genes
        ###              A375 ..... (celllines)
        ### 1003(entrez)
        ### ...
        cls.__initialize_gene_expression()
        result_df = cls.__filter_genes(cls.netexpress_df, entrezIDs)
        return result_df
class ECFPDataLoader(CustomDataLoader):
    """Loads drug and cell-line ECFP fingerprint features.

    Fix: get_drug_ecfp_data() and get_cl_ecfp_data() used to re-run their
    destructive transformations on every call (set_index('Name') raised
    once 'Name' had become the index; re-applying the column filter
    misaligned the frames), so a second call crashed.  The transforms now
    run once and the processed frame is cached.
    """
    drug_ECFP = None
    cl_ECFP = None
    # One-shot flags so repeated getter calls don't re-apply the transforms.
    __drug_ecfp_processed = False
    __cl_ecfp_processed = False
    def __init__(self):
        super().__init__()
    @classmethod
    def __dataloader_initializer(cls):
        # Lazily read the raw CSVs at most once per process.
        if cls.drug_ECFP is None:
            cls.drug_ECFP = pd.read_csv(setting.drug_ECFP)
        if cls.cl_ECFP is None:
            cls.cl_ECFP = pd.read_csv(setting.cl_ECFP, index_col=0)
    @classmethod
    def get_drug_ecfp_data(cls, save_each_data_point = setting.save_each_ecfp_phy_data_point):
        """Return the drug ECFP matrix (drugs x retained ECFP_6 bits).

        Optionally persists one .pt file per drug under ecfp_datas/.
        """
        if cls.drug_ECFP is None:
            cls.__dataloader_initializer()
        if not cls.__drug_ecfp_processed:
            cls.drug_ECFP = cls.drug_ECFP[['Name', 'ECFP_6']]
            cls.drug_ECFP.set_index('Name', inplace= True)
            # Explode the ECFP_6 bit string into one int column per bit.
            cls.drug_ECFP = cls.drug_ECFP['ECFP_6'].apply(lambda i: pd.Series(list(i))).astype(int)
            cls.drug_ECFP.columns = cls.drug_ECFP.columns.astype(str)
            cls.drug_ECFP = cls.drug_ECFP.loc[:, cls.__get_ecfp_filter(drug_filter_only=setting.ecfp_phy_drug_filter_only)]
            cls.__drug_ecfp_processed = True
        if save_each_data_point:
            if not path.exists("ecfp_datas"):
                mkdir("ecfp_datas")
            for i, one_drug_ecfp in enumerate(cls.drug_ECFP.values):
                save(one_drug_ecfp, path.join("ecfp_datas", cls.drug_ECFP.index[i] + '.pt'))
        return cls.drug_ECFP
    @classmethod
    def get_cl_ecfp_data(cls):
        """Return the cell-line ECFP matrix restricted to the shared bit filter."""
        if cls.cl_ECFP is None:
            cls.__dataloader_initializer()
        if not cls.__cl_ecfp_processed:
            cls.cl_ECFP = cls.cl_ECFP.loc[:, cls.__get_ecfp_filter()]
            cls.__cl_ecfp_processed = True
        return cls.cl_ECFP
    @classmethod
    def __get_ecfp_filter(cls, drug_filter_only = False):
        """Boolean column filter: bits non-zero in at least one row.

        With drug_filter_only, only the drug frame decides; otherwise the
        intersection (&) of drug and cell-line filters is used.
        """
        if cls.cl_ECFP is None or cls.drug_ECFP is None:
            cls.__dataloader_initializer()
        drug_filter = (~((cls.drug_ECFP==0).all(axis = 0)))
        cl_filter = (~((cls.cl_ECFP==0).all(axis = 0)))
        common_filter = (drug_filter & cl_filter)
        if drug_filter_only:
            return drug_filter
        return common_filter
class PhysicochemDataLoader(CustomDataLoader):
    """Loads drug and cell-line physicochemical-property features.

    Fix: get_drug_physicochem_property() used to drop the 'SMILE' column
    and re-scale on every call, so a second call raised KeyError; the
    transforms now run once and the processed frames are cached.
    """
    drug_physicochem = None
    cl_physicochem = None
    # One-shot flags so repeated getter calls don't re-apply the transforms.
    __drug_processed = False
    __cl_processed = False
    def __init__(self):
        super().__init__()
    @classmethod
    def __dataloader_initializer(cls):
        # Lazily read the raw CSVs at most once per process.
        if cls.drug_physicochem is None:
            cls.drug_physicochem = pd.read_csv(setting.drug_physicochem, index_col=0)
        if cls.cl_physicochem is None:
            cls.cl_physicochem = pd.read_csv(setting.cl_physicochem, index_col = 0)
    @classmethod
    def get_drug_physicochem_property(cls, save_each_data_point = setting.save_each_ecfp_phy_data_point):
        """Return drugs x scaled physicochemical properties.

        Optionally persists one .pt file per drug under phy_datas/.
        """
        if cls.drug_physicochem is None:
            cls.__dataloader_initializer()
        if not cls.__drug_processed:
            cls.drug_physicochem.drop('SMILE', inplace=True, axis=1)
            cls.drug_physicochem = cls.drug_physicochem.loc[:, cls.__get_physicochem_filter(drug_filter_only=setting.ecfp_phy_drug_filter_only)]
            # Scale without centering so zero entries stay zero.
            physicochem_scaler = StandardScaler(with_mean=False)
            physicochem = physicochem_scaler.fit_transform(cls.drug_physicochem)
            physicochem = pd.DataFrame(physicochem, index=cls.drug_physicochem.index, columns=cls.drug_physicochem.columns)
            cls.drug_physicochem = physicochem
            cls.__drug_processed = True
        if save_each_data_point:
            if not path.exists("phy_datas"):
                mkdir("phy_datas")
            for i, one_drug_phy in enumerate(cls.drug_physicochem.values):
                save(one_drug_phy, path.join("phy_datas", cls.drug_physicochem.index[i] + '.pt'))
        return cls.drug_physicochem
    @classmethod
    def get_cl_physiochem_property(cls):
        """Return the cell-line physicochemical matrix restricted to the shared filter."""
        if cls.cl_physicochem is None:
            cls.__dataloader_initializer()
        if not cls.__cl_processed:
            cls.cl_physicochem = cls.cl_physicochem.loc[:, cls.__get_physicochem_filter()]
            cls.__cl_processed = True
        return cls.cl_physicochem
    @classmethod
    def __get_physicochem_filter(cls, drug_filter_only = False):
        """Boolean column filter: columns non-zero in either frame.

        NOTE(review): this uses `|` (union) while ECFPDataLoader's analogous
        filter uses `&` (intersection) — confirm the asymmetry is intended.
        """
        if cls.drug_physicochem is None or cls.cl_physicochem is None:
            cls.__dataloader_initializer()
        drug_filter = ~((cls.drug_physicochem == 0).all(axis=0))
        cl_filter = ~((cls.cl_physicochem == 0).all(axis=0))
        common_filter = drug_filter | cl_filter
        if drug_filter_only:
            return drug_filter
        return common_filter
class SingleResponseDataLoader(CustomDataLoader):
    """Loads single-drug response data indexed by (cell_line, drug)."""
    # Cached response frame; populated lazily by the initializer below.
    single_response = None
    def __init__(self):
        super().__init__()
    @classmethod
    def __dataloader_initializer(cls):
        # Read once: drop the 'mean'/'sigma' columns, upper-case drug names
        # (to match the naming used elsewhere), index by (cell_line, drug).
        if cls.single_response is None:
            cls.single_response = pd.read_csv(setting.single_response, index_col=0).drop(['mean', 'sigma'], axis=1)
            cls.single_response['drug'] = cls.single_response['drug'].str.upper()
            cls.single_response.set_index(['cell_line', 'drug'], inplace = True)
    @classmethod
    def get_single_response(cls, save_each_data_point = setting.save_each_ecfp_phy_data_point):
        """Return the response frame; optionally save one .pt per (cell_line, drug) row."""
        if cls.single_response is None:
            cls.__dataloader_initializer()
        if save_each_data_point:
            if not path.exists("single_datas"):
                mkdir("single_datas")
            for i, one_drug_single in enumerate(cls.single_response.values):
                save(one_drug_single, path.join("single_datas", "_".join(cls.single_response.index[i]) + '.pt'))
        return cls.single_response
class ProteomicsDataLoader(CustomDataLoader):
    """Loads the CCLE proteomics matrix (rows are cell lines)."""
    # Lazily-read proteomics frame shared across all users of the class.
    proteomics = None
    def __init__(self):
        super().__init__()
    @classmethod
    def __dataloader_initializer(cls):
        # First column of the CSV becomes the index.
        if cls.proteomics is None:
            cls.proteomics = pd.read_csv(setting.ccle_pro, index_col=0)
    @classmethod
    def get_proteomics(cls, save_each_data_point = setting.save_each_ecfp_phy_data_point):
        """Return the proteomics frame; optionally persist one .pt per cell line."""
        if cls.proteomics is None:
            cls.__dataloader_initializer()
        if save_each_data_point:
            if not path.exists("proteomics_datas"):
                mkdir("proteomics_datas")
            for row_idx, cl_profile in enumerate(cls.proteomics.values):
                save(cl_profile, path.join("proteomics_datas", cls.proteomics.index[row_idx]) + '.pt')
        return cls.proteomics
class RepresentationSamplesDataLoader(CustomDataLoader):
    """Builds raw representation-sample features from L1000 drug signatures,
    cell-line factors (F_cl) and the synergy-score table.

    Fixes:
    - __init__ called `super.__init__()` (the builtin `super` object, not
      `super()`), which raised AttributeError on instantiation.
    - __dataloader_initializer stored the L1000 up-regulation table in
      `cls.L1000_downregulation`, leaving `cls.L1000_upregulation` None and
      crashing __features_prep (SamplesDataLoader loads the same setting
      into L1000_upregulation, confirming the intended attribute).
    """
    L1000_upregulation = None
    F_cl = None
    synergy_score = None
    data_initialized = False
    drug_a_features = None
    drug_b_features = None
    cellline_features = None
    whole_df = None
    def __init__(self):
        super().__init__()
    @classmethod
    def __dataloader_initializer(cls):
        if cls.data_initialized:
            return
        ######################
        ### 5-FU ....
        #####################
        cls.L1000_upregulation = pd.read_csv(setting.L1000_upregulation, header = None, index_col = 0)
        ######################
        ### A2058 ......
        #####################
        cls.F_cl = pd.read_csv(setting.F_cl, header = None, index_col = 0)
        ### Reading synergy score data ###
        ### Unnamed: 0,drug_a_name,drug_b_name,cell_line,synergy
        ### 5-FU_ABT-888_A2058,5-FU,ABT-888,A2058,7.6935301658
        ### 5-FU_ABT-888_A2780,5-FU,ABT-888,A2780,7.7780530601
        cls.synergy_score = SynergyDataReader.get_synergy_score()
        cls.data_initialized = True
    @classmethod
    def __features_prep(cls):
        ### generate drugs features, aligned row-by-row with synergy_score
        if cls.drug_a_features is None or cls.drug_b_features is None or cls.cellline_features is None:
            cls.__dataloader_initializer()
            cls.drug_a_features = cls.L1000_upregulation.loc[list(cls.synergy_score['drug_a_name']), :].reset_index(drop=True)
            cls.drug_b_features = cls.L1000_upregulation.loc[list(cls.synergy_score['drug_b_name']), :].reset_index(drop=True)
            cls.cellline_features = cls.F_cl.loc[list(cls.synergy_score['cell_line']), :].reset_index(drop=True)
        return [cls.drug_a_features, cls.drug_b_features, cls.cellline_features]
    @classmethod
    def __construct_whole_raw_X(cls):
        ### return dataframe (built once, cached):
        ###     first_half_drugs_features            first_half_cellline_features
        ###     switched_second_half_drugs_features  second_half_cellline_features
        ### i.e. every (a, b) pair is duplicated as (b, a) for symmetry.
        if cls.whole_df is None:
            features_list = cls.__features_prep()
            first_half = pd.concat(features_list, axis=1)
            second_half = pd.concat([features_list[1], features_list[0], features_list[2]], axis=1)
            cls.whole_df = pd.concat([first_half, second_half], axis=0).reset_index(drop=True)
        return cls.whole_df
    @classmethod
    def Raw_X_features_prep(cls, methods):
        ### Generate final raw features dataset
        ### return: ndarray (n_samples, n_type_features, feature_dim) if 'attn'
        ###         ndarray (n_samples, n_kept_features) else (zero-variance
        ###         columns dropped)
        raw_x = cls.__construct_whole_raw_X().values
        if methods == 'attn':
            x = raw_x.reshape(-1, 3, setting.F_repr_feature_length)
        else:
            # Sanity check that the feature layout matches the configured
            # n_feature_type split before filtering.
            drug_features_len = int(1 / setting.n_feature_type * raw_x.shape[1])
            cl_features_len = int(raw_x.shape[1] - 2 * drug_features_len)
            assert cl_features_len == int((1 - 2 / setting.n_feature_type) * raw_x.shape[1]), \
                "features len are calculated in wrong way"
            var_filter = raw_x.var(axis=0) > 0
            x = raw_x[:, var_filter]
        return x
    @classmethod
    def Y_features_prep(cls):
        ### Generate final y features in ndarray (-1, 1); duplicated to match
        ### the (a, b)/(b, a) doubling in __construct_whole_raw_X.
        cls.__dataloader_initializer()
        Y_labels = cls.synergy_score.loc[:, 'synergy']
        Y_half = Y_labels.values.reshape(-1, 1)
        Y = np.concatenate((Y_half, Y_half), axis=0)
        return Y
class SamplesDataLoader(CustomDataLoader):
    """Assembles model-ready drug / cell-line feature samples from the
    reader and loader classes above (this class continues below)."""
    # Lazily-populated shared state; everything is filled by the
    # class-level initializer, so all attributes start empty.
    entrez_set = None
    cellline_set = None
    network = None
    simulated_drug_target = None
    synergy_score = None
    sel_dp = None  # gene-dependence frame (only when configured)
    expression_df = None
    netexpress_df = None
    drug_a_features = None
    drug_b_features = None
    cellline_features = None
    drug_features = None
    data_initialized = False
    whole_df = None
    Y = None
    # NOTE(review): mutable class-level lists are shared by all instances.
    drug_features_lengths = []
    cellline_features_lengths = []
    L1000_upregulation = None
    L1000_downregulation = None
    F_cl = None
    var_filter = None
    raw_x = None
    combine_drug_multi_gene_express = None
    single_drug_response = None
    def __init__(self):
        super().__init__()
@classmethod
def __dataloader_initializer(cls):
    """Lazily load every configured data source exactly once.

    Populates entrez/network/synergy/cell-line state unconditionally and,
    depending on flags in `setting`, the drug-target profile,
    gene-dependence, netexpress, single-response, L1000 and combined
    propagation tables.  Idempotent: returns immediately once initialized.
    """
    if cls.data_initialized:
        return
    cls.entrez_set = GenesDataReader.get_gene_entrez_set()
    ### Reading network data
    ### entrez_a entrez_b association
    ### 1001 10001 0.3
    ### 10001 100001 0.2
    cls.network = NetworkDataReader.get_network()
    ### Creating test drug target matrix ###
    ###             1001  10001  235  32  25  2222
    ### 5-FU           1      0    0   0   0     0
    ### ABT-888        0      0    0   0   0     0
    ### AZD1775        0      1    1   0   1     0
    ### BORTEZOMIB     1      1    1   1   0     1
    ### CARBOPLATIN    0      0    0   0   1     0
    if 'drug_target_profile' in setting.drug_features:
        cls.simulated_drug_target = DrugTargetProfileDataLoader.get_filtered_simulated_drug_target_matrix()
    ### Reading synergy score data ###
    ### Unnamed: 0,drug_a_name,drug_b_name,cell_line,synergy
    ### 5-FU_ABT-888_A2058,5-FU,ABT-888,A2058,7.6935301658
    ### 5-FU_ABT-888_A2780,5-FU,ABT-888,A2780,7.7780530601
    cls.synergy_score = SynergyDataReader.get_synergy_score()
    cls.cellline_set = SynergyDataReader.get_synergy_data_cell_lines()
    ### Processing gene dependencies map
    ###         "X127399","X1321N1","X143B",
    ### entrez
    ### 1001
    ### 10001
    if 'gene_dependence' in setting.cellline_features:
        cls.sel_dp = GeneDependenciesDataReader.get_gene_dp()
    ### Prepare gene expression data information
    cls.expression_df = ExpressionDataLoader.prepare_expresstion_df(entrezIDs=list(cls.entrez_set),
                                                                    celllines=list(cls.cellline_set))
    if 'netexpress' in setting.cellline_features:
        cls.netexpress_df = NetExpressDataLoader.prepare_netexpress_df(entrezIDs=list(cls.entrez_set))
    if setting.add_single_response_to_drug_target:
        # Scale pIC50 without centering; drug names are upper-cased to
        # match the naming convention used by the other loaders.
        from sklearn.preprocessing import StandardScaler
        scaler = StandardScaler(with_mean=False)
        cls.single_drug_response = pd.read_csv(setting.single_response, index_col=0)
        cls.single_drug_response['pIC50'] = scaler.fit_transform(cls.single_drug_response[['pIC50']]).reshape(-1,1)
        if 'drug' in cls.single_drug_response.columns:
            cls.single_drug_response['drug'] = cls.single_drug_response['drug'].str.upper()
    ######################
    ### 5-FU ....
    #####################
    if 'L1000_upregulation' in setting.drug_features:
        cls.L1000_upregulation = pd.read_csv(setting.L1000_upregulation, header = None, index_col = 0)
    if 'L1000_downregulation' in setting.drug_features:
        cls.L1000_downregulation = pd.read_csv(setting.L1000_downregulation, header = None, index_col = 0)
    if 'combine_drugs_for_cl' in setting.cellline_features:
        cls.combine_drug_multi_gene_express = \
            network_propagation.drug_combine_multiplication_gene_expression_network_propagation(cls.network,
                                                                                                cls.expression_df,
                                                                                                cls.entrez_set,
                                                                                                cls.simulated_drug_target,
                                                                                                cls.synergy_score,
                                                                                                setting.gene_expression_simulated_result_matrix)
    cls.__check_data_frames()
    cls.data_initialized = True
@classmethod
def __drug_features_prep(cls):
### generate drugs features
if cls.drug_a_features is None or cls.drug_b_features is None or cls.drug_features is None:
cls.__dataloader_initializer()
cls.drug_features = []
cls.drug_a_features = []
cls.drug_b_features = []
if 'drug_target_profile' in setting.drug_features:
drug_a_target_feature = cls.simulated_drug_target.loc[list(cls.synergy_score['drug_a_name']), :]
drug_a_target_feature = pd.DataFrame(drug_a_target_feature, columns=cls.entrez_set).reset_index(drop=True)
if setting.add_single_response_to_drug_target:
# drug_a_single_response = cls.single_drug_response.loc[list(cls.synergy_score['drug_a_name']), :]
| |
<gh_stars>100-1000
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ExpressRoutePortArgs', 'ExpressRoutePort']
@pulumi.input_type
class ExpressRoutePortArgs:
def __init__(__self__, *,
bandwidth_in_gbps: pulumi.Input[int],
encapsulation: pulumi.Input[str],
peering_location: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
identity: Optional[pulumi.Input['ExpressRoutePortIdentityArgs']] = None,
link1: Optional[pulumi.Input['ExpressRoutePortLink1Args']] = None,
link2: Optional[pulumi.Input['ExpressRoutePortLink2Args']] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a ExpressRoutePort resource.
:param pulumi.Input[int] bandwidth_in_gbps: Bandwidth of the Express Route Port in Gbps. Changing this forces a new Express Route Port to be created.
:param pulumi.Input[str] encapsulation: The encapsulation method used for the Express Route Port. Changing this forces a new Express Route Port to be created. Possible values are: `Dot1Q`, `QinQ`.
:param pulumi.Input[str] peering_location: The name of the peering location that this Express Route Port is physically mapped to. Changing this forces a new Express Route Port to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Express Route Port should exist. Changing this forces a new Express Route Port to be created.
:param pulumi.Input['ExpressRoutePortIdentityArgs'] identity: An `identity` block as defined below.
:param pulumi.Input['ExpressRoutePortLink1Args'] link1: A list of `link` blocks as defined below.
:param pulumi.Input['ExpressRoutePortLink2Args'] link2: A list of `link` blocks as defined below.
:param pulumi.Input[str] location: The Azure Region where the Express Route Port should exist. Changing this forces a new Express Route Port to be created.
:param pulumi.Input[str] name: The name which should be used for this Express Route Port. Changing this forces a new Express Route Port to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the Express Route Port.
"""
pulumi.set(__self__, "bandwidth_in_gbps", bandwidth_in_gbps)
pulumi.set(__self__, "encapsulation", encapsulation)
pulumi.set(__self__, "peering_location", peering_location)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if link1 is not None:
pulumi.set(__self__, "link1", link1)
if link2 is not None:
pulumi.set(__self__, "link2", link2)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="bandwidthInGbps")
def bandwidth_in_gbps(self) -> pulumi.Input[int]:
"""
Bandwidth of the Express Route Port in Gbps. Changing this forces a new Express Route Port to be created.
"""
return pulumi.get(self, "bandwidth_in_gbps")
@bandwidth_in_gbps.setter
def bandwidth_in_gbps(self, value: pulumi.Input[int]):
pulumi.set(self, "bandwidth_in_gbps", value)
@property
@pulumi.getter
def encapsulation(self) -> pulumi.Input[str]:
"""
The encapsulation method used for the Express Route Port. Changing this forces a new Express Route Port to be created. Possible values are: `Dot1Q`, `QinQ`.
"""
return pulumi.get(self, "encapsulation")
@encapsulation.setter
def encapsulation(self, value: pulumi.Input[str]):
pulumi.set(self, "encapsulation", value)
@property
@pulumi.getter(name="peeringLocation")
def peering_location(self) -> pulumi.Input[str]:
"""
The name of the peering location that this Express Route Port is physically mapped to. Changing this forces a new Express Route Port to be created.
"""
return pulumi.get(self, "peering_location")
@peering_location.setter
def peering_location(self, value: pulumi.Input[str]):
pulumi.set(self, "peering_location", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the Resource Group where the Express Route Port should exist. Changing this forces a new Express Route Port to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['ExpressRoutePortIdentityArgs']]:
"""
An `identity` block as defined below.
"""
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['ExpressRoutePortIdentityArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter
def link1(self) -> Optional[pulumi.Input['ExpressRoutePortLink1Args']]:
"""
A list of `link` blocks as defined below.
"""
return pulumi.get(self, "link1")
@link1.setter
def link1(self, value: Optional[pulumi.Input['ExpressRoutePortLink1Args']]):
pulumi.set(self, "link1", value)
@property
@pulumi.getter
def link2(self) -> Optional[pulumi.Input['ExpressRoutePortLink2Args']]:
    """
    A list of `link` blocks as defined below.
    """
    return pulumi.get(self, "link2")

@link2.setter
def link2(self, value: Optional[pulumi.Input['ExpressRoutePortLink2Args']]):
    """Set the value of `link2` (see the getter for semantics)."""
    pulumi.set(self, "link2", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
    """
    The Azure Region where the Express Route Port should exist. Changing this forces a new Express Route Port to be created.
    """
    return pulumi.get(self, "location")

@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
    """Set the value of `location` (see the getter for semantics)."""
    pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
    """
    The name which should be used for this Express Route Port. Changing this forces a new Express Route Port to be created.
    """
    return pulumi.get(self, "name")

@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
    """Set the value of `name` (see the getter for semantics)."""
    pulumi.set(self, "name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
    """
    A mapping of tags which should be assigned to the Express Route Port.
    """
    return pulumi.get(self, "tags")

@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
    """Set the value of `tags` (see the getter for semantics)."""
    pulumi.set(self, "tags", value)
@pulumi.input_type
class _ExpressRoutePortState:
def __init__(__self__, *,
             bandwidth_in_gbps: Optional[pulumi.Input[int]] = None,
             encapsulation: Optional[pulumi.Input[str]] = None,
             ethertype: Optional[pulumi.Input[str]] = None,
             guid: Optional[pulumi.Input[str]] = None,
             identity: Optional[pulumi.Input['ExpressRoutePortIdentityArgs']] = None,
             link1: Optional[pulumi.Input['ExpressRoutePortLink1Args']] = None,
             link2: Optional[pulumi.Input['ExpressRoutePortLink2Args']] = None,
             location: Optional[pulumi.Input[str]] = None,
             mtu: Optional[pulumi.Input[str]] = None,
             name: Optional[pulumi.Input[str]] = None,
             peering_location: Optional[pulumi.Input[str]] = None,
             resource_group_name: Optional[pulumi.Input[str]] = None,
             tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
    """
    Input properties used for looking up and filtering ExpressRoutePort resources.
    :param pulumi.Input[int] bandwidth_in_gbps: Bandwidth of the Express Route Port in Gbps. Changing this forces a new Express Route Port to be created.
    :param pulumi.Input[str] encapsulation: The encapsulation method used for the Express Route Port. Changing this forces a new Express Route Port to be created. Possible values are: `Dot1Q`, `QinQ`.
    :param pulumi.Input[str] ethertype: The EtherType of the Express Route Port.
    :param pulumi.Input[str] guid: The resource GUID of the Express Route Port.
    :param pulumi.Input['ExpressRoutePortIdentityArgs'] identity: An `identity` block as defined below.
    :param pulumi.Input['ExpressRoutePortLink1Args'] link1: A list of `link` blocks as defined below.
    :param pulumi.Input['ExpressRoutePortLink2Args'] link2: A list of `link` blocks as defined below.
    :param pulumi.Input[str] location: The Azure Region where the Express Route Port should exist. Changing this forces a new Express Route Port to be created.
    :param pulumi.Input[str] mtu: The maximum transmission unit of the Express Route Port.
    :param pulumi.Input[str] name: The name which should be used for this Express Route Port. Changing this forces a new Express Route Port to be created.
    :param pulumi.Input[str] peering_location: The name of the peering location that this Express Route Port is physically mapped to. Changing this forces a new Express Route Port to be created.
    :param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Express Route Port should exist. Changing this forces a new Express Route Port to be created.
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the Express Route Port.
    """
    # Only store fields that were explicitly provided, so that unset state
    # fields stay absent (None would otherwise overwrite engine defaults).
    if bandwidth_in_gbps is not None:
        pulumi.set(__self__, "bandwidth_in_gbps", bandwidth_in_gbps)
    if encapsulation is not None:
        pulumi.set(__self__, "encapsulation", encapsulation)
    if ethertype is not None:
        pulumi.set(__self__, "ethertype", ethertype)
    if guid is not None:
        pulumi.set(__self__, "guid", guid)
    if identity is not None:
        pulumi.set(__self__, "identity", identity)
    if link1 is not None:
        pulumi.set(__self__, "link1", link1)
    if link2 is not None:
        pulumi.set(__self__, "link2", link2)
    if location is not None:
        pulumi.set(__self__, "location", location)
    if mtu is not None:
        pulumi.set(__self__, "mtu", mtu)
    if name is not None:
        pulumi.set(__self__, "name", name)
    if peering_location is not None:
        pulumi.set(__self__, "peering_location", peering_location)
    if resource_group_name is not None:
        pulumi.set(__self__, "resource_group_name", resource_group_name)
    if tags is not None:
        pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="bandwidthInGbps")
def bandwidth_in_gbps(self) -> Optional[pulumi.Input[int]]:
    """
    Bandwidth of the Express Route Port in Gbps. Changing this forces a new Express Route Port to be created.
    """
    return pulumi.get(self, "bandwidth_in_gbps")

@bandwidth_in_gbps.setter
def bandwidth_in_gbps(self, value: Optional[pulumi.Input[int]]):
    """Set the value of `bandwidth_in_gbps` (see the getter for semantics)."""
    pulumi.set(self, "bandwidth_in_gbps", value)
@property
@pulumi.getter
def encapsulation(self) -> Optional[pulumi.Input[str]]:
    """
    The encapsulation method used for the Express Route Port. Changing this forces a new Express Route Port to be created. Possible values are: `Dot1Q`, `QinQ`.
    """
    return pulumi.get(self, "encapsulation")

@encapsulation.setter
def encapsulation(self, value: Optional[pulumi.Input[str]]):
    """Set the value of `encapsulation` (see the getter for semantics)."""
    pulumi.set(self, "encapsulation", value)
@property
@pulumi.getter
def ethertype(self) -> Optional[pulumi.Input[str]]:
    """
    The EtherType of the Express Route Port.
    """
    return pulumi.get(self, "ethertype")

@ethertype.setter
def ethertype(self, value: Optional[pulumi.Input[str]]):
    """Set the value of `ethertype` (see the getter for semantics)."""
    pulumi.set(self, "ethertype", value)
@property
@pulumi.getter
def guid(self) -> Optional[pulumi.Input[str]]:
    """
    The resource GUID of the Express Route Port.
    """
    return pulumi.get(self, "guid")

@guid.setter
def guid(self, value: Optional[pulumi.Input[str]]):
    """Set the value of `guid` (see the getter for semantics)."""
    pulumi.set(self, "guid", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['ExpressRoutePortIdentityArgs']]:
| |
* np.cos(angle_rad) + yc
if (iring != 0 or not (iseg == nrings and "DARK" in kwargs)):
counter += 1
if not any(counter == np.array([1, 9, 52, 60])):
ap = ap + proper.prop_polygon(wf, 6, hexrad, xhex, yhex,
ROTATION=angle)
if (iring != 0):
xhex = -x * np.cos(angle_rad) - y * np.sin(angle_rad) + xc
yhex = -x * np.sin(angle_rad) + y * np.cos(angle_rad) + yc
counter += 1
if not any(counter == np.array([1, 9, 53, 61])):
ap = ap + proper.prop_polygon(wf, 6, hexrad, xhex, yhex,
ROTATION=angle)
y += hexsep
return ap
def falco_gen_pupil_Simple(inputs):
    """
    Generate a custom simple circular pupil with an ID, OD, and struts.

    Parameters
    ----------
    inputs : dict
        Dictionary of input parameters. Required keys: "Nbeam", "Npad", "OD".
        Optional keys: "wStrut", "angStrut", "ID", "centering", "xStretch",
        "clocking", "xShear", "yShear", "flagHG".

    Returns
    -------
    pupil : numpy ndarray
        2-D pupil mask

    Raises
    ------
    TypeError
        If flagHG is not a bool, or angStrut is not iterable.
    ValueError
        If the inner diameter exceeds the outer diameter.
    """
    check.is_dict(inputs, 'inputs')

    # Required dictionary keys
    Nbeam = inputs["Nbeam"]  # Aperture diameter in pixel widths
    Narray = inputs["Npad"]  # Number of points across 2-D, NxN output array
    OD = inputs["OD"]  # pupil outer diameter, can be < 1

    # Optional dictionary keys
    wStrut = inputs.get("wStrut", 0.)  # width of each strut [pupil diameters]
    angStrut = inputs.get("angStrut", [])  # azimuthal strut locations [degrees]
    angStrutVec = np.atleast_1d(angStrut)
    ID = inputs.get("ID", 0.)  # central obscuration diam [pupil diameters]
    centering = inputs.get("centering", "pixel")
    xStretch = inputs.get("xStretch", 1.)
    clocking = inputs.get("clocking", 0.)  # [degrees]
    xShear = inputs.get("xShear", 0.)  # [pupil diameters]
    yShear = inputs.get("yShear", 0.)  # [pupil diameters]
    flagHG = inputs.get("flagHG", False)

    # Checks on dict keys
    check.real_nonnegative_scalar(wStrut, 'wStrut', TypeError)
    check.centering(centering)
    if not isinstance(flagHG, bool):
        raise TypeError("inputs['flagHG'] must be a bool")
    if ID > OD:
        raise ValueError("Inner diameter is larger than outer diameter.")

    # By default, don't use hyper-gaussians for anti-aliasing the edges.
    if not flagHG:
        # Create outer aperture
        inpOuter = {}
        inpOuter["Nbeam"] = Nbeam
        inpOuter["Narray"] = Narray
        inpOuter["radiusX"] = xStretch*0.5*OD
        inpOuter["radiusY"] = 0.5*OD
        inpOuter["centering"] = centering
        inpOuter["clockingDegrees"] = clocking
        inpOuter["xShear"] = xShear
        inpOuter["yShear"] = yShear
        apOuter = gen_ellipse(inpOuter)

        # Create inner obscuration
        if ID > 0:
            inpInner = {}
            inpInner["Nbeam"] = Nbeam
            inpInner["Narray"] = Narray
            inpInner["radiusX"] = xStretch*0.5*ID
            inpInner["radiusY"] = 0.5*ID
            inpInner["centering"] = centering
            inpInner["clockingDegrees"] = clocking
            inpInner["xShear"] = xShear
            inpInner["yShear"] = yShear
            apInner = 1.0 - gen_ellipse(inpInner)
        else:
            apInner = 1

        # Create strut obscurations
        if angStrutVec.size == 0:
            apStruts = 1
        else:
            # INITIALIZE PROPER
            Dbeam = 1.0  # diameter of beam (normalized to itself)
            dx = Dbeam/Nbeam
            Darray = Narray*dx
            wl_dummy = 1e-6  # dummy value; the mask is wavelength-independent
            bdf = Dbeam/Darray  # beam diameter fraction
            if centering == 'pixel':
                cshift = 0
            elif centering == 'interpixel':
                cshift = -dx/2.
            bm = proper.prop_begin(Dbeam, wl_dummy, Narray, bdf)

            # STRUTS: each strut is a rotated rectangular obscuration whose
            # center sits halfway along its (radial) length.
            lStrut = 0.6  # [pupil diameters]
            rcStrut0 = lStrut / 2.0
            for iStrut in range(angStrutVec.size):
                ang = angStrutVec[iStrut] + clocking
                proper.prop_rectangular_obscuration(bm, lStrut, wStrut,
                                                    rcStrut0*cosd(ang)+cshift+xShear,
                                                    rcStrut0*sind(ang)+cshift+yShear,
                                                    ROTATION=ang)
            apStruts = np.fft.ifftshift(np.abs(bm.wfarr))

        # Combine all features
        pupil = apOuter*apInner*apStruts

    else:
        hg_expon = 1000  # hyper-gaussian exponent for anti-aliasing
        hg_expon_spider = 100  # hyper-gaussian exponent for anti-aliasing
        apRad = Nbeam/2.  # aperture radius in samples

        # Create coordinates
        if centering == 'pixel':
            x = np.arange(-Narray/2, Narray/2)
        elif centering == 'interpixel':
            x = np.arange(-(Narray-1)/2, (Narray-1)/2+1)
        RHO = falco.util.radial_grid(x, xStretch=xStretch)
        THETA = falco.util.azimuthal_grid(x, xStretch=xStretch)

        if ID > 0:
            pupil = np.exp(-(RHO/(apRad*OD))**hg_expon) - \
                np.exp(-(RHO/(apRad*ID))**hg_expon)
        else:
            pupil = np.exp(-(RHO/(apRad*OD))**hg_expon)

        # Create spiders
        if wStrut > 0:
            try:
                halfwidth = wStrut*apRad
                for ang in angStrutVec:
                    pupil *= (1 - np.exp(-(RHO*np.sin(THETA-ang*np.pi/180)
                                           / halfwidth)**hg_expon_spider) *
                              (RHO*np.cos(THETA-ang*np.pi/180) > 0))
            # Was a bare `except:`, which also swallowed KeyboardInterrupt and
            # hid unrelated errors; only a non-iterable raises TypeError here.
            except TypeError:
                raise TypeError("inputs['angStrut'] must be an iterable")

    return pupil
def falco_gen_pupil_customHex(inputs):
    """
    Generate a custom segmented pupil comprised of hexagonal segments.

    Parameters
    ----------
    inputs : dict
        Dictionary of input parameters. Required keys: "Npad", "OD", "ID",
        "Nbeam". Optional: "wStrut" (with "angStrut"), "pistons".

    Returns
    -------
    pupil : numpy ndarray
        2-D pupil mask

    Raises
    ------
    TypeError
        If wStrut is not a real nonnegative scalar, or angStrut is not
        iterable.
    """
    check.is_dict(inputs, 'inputs')

    hg_expon = 1000  # hyper-gaussian exponent for anti-aliasing
    hg_expon_spider = 100  # hyper-gaussian exponent for anti-aliasing
    N = inputs["Npad"]  # Number of samples in NxN grid
    OD = inputs["OD"]  # pupil outer diameter, can be < 1
    ID = inputs["ID"]  # central obscuration radius
    apRad = inputs["Nbeam"]/2.  # aperture radius in samples
    if 'wStrut' in inputs:
        wStrut = inputs["wStrut"]  # width of all struts [pupil diameters], float
        check.real_nonnegative_scalar(wStrut, 'wStrut', TypeError)
        angStrutVec = inputs["angStrut"]  # Azimuthal locations of the radial struts, array_like
    else:
        wStrut = 0

    # Create coordinates
    [X, Y] = np.meshgrid(np.arange(-N/2, N/2), np.arange(-N/2, N/2))
    [THETA, RHO] = falco.util.cart2pol(X, Y)

    inputs["apDia"] = inputs["Nbeam"]
    if 'pistons' in inputs:
        pupil0 = falco.hexsegmirror.get_field(inputs)
    else:
        pupil0 = falco.hexsegmirror.get_support(inputs)

    # Create inner and outer circles
    if ID > 0:
        pupil = np.exp(-(RHO/(apRad*OD))**hg_expon) - np.exp(-(RHO/(apRad*ID))**hg_expon)
    else:
        pupil = np.exp(-(RHO/(apRad*OD))**hg_expon)

    pupil = pupil*pupil0

    # Create spiders
    if wStrut > 0:
        halfwidth = wStrut*2.*apRad
        try:
            for ang in angStrutVec:
                pupil = pupil*(1.-np.exp(-(RHO*np.sin(THETA-ang*np.pi/180.)/halfwidth)**hg_expon_spider) *
                               (RHO*np.cos(THETA-ang*np.pi/180.) > 0))
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and hid
        # unrelated errors; only a non-iterable angStrutVec raises TypeError.
        except TypeError:
            raise TypeError("inputs['angStrut'] must be an iterable")

    return pupil
# def falco_gen_pupil_LUVOIR_B(Nbeam):
# """
# Generate the LUVOIR B pupil.
# Parameters
# ----------
# Nbeam : float, int
# Number of points across the pupil diameter.
# Returns
# -------
# pupil : numpy ndarray
# 2-D pupil amplitude for LUVOIR B
# """
# D = 7.989 # meters, circumscribed. Segments 0.955m flat-to-flat. 6mm gaps.
# wGap = 6e-3/D # Fractional width of segment gaps
# inputs = {}
# inputs["Nbeam"] = Nbeam/0.925 # number of points across the pupil diameter
# inputs["wGap"] = wGap*Nbeam # number of samples across segment gaps
# inputs["numRings"] = 4 # Number of rings in hexagonally segmented mirror
# inputs["Npad"] = int(2**(falco.util.nextpow2(Nbeam)))
# inputs["ID"] = 0 # central obscuration radius
# inputs["OD"] = 1 # pupil outer diameter, can be < 1
# inputs["angStrut"] = np.array([]) # Angles of the struts (deg)
# inputs["wStrut"] = 0 # np.array([]) # Width of the struts (fraction of pupil diam.)
# missingSegments = np.ones(falco.hexsegmirror.count_segments(inputs["numRings"]),)
# for index in range(6):
# missingSegments[38+index*4 - 1] = 0
# inputs["missingSegments"] = missingSegments
# pupil = falco_gen_pupil_customHex(inputs)
# return pupil
def falco_gen_vortex_mask(charge, N):
    """
    Generate a vortex phase mask.

    Parameters
    ----------
    charge : int, float
        Charge of the vortex mask.
    N : int
        Number of points across output array.

    Returns
    -------
    vortex : numpy ndarray
        2-D vortex phase mask
    """
    check.real_scalar(charge, 'charge', TypeError)
    check.positive_scalar_integer(N, 'N', TypeError)
    coords = np.arange(-N/2., N/2.)
    theta = falco.util.azimuthal_grid(coords)
    return np.exp(1j * charge * theta)
def gen_ellipse(inputs):
    """Generate a rotated ellipse mask; thin wrapper around falco_gen_ellipse."""
    return falco_gen_ellipse(inputs)
def falco_gen_ellipse(inputs):
    """
    Generate a rotated ellipse with antialiased edges.

    Parameters
    ----------
    inputs : dict
        dictionary of input values. Required keys: 'Nbeam', 'Narray',
        'radiusX', 'radiusY'. Optional keys: 'centering' ('pixel' default),
        'clockingDegrees', 'xShear', 'yShear', 'magFac'.

    Returns
    -------
    pupil : numpy ndarray
        2-D output mask. Interior pixels are 1, exterior 0, and edge pixels
        hold a fractional (grayscale) fill value.
    """
    check.is_dict(inputs, 'inputs')

    Nbeam = inputs['Nbeam']
    Narray = inputs['Narray']
    radiusX = inputs['radiusX']
    radiusY = inputs['radiusY']

    # Optional dictionary keys
    centering = inputs["centering"] if('centering' in inputs) else 'pixel'
    clockingDegrees = inputs["clockingDegrees"] if('clockingDegrees' in inputs) else 0.
    clockingRadians = (np.pi/180.)*clockingDegrees
    xShear = inputs["xShear"] if('xShear' in inputs) else 0.
    yShear = inputs["yShear"] if('yShear' in inputs) else 0.
    magFac = inputs["magFac"] if('magFac' in inputs) else 1

    # Beam-normalized coordinates, offset by the shear values.
    if centering == 'pixel':
        x = np.linspace(-Narray/2., Narray/2. - 1, Narray)/float(Nbeam)
    elif centering == 'interpixel':
        x = np.linspace(-(Narray-1)/2., (Narray-1)/2., Narray)/float(Nbeam)
    y = x
    x = x - xShear
    y = y - yShear
    [X, Y] = np.meshgrid(x,y)
    dx = x[1] - x[0]
    radius = 0.5

    # Elliptical radial coordinate: the rotated, axis-scaled ellipse maps onto
    # the circle RHO == radius.
    RHO = 1/magFac*0.5*np.sqrt(
        1/(radiusX)**2*(np.cos(clockingRadians)*X + np.sin(clockingRadians)*Y)**2
        + 1/(radiusY)**2*(np.sin(clockingRadians)*X - np.cos(clockingRadians)*Y)**2
        )

    # Pixels within one grid step of the edge are flagged (-1) for grayscale
    # antialiasing; clear interior is 1, clear exterior is 0.
    halfWindowWidth = np.max(np.abs((RHO[1, 0]-RHO[0, 0], RHO[0, 1] - RHO[0, 0])))
    pupil = -1*np.ones(RHO.shape)
    pupil[np.abs(RHO) < radius - halfWindowWidth] = 1
    pupil[np.abs(RHO) > radius + halfWindowWidth] = 0
    grayInds = np.array(np.nonzero(pupil == -1))
    # print('Number of grayscale points = %d' % grayInds.shape[1])

    # Evaluate each flagged edge pixel on an upsampled subgrid; the fraction
    # of subpixels inside the ellipse becomes the gray value.
    upsampleFactor = 100
    dxUp = dx/float(upsampleFactor)
    xUp = np.linspace(-(upsampleFactor-1)/2., (upsampleFactor-1)/2., upsampleFactor)*dxUp
    [Xup, Yup] = np.meshgrid(xUp, xUp)

    subpixel = np.zeros((upsampleFactor, upsampleFactor))

    for iInterior in range(grayInds.shape[1]):
        subpixel = 0*subpixel

        xCenter = X[grayInds[0, iInterior], grayInds[1, iInterior]]
        yCenter = Y[grayInds[0, iInterior], grayInds[1, iInterior]]

        # NOTE(review): unlike RHO above, RHOup omits the 1/magFac factor, so
        # edge antialiasing appears to ignore magnification -- confirm whether
        # magFac should also be applied here.
        RHOup = 0.5*np.sqrt(
            1/(radiusX)**2*(np.cos(clockingRadians)*(Xup+xCenter) +
                            np.sin(clockingRadians)*(Yup+yCenter))**2
            + 1/(radiusY)**2*(np.sin(clockingRadians)*(Xup+xCenter) -
                              np.cos(clockingRadians)*(Yup+yCenter))**2)

        subpixel[RHOup <= radius] = 1
        pixelValue = np.sum(subpixel)/float(upsampleFactor**2)
        pupil[grayInds[0, iInterior], grayInds[1, iInterior]] = pixelValue

    return pupil
def rotate_shift_downsample_pupil_mask(arrayIn, nBeamIn, nBeamOut, xOffset,
yOffset, rotDeg):
"""
Translate, rotate, and downsample a pixel-centered mask.
Parameters
----------
arrayIn : np.ndarray
2-D, pixel-centered array containing the pupil mask.
nBeamIn : float
Number of points across beam at | |
include in the sighash, for a taproot script path spend.
"script_taproot": default_script_taproot,
# The inner pubkey for a taproot script path spend (32 bytes).
"pubkey_inner": default_pubkey_inner,
# The negation flag of the inner pubkey for a taproot script path spend.
"negflag": default_negflag,
# The leaf version to include in the sighash (this does not affect the one in the control block).
"leafversion": default_leafversion,
# The Merkle path to include in the control block for a script path spend.
"merklebranch": default_merklebranch,
# The control block to push for a taproot script path spend.
"controlblock": default_controlblock,
# Whether to produce signatures with invalid P sign (Schnorr signatures only).
"flag_flip_p": False,
# Whether to produce signatures with invalid R sign (Schnorr signatures only).
"flag_flip_r": False,
# == Parameters that can be changed without invalidating, but do have a default: ==
# The hashtype (as an integer).
"hashtype": default_hashtype,
# The annex (only when mode=="taproot").
"annex": None,
# The codeseparator position (only when mode=="taproot").
"codeseppos": -1,
# The redeemscript to add to the scriptSig (if P2SH; None implies not P2SH).
"script_p2sh": None,
# The script to add to the witness in (if P2WSH; None implies P2WPKH)
"script_witv0": None,
# The leaf to use in taproot spends (if script path spend; None implies key path spend).
"leaf": None,
# The input arguments to provide to the executed script
"inputs": [],
# == Parameters to be set before evaluation: ==
# - mode: what spending style to use ("taproot", "witv0", or "legacy").
# - key: the (untweaked) private key to sign with (ECKey object for ECDSA, 32 bytes for Schnorr).
# - tap: the TaprootInfo object (see taproot_construct; needed in mode=="taproot").
# - tx: the transaction to sign.
# - utxos: the UTXOs being spent (needed in mode=="witv0" and mode=="taproot").
# - idx: the input position being signed.
# - scriptcode: the scriptcode to include in legacy and witv0 sighashes.
}
def flatten(lst):
    """Recursively flatten nested lists into one flat list (non-list items kept as-is)."""
    out = []
    for item in lst:
        if isinstance(item, list):
            out.extend(flatten(item))
        else:
            out.append(item)
    return out
def spend(tx, idx, utxos, **kwargs):
    """Sign transaction input idx of tx, provided utxos is the list of outputs being spent.

    Additional arguments may be provided that override any aspect of the signing process.
    See DEFAULT_CONTEXT above for what can be overridden, and what must be provided.
    """
    # Build the signing context: defaults first, then the required transaction
    # fields, then any caller-supplied overrides.
    ctx = dict(DEFAULT_CONTEXT)
    ctx.update(tx=tx, idx=idx, utxos=utxos)
    ctx.update(kwargs)

    def as_script(item):
        """Return item unchanged if it is a CScript; otherwise wrap it in a push."""
        return item if isinstance(item, CScript) else CScript([item])

    pieces = flatten(get(ctx, "scriptsig"))
    scriptsig = CScript(b"".join(bytes(as_script(piece)) for piece in pieces))
    witness_stack = flatten(get(ctx, "witness"))
    return (scriptsig, witness_stack)
# === Spender objects ===
#
# Each spender is a tuple of:
# - A scriptPubKey which is to be spent from (CScript)
# - A comment describing the test (string)
# - Whether the spending (on itself) is expected to be standard (bool)
# - A tx-signing lambda returning (scriptsig, witness_stack), taking as inputs:
# - A transaction to sign (CTransaction)
# - An input position (int)
# - The spent UTXOs by this transaction (list of CTxOut)
# - Whether to produce a valid spend (bool)
# - A string with an expected error message for failure case if known
# - The (pre-taproot) sigops weight consumed by a successful spend
# - Whether this spend cannot fail
# - Whether this test demands being placed in a txin with no corresponding txout (for testing SIGHASH_SINGLE behavior)
# Immutable record for one spending test case; each field is described in the
# comment block above.
Spender = namedtuple("Spender", "script,comment,is_standard,sat_function,err_msg,sigops_weight,no_fail,need_vin_vout_mismatch")
def make_spender(comment, *, tap=None, witv0=False, script=None, pkh=None, p2sh=False, spk_mutate_pre_p2sh=None, failure=None, standard=True, err_msg=None, sigops_weight=0, need_vin_vout_mismatch=False, **kwargs):
    """Helper for constructing Spender objects using the context signing framework.

    * tap: a TaprootInfo object (see taproot_construct), for Taproot spends (cannot be combined with pkh, witv0, or script)
    * witv0: boolean indicating the use of witness v0 spending (needs one of script or pkh)
    * script: the actual script executed (for bare/P2WSH/P2SH spending)
    * pkh: the public key for P2PKH or P2WPKH spending
    * p2sh: whether the output is P2SH wrapper (this is supported even for Taproot, where it makes the output unencumbered)
    * spk_mutate_pre_p2sh: a callable to be applied to the script (before potentially P2SH-wrapping it)
    * failure: a dict of entries to override in the context when intentionally failing to spend (if None, no_fail will be set)
    * standard: whether the (valid version of) spending is expected to be standard
    * err_msg: a string with an expected error message for failure (or None, if not cared about)
    * sigops_weight: the pre-taproot sigops weight consumed by a successful spend
    * need_vin_vout_mismatch: whether this test demands being placed in a txin with no corresponding txout
    """

    conf = dict()

    # Compute scriptPubKey and set useful defaults based on the inputs.
    if witv0:
        assert tap is None
        conf["mode"] = "witv0"
        if pkh is not None:
            # P2WPKH
            assert script is None
            pubkeyhash = hash160(pkh)
            spk = CScript([OP_0, pubkeyhash])
            conf["scriptcode"] = CScript([OP_DUP, OP_HASH160, pubkeyhash, OP_EQUALVERIFY, OP_CHECKSIG])
            conf["script_witv0"] = None
            conf["inputs"] = [getter("sign"), pkh]
        elif script is not None:
            # P2WSH
            spk = CScript([OP_0, sha256(script)])
            conf["scriptcode"] = script
            conf["script_witv0"] = script
        else:
            assert False
    elif tap is None:
        conf["mode"] = "legacy"
        if pkh is not None:
            # P2PKH
            assert script is None
            pubkeyhash = hash160(pkh)
            spk = CScript([OP_DUP, OP_HASH160, pubkeyhash, OP_EQUALVERIFY, OP_CHECKSIG])
            conf["scriptcode"] = spk
            conf["inputs"] = [getter("sign"), pkh]
        elif script is not None:
            # bare
            spk = script
            conf["scriptcode"] = script
        else:
            assert False
    else:
        assert script is None
        conf["mode"] = "taproot"
        conf["tap"] = tap
        spk = tap.scriptPubKey

    if spk_mutate_pre_p2sh is not None:
        spk = spk_mutate_pre_p2sh(spk)

    if p2sh:
        # P2SH wrapper can be combined with anything else
        conf["script_p2sh"] = spk
        spk = CScript([OP_HASH160, hash160(spk), OP_EQUAL])

    # Caller-supplied kwargs override any of the defaults computed above.
    conf = {**conf, **kwargs}

    def sat_fn(tx, idx, utxos, valid):
        # For the failure case, layer the `failure` overrides on top of conf.
        if valid:
            return spend(tx, idx, utxos, **conf)
        else:
            assert failure is not None
            return spend(tx, idx, utxos, **{**conf, **failure})

    return Spender(script=spk, comment=comment, is_standard=standard, sat_function=sat_fn, err_msg=err_msg, sigops_weight=sigops_weight, no_fail=failure is None, need_vin_vout_mismatch=need_vin_vout_mismatch)
def add_spender(spenders, *args, **kwargs):
    """Construct a Spender via make_spender and append it to the spenders list."""
    spender = make_spender(*args, **kwargs)
    spenders.append(spender)
# === Helpers for the test ===
def random_checksig_style(pubkey):
    """Creates a random CHECKSIG* tapscript that would succeed with only the valid signature on witness stack."""
    opcode = random.choice([OP_CHECKSIG, OP_CHECKSIGVERIFY, OP_CHECKSIGADD])
    if opcode == OP_CHECKSIGVERIFY:
        # VERIFY leaves nothing on the stack, so push a truthy OP_1 afterwards.
        script = CScript([pubkey, opcode, OP_1])
    elif opcode == OP_CHECKSIGADD:
        # ADD increments num by one on success; compare against num + 1.
        num = random.choice([0, 0x7fffffff, -0x7fffffff])
        script = CScript([num, pubkey, opcode, num + 1, OP_EQUAL])
    else:
        script = CScript([pubkey, opcode])
    return bytes(script)
def random_bytes(n):
    """Return a random bytes object of length n."""
    # One getrandbits(8) call per byte, in order, so the RNG stream is
    # consumed exactly as before.
    buf = bytearray(n)
    for i in range(n):
        buf[i] = random.getrandbits(8)
    return bytes(buf)
def bitflipper(expr):
    """Return a callable that evaluates expr and returns it with a random bitflip."""
    def fn(ctx):
        data = deep_eval(ctx, expr)
        assert isinstance(data, bytes)
        nbits = len(data) * 8
        flipped = int.from_bytes(data, 'little') ^ (1 << random.randrange(nbits))
        return flipped.to_bytes(len(data), 'little')
    return fn
def zero_appender(expr):
    """Return a callable that evaluates expr and returns it with a zero added."""
    def fn(ctx):
        return deep_eval(ctx, expr) + b"\x00"
    return fn
def byte_popper(expr):
    """Return a callable that evaluates expr and returns it with its last byte removed."""
    def fn(ctx):
        return deep_eval(ctx, expr)[:-1]
    return fn
# Expected error strings
# Each ERR_* dict carries the expected script/consensus failure message under
# the "err_msg" key, matching the err_msg keyword accepted by make_spender /
# add_spender (presumably unpacked with ** at the call sites -- see usage).
ERR_SIG_SIZE = {"err_msg": "Invalid Schnorr signature size"}
ERR_SIG_HASHTYPE = {"err_msg": "Invalid Schnorr signature hash type"}
ERR_SIG_SCHNORR = {"err_msg": "Invalid Schnorr signature"}
ERR_OP_RETURN = {"err_msg": "OP_RETURN was encountered"}
ERR_CONTROLBLOCK_SIZE = {"err_msg": "Invalid Taproot control block size"}
ERR_WITNESS_PROGRAM_MISMATCH = {"err_msg": "Witness program hash mismatch"}
ERR_PUSH_LIMIT = {"err_msg": "Push value size limit exceeded"}
ERR_DISABLED_OPCODE = {"err_msg": "Attempted to use a disabled opcode"}
ERR_TAPSCRIPT_CHECKMULTISIG = {"err_msg": "OP_CHECKMULTISIG(VERIFY) is not available in tapscript"}
ERR_MINIMALIF = {"err_msg": "OP_IF/NOTIF argument must be minimal in tapscript"}
ERR_UNKNOWN_PUBKEY = {"err_msg": "Public key is neither compressed or uncompressed"}
ERR_STACK_SIZE = {"err_msg": "Stack size limit exceeded"}
ERR_CLEANSTACK = {"err_msg": "Stack size must be exactly one after execution"}
ERR_STACK_EMPTY = {"err_msg": "Operation not valid with the current stack size"}
ERR_SIGOPS_RATIO = {"err_msg": "Too much signature validation relative to witness weight"}
ERR_UNDECODABLE = {"err_msg": "Opcode missing or not understood"}
ERR_NO_SUCCESS = {"err_msg": "Script evaluated without error but finished with a false/empty top stack element"}
ERR_EMPTY_WITNESS = {"err_msg": "Witness program was passed an empty witness"}
ERR_CHECKSIGVERIFY = {"err_msg": "Script failed an OP_CHECKSIGVERIFY operation"}
# All hashtype bytes that are valid for (pre-taproot) ECDSA signatures.
VALID_SIGHASHES_ECDSA = [
    SIGHASH_ALL,
    SIGHASH_NONE,
    SIGHASH_SINGLE,
    SIGHASH_ANYONECANPAY + SIGHASH_ALL,
    SIGHASH_ANYONECANPAY + SIGHASH_NONE,
    SIGHASH_ANYONECANPAY + SIGHASH_SINGLE
]

# Taproot additionally permits SIGHASH_DEFAULT on top of the ECDSA set.
VALID_SIGHASHES_TAPROOT = [SIGHASH_DEFAULT] + VALID_SIGHASHES_ECDSA
VALID_SIGHASHES_TAPROOT_SINGLE = [
SIGHASH_SINGLE,
SIGHASH_ANYONECANPAY |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.